From 89f0122f582e89829163e2855019302f03aa5ed6 Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Fri, 1 Aug 2025 17:07:24 -0300 Subject: [PATCH 1/9] Add rule-based segment for streaming --- dtos/notification.go | 33 +++++ dtos/split.go | 4 +- engine/validator/matchers.go | 28 +++++ push/parser.go | 60 +++++++-- push/parser_test.go | 116 +++++++++--------- storage/inmemory/mutexmap/rulebasedsegment.go | 2 + storage/interfaces.go | 4 +- synchronizer/worker/split/split.go | 48 +++++++- 8 files changed, 217 insertions(+), 78 deletions(-) diff --git a/dtos/notification.go b/dtos/notification.go index 742e9b00..5a10bc8a 100644 --- a/dtos/notification.go +++ b/dtos/notification.go @@ -19,6 +19,7 @@ const ( UpdateTypeSegmentChange = "SEGMENT_UPDATE" UpdateTypeContol = "CONTROL" UpdateTypeLargeSegmentChange = "LS_DEFINITION_UPDATE" + UpdateTypeRuleBasedChange = "RB_SEGMENT_UPDATE" ) // Control type constants @@ -332,6 +333,36 @@ func (u *LargeSegmentChangeUpdate) String() string { u.Channel(), u.ChangeNumber(), len(u.LargeSegments), u.Timestamp()) } +// SplitChangeUpdate represents a SplitChange notification generated in the split servers +type RuleBasedChangeUpdate struct { + BaseUpdate + previousChangeNumber *int64 + ruleBasedSegment *RuleBasedSegmentDTO +} + +func NewRuleBasedChangeUpdate(baseUpdate BaseUpdate, pcn *int64, ruleBasedSegment *RuleBasedSegmentDTO) *RuleBasedChangeUpdate { + return &RuleBasedChangeUpdate{ + BaseUpdate: baseUpdate, + previousChangeNumber: pcn, + ruleBasedSegment: ruleBasedSegment, + } +} + +// UpdateType is always UpdateTypeRuleBasedSegmentChange for Rule-based Segmet Updates +func (u *RuleBasedChangeUpdate) UpdateType() string { return UpdateTypeRuleBasedChange } + +// String returns the string representation of a segment update notification +func (u *RuleBasedChangeUpdate) String() string { + return fmt.Sprintf("LargeSegmentChange(channel=%s,changeNumber=%d,timestamp=%d)", + u.Channel(), u.ChangeNumber(), u.Timestamp()) +} + +// 
PreviousChangeNumber returns previous change number +func (u *RuleBasedChangeUpdate) PreviousChangeNumber() *int64 { return u.previousChangeNumber } + +// RuleBasedSegment returns rule-based segment definiiton or nil +func (u *RuleBasedChangeUpdate) RuleBasedsegment() *RuleBasedSegmentDTO { return u.ruleBasedSegment } + // Compile-type assertions of interface requirements var _ Event = &AblyError{} var _ Message = &OccupancyMessage{} @@ -340,7 +371,9 @@ var _ Message = &SplitKillUpdate{} var _ Message = &SegmentChangeUpdate{} var _ Message = &ControlUpdate{} var _ Message = &LargeSegmentChangeUpdate{} +var _ Message = &RuleBasedChangeUpdate{} var _ Update = &SplitChangeUpdate{} var _ Update = &SplitKillUpdate{} var _ Update = &SegmentChangeUpdate{} var _ Update = &LargeSegmentChangeUpdate{} +var _ Update = &RuleBasedChangeUpdate{} diff --git a/dtos/split.go b/dtos/split.go index 15615552..59a40dd4 100644 --- a/dtos/split.go +++ b/dtos/split.go @@ -6,8 +6,8 @@ import ( // SplitChangesDTO structure to map JSON message sent by Split servers. 
type SplitChangesDTO struct { - FeatureFlags FeatureFlagsDTO `json:"ff"` - RuleBasedSegments []RuleBasedSegmentsDTO `json:"rbs"` + FeatureFlags FeatureFlagsDTO `json:"ff"` + RuleBasedSegments RuleBasedSegmentsDTO `json:"rbs"` } type FeatureFlagsDTO struct { diff --git a/engine/validator/matchers.go b/engine/validator/matchers.go index 559fd496..9d500b4e 100644 --- a/engine/validator/matchers.go +++ b/engine/validator/matchers.go @@ -21,6 +21,15 @@ var unsupportedMatcherConditionReplacement []dtos.ConditionDTO = []dtos.Conditio }, }} +// unsupportedMatcherRBConditionReplacement is the default condition to be used when a matcher is not supported +var unsupportedMatcherRBConditionReplacement []dtos.RuleBasedConditionDTO = []dtos.RuleBasedConditionDTO{{ + ConditionType: grammar.ConditionTypeWhitelist, + MatcherGroup: dtos.MatcherGroupDTO{ + Combiner: "AND", + Matchers: []dtos.MatcherDTO{{MatcherType: grammar.MatcherTypeAllKeys, Negate: false}}, + }, +}} + func shouldOverrideConditions(conditions []dtos.ConditionDTO, logger logging.LoggerInterface) bool { for _, condition := range conditions { for _, matcher := range condition.MatcherGroup.Matchers { @@ -33,6 +42,18 @@ func shouldOverrideConditions(conditions []dtos.ConditionDTO, logger logging.Log return false } +func shouldOverrideRBConditions(conditions []dtos.RuleBasedConditionDTO, logger logging.LoggerInterface) bool { + for _, condition := range conditions { + for _, matcher := range condition.MatcherGroup.Matchers { + _, err := grammar.BuildMatcher(&matcher, &injection.Context{}, logger) + if _, ok := err.(datatypes.UnsupportedMatcherError); ok { + return true + } + } + } + return false +} + // ProcessMatchers processes the matchers of a split and validates them func ProcessMatchers(split *dtos.SplitDTO, logger logging.LoggerInterface) { if shouldOverrideConditions(split.Conditions, logger) { @@ -40,6 +61,13 @@ func ProcessMatchers(split *dtos.SplitDTO, logger logging.LoggerInterface) { } } +// ProcessMatchers 
processes the matchers of a rule-based and validates them +func ProcessRBMatchers(ruleBased *dtos.RuleBasedSegmentDTO, logger logging.LoggerInterface) { + if shouldOverrideRBConditions(ruleBased.Conditions, logger) { + ruleBased.Conditions = unsupportedMatcherRBConditionReplacement + } +} + // MakeUnsupportedMatcherConditionReplacement returns the default condition to be used when a matcher is not supported func MakeUnsupportedMatcherConditionReplacement() []dtos.ConditionDTO { return unsupportedMatcherConditionReplacement diff --git a/push/parser.go b/push/parser.go index 5d440275..752c27fd 100644 --- a/push/parser.go +++ b/push/parser.go @@ -140,6 +140,13 @@ func (p *NotificationParserImpl) parseUpdate(data *genericData, nested *genericM return nil, p.onLargeSegmentUpdate(dtos.NewLargeSegmentChangeUpdate(base, largeSegments)) case dtos.UpdateTypeContol: return p.onControlUpdate(dtos.NewControlUpdate(base.BaseMessage, nested.ControlType)), nil + //case dtos.UpdateTypeRuleBasedChange: + //ruleBased := p.processRuleBasedMessage(nested) + //to do: + // if ruleBased == nil { + // return nil, p.onSplitUpdate(dtos.NewRuleBasedChangeUpdate(base, nil, nil)) + // } + // return nil, p.onSplitUpdate(dtos.NewRuleBasedChangeUpdate(base, &nested.PreviousChangeNumber, ruleBased)) default: // TODO: log full event in debug mode return nil, fmt.Errorf("invalid update type: %s", nested.Type) @@ -157,10 +164,10 @@ func (p *NotificationParserImpl) processLargeSegmentMessage(nested *genericMessa func (p *NotificationParserImpl) processMessage(nested *genericMessageData) *dtos.SplitDTO { compressType := getCompressType(nested.CompressType) - if nested.FeatureFlagDefinition == nil || compressType == nil { + if nested.Definition == nil || compressType == nil { return nil } - ffDecoded, err := p.dataUtils.Decode(common.StringFromRef(nested.FeatureFlagDefinition)) + ffDecoded, err := p.dataUtils.Decode(common.StringFromRef(nested.Definition)) if err != nil { 
p.logger.Debug(fmt.Sprintf("error decoding FeatureFlagDefinition: '%s'", err.Error())) return nil @@ -182,6 +189,33 @@ func (p *NotificationParserImpl) processMessage(nested *genericMessageData) *dto return &featureFlag } +func (p *NotificationParserImpl) processRuleBasedMessage(nested *genericMessageData) *dtos.RuleBasedSegmentDTO { + compressType := getCompressType(nested.CompressType) + if nested.Definition == nil || compressType == nil { + return nil + } + ruleBasedDecoded, err := p.dataUtils.Decode(common.StringFromRef(nested.Definition)) + if err != nil { + p.logger.Debug(fmt.Sprintf("error decoding RuleBasedSegmentDefinition: '%s'", err.Error())) + return nil + } + if common.IntFromRef(compressType) != datautils.None { + ruleBasedDecoded, err = p.dataUtils.Decompress(ruleBasedDecoded, common.IntFromRef(compressType)) + if err != nil { + p.logger.Debug(fmt.Sprintf("error decompressing RulebasedSegmentDefinition: '%s'", err.Error())) + return nil + } + } + + var ruleBased dtos.RuleBasedSegmentDTO + err = json.Unmarshal([]byte(ruleBasedDecoded), &ruleBased) + if err != nil { + p.logger.Debug(fmt.Sprintf("error parsing rule-based segment json definition: '%s'", err.Error())) + return nil + } + return &ruleBased +} + type genericData struct { // Error associated data @@ -207,17 +241,17 @@ type metrics struct { } type genericMessageData struct { - Metrics metrics `json:"metrics"` - Type string `json:"type"` - ChangeNumber int64 `json:"changeNumber"` - SplitName string `json:"splitName"` - DefaultTreatment string `json:"defaultTreatment"` - SegmentName string `json:"segmentName"` - ControlType string `json:"controlType"` - PreviousChangeNumber int64 `json:"pcn"` - CompressType *int `json:"c"` - FeatureFlagDefinition *string `json:"d"` - LargeSegments []dtos.LargeSegmentRFDResponseDTO `json:"ls"` + Metrics metrics `json:"metrics"` + Type string `json:"type"` + ChangeNumber int64 `json:"changeNumber"` + SplitName string `json:"splitName"` + DefaultTreatment string 
`json:"defaultTreatment"` + SegmentName string `json:"segmentName"` + ControlType string `json:"controlType"` + PreviousChangeNumber int64 `json:"pcn"` + CompressType *int `json:"c"` + Definition *string `json:"d"` + LargeSegments []dtos.LargeSegmentRFDResponseDTO `json:"ls"` // {\"type\":\"SPLIT_UPDATE\",\"changeNumber\":1612909342671}"} } diff --git a/push/parser_test.go b/push/parser_test.go index dac194b1..d37f1525 100644 --- a/push/parser_test.go +++ b/push/parser_test.go @@ -67,11 +67,11 @@ func TestParseInstantFF(t *testing.T) { EventCall: func() string { return dtos.SSEEventTypeMessage }, DataCall: func() string { updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), }) mainJSON, _ := json.Marshal(genericData{ Timestamp: 123, @@ -119,11 +119,11 @@ func TestParseInstantFFCompressTypeZlib(t *testing.T) { EventCall: func() string { return dtos.SSEEventTypeMessage }, DataCall: func() string { updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), }) mainJSON, _ := json.Marshal(genericData{ Timestamp: 123, @@ -171,11 +171,11 @@ func TestParseInstantFFCompressTypeGzip(t *testing.T) { EventCall: func() string { return dtos.SSEEventTypeMessage }, DataCall: func() string { updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - 
ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), }) mainJSON, _ := json.Marshal(genericData{ Timestamp: 123, @@ -222,10 +222,10 @@ func TestParseInstantFFCompressTypeNil(t *testing.T) { EventCall: func() string { return dtos.SSEEventTypeMessage }, DataCall: func() string { updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + Definition: common.StringRef(ffDefinition), }) mainJSON, _ := json.Marshal(genericData{ Timestamp: 123, @@ -270,11 +270,11 @@ func TestParseInstantFFCompressTypeGreaterTwo(t *testing.T) { EventCall: func() string { return dtos.SSEEventTypeMessage }, DataCall: func() string { updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), }) mainJSON, _ := json.Marshal(genericData{ Timestamp: 123, @@ -653,11 +653,11 @@ func TestParseFFDtoNotCompress(t *testing.T) { compressType := 0 ffDefinition := 
"eyJ0cmFmZmljVHlwZU5hbWUiOiJ1c2VyIiwiaWQiOiJkNDMxY2RkMC1iMGJlLTExZWEtOGE4MC0xNjYwYWRhOWNlMzkiLCJuYW1lIjoibWF1cm9famF2YSIsInRyYWZmaWNBbGxvY2F0aW9uIjoxMDAsInRyYWZmaWNBbGxvY2F0aW9uU2VlZCI6LTkyMzkxNDkxLCJzZWVkIjotMTc2OTM3NzYwNCwic3RhdHVzIjoiQUNUSVZFIiwia2lsbGVkIjpmYWxzZSwiZGVmYXVsdFRyZWF0bWVudCI6Im9mZiIsImNoYW5nZU51bWJlciI6MTY4NDMyOTg1NDM4NSwiYWxnbyI6MiwiY29uZmlndXJhdGlvbnMiOnt9LCJjb25kaXRpb25zIjpbeyJjb25kaXRpb25UeXBlIjoiV0hJVEVMSVNUIiwibWF0Y2hlckdyb3VwIjp7ImNvbWJpbmVyIjoiQU5EIiwibWF0Y2hlcnMiOlt7Im1hdGNoZXJUeXBlIjoiV0hJVEVMSVNUIiwibmVnYXRlIjpmYWxzZSwid2hpdGVsaXN0TWF0Y2hlckRhdGEiOnsid2hpdGVsaXN0IjpbImFkbWluIiwibWF1cm8iLCJuaWNvIl19fV19LCJwYXJ0aXRpb25zIjpbeyJ0cmVhdG1lbnQiOiJvZmYiLCJzaXplIjoxMDB9XSwibGFiZWwiOiJ3aGl0ZWxpc3RlZCJ9LHsiY29uZGl0aW9uVHlwZSI6IlJPTExPVVQiLCJtYXRjaGVyR3JvdXAiOnsiY29tYmluZXIiOiJBTkQiLCJtYXRjaGVycyI6W3sia2V5U2VsZWN0b3IiOnsidHJhZmZpY1R5cGUiOiJ1c2VyIn0sIm1hdGNoZXJUeXBlIjoiSU5fU0VHTUVOVCIsIm5lZ2F0ZSI6ZmFsc2UsInVzZXJEZWZpbmVkU2VnbWVudE1hdGNoZXJEYXRhIjp7InNlZ21lbnROYW1lIjoibWF1ci0yIn19XX0sInBhcnRpdGlvbnMiOlt7InRyZWF0bWVudCI6Im9uIiwic2l6ZSI6MH0seyJ0cmVhdG1lbnQiOiJvZmYiLCJzaXplIjoxMDB9LHsidHJlYXRtZW50IjoiVjQiLCJzaXplIjowfSx7InRyZWF0bWVudCI6InY1Iiwic2l6ZSI6MH1dLCJsYWJlbCI6ImluIHNlZ21lbnQgbWF1ci0yIn0seyJjb25kaXRpb25UeXBlIjoiUk9MTE9VVCIsIm1hdGNoZXJHcm91cCI6eyJjb21iaW5lciI6IkFORCIsIm1hdGNoZXJzIjpbeyJrZXlTZWxlY3RvciI6eyJ0cmFmZmljVHlwZSI6InVzZXIifSwibWF0Y2hlclR5cGUiOiJBTExfS0VZUyIsIm5lZ2F0ZSI6ZmFsc2V9XX0sInBhcnRpdGlvbnMiOlt7InRyZWF0bWVudCI6Im9uIiwic2l6ZSI6MH0seyJ0cmVhdG1lbnQiOiJvZmYiLCJzaXplIjoxMDB9LHsidHJlYXRtZW50IjoiVjQiLCJzaXplIjowfSx7InRyZWF0bWVudCI6InY1Iiwic2l6ZSI6MH1dLCJsYWJlbCI6ImRlZmF1bHQgcnVsZSJ9XX0=" data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: 
common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ @@ -674,11 +674,11 @@ func TestParseFFDtoNotCompressWrongDefinition(t *testing.T) { compressType := 0 ffDefinition := "eyJ0cmFmZmldfsfsfjVHlwZU5hbWUiOiJ1c2VyIiwiaWQiOiJkNDMxY2RkMC1iMGJlLTExZWEtOGE4MC0xNjYwYWRhOWNlMzkiLCJuYW1lIjoibWF1cm9famF2YSIsInRyYWZmaWNBbGxvY2F0aW9uIjoxMDAsInRyYWZmaWNBbGxvY2F0aW9uU2VlZCI6LTkyMzkxNDkxLCJzZWVkIjotMTc2OTM3NzYwNCwic3RhdHVzIjoiQUNUSVZFIiwia2lsbGVkIjpmYWxzZSwiZGVmYXVsdFRyZWF0bWVudCI6Im9mZiIsImNoYW5nZU51bWJlciI6MTY4NDMyOTg1NDM4NSwiYWxnbyI6MiwiY29uZmlndXJhdGlvbnMiOnt9LCJjb25kaXRpb25zIjpbeyJjb25kaXRpb25UeXBlIjoiV0hJVEVMSVNUIiwibWF0Y2hlckdyb3VwIjp7ImNvbWJpbmVyIjoiQU5EIiwibWF0Y2hlcnMiOlt7Im1hdGNoZXJUeXBlIjoiV0hJVEVMSVNUIiwibmVnYXRlIjpmYWxzZSwid2hpdGVsaXN0TWF0Y2hlckRhdGEiOnsid2hpdGVsaXN0IjpbImFkbWluIiwibWF1cm8iLCJuaWNvIl19fV19LCJwYXJ0aXRpb25zIjpbeyJ0cmVhdG1lbnQiOiJvZmYiLCJzaXplIjoxMDB9XSwibGFiZWwiOiJ3aGl0ZWxpc3RlZCJ9LHsiY29uZGl0aW9uVHlwZSI6IlJPTExPVVQiLCJtYXRjaGVyR3JvdXAiOnsiY29tYmluZXIiOiJBTkQiLCJtYXRjaGVycyI6W3sia2V5U2VsZWN0b3IiOnsidHJhZmZpY1R5cGUiOiJ1c2VyIn0sIm1hdGNoZXJUeXBlIjoiSU5fU0VHTUVOVCIsIm5lZ2F0ZSI6ZmFsc2UsInVzZXJEZWZpbmVkU2VnbWVudE1hdGNoZXJEYXRhIjp7InNlZ21lbnROYW1lIjoibWF1ci0yIn19XX0sInBhcnRpdGlvbnMiOlt7InRyZWF0bWVudCI6Im9uIiwic2l6ZSI6MH0seyJ0cmVhdG1lbnQiOiJvZmYiLCJzaXplIjoxMDB9LHsidHJlYXRtZW50IjoiVjQiLCJzaXplIjowfSx7InRyZWF0bWVudCI6InY1Iiwic2l6ZSI6MH1dLCJsYWJlbCI6ImluIHNlZ21lbnQgbWF1ci0yIn0seyJjb25kaXRpb25UeXBlIjoiUk9MTE9VVCIsIm1hdGNoZXJHcm91cCI6eyJjb21iaW5lciI6IkFORCIsIm1hdGNoZXJzIjpbeyJrZXlTZWxlY3RvciI6eyJ0cmFmZmljVHlwZSI6InVzZXIifSwibWF0Y2hlclR5cGUiOiJBTExfS0VZUyIsIm5lZ2F0ZSI6ZmFsc2V9XX0sInBhcnRpdGlvbnMiOlt7InRyZWF0bWVudCI6Im9uIiwic2l6ZSI6MH0seyJ0cmVhdG1lbnQiOiJvZmYiLCJzaXplIjoxMDB9LHsidHJlYXRtZW50IjoiVjQiLCJzaXplIjowfSx7InRyZWF0bWVudCI6InY1Iiwic2l6ZSI6MH1dLCJsYWJlbCI6ImRlZmF1bHQgcnVsZSJ9XX0=" data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: 
common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ @@ -695,11 +695,11 @@ func TestParseFFDtoGzipCompress(t *testing.T) { compressType := 1 ffDefinition := FF_DEFINITION_GZIP data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ @@ -716,11 +716,11 @@ func TestParseFFDtoZlibCompressWrongCompressType(t *testing.T) { compressType := 2 ffDefinition := FF_DEFINITION_GZIP data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ @@ -737,11 +737,11 @@ func TestParseFFDtoZlibCompress(t *testing.T) { compressType := 2 ffDefinition := FF_DEFINITION_ZLIB data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: 
common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ @@ -758,11 +758,11 @@ func TestParseFFDtoGzipCompressWrongDefinition(t *testing.T) { compressType := 1 ffDefinition := FF_DEFINITION_ZLIB data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - CompressType: common.IntRef(compressType), - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + CompressType: common.IntRef(compressType), + Definition: common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ @@ -778,10 +778,10 @@ func TestParseFFDtoGzipCompressWrongDefinition(t *testing.T) { func TestParseFFDtoCompressTypeNil(t *testing.T) { ffDefinition := FF_DEFINITION_ZLIB data := genericMessageData{ - Type: dtos.UpdateTypeSplitChange, - ChangeNumber: 123, - PreviousChangeNumber: 1, - FeatureFlagDefinition: common.StringRef(ffDefinition), + Type: dtos.UpdateTypeSplitChange, + ChangeNumber: 123, + PreviousChangeNumber: 1, + Definition: common.StringRef(ffDefinition), } logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ diff --git a/storage/inmemory/mutexmap/rulebasedsegment.go b/storage/inmemory/mutexmap/rulebasedsegment.go index 2933bcbd..03c9c4c1 100644 --- a/storage/inmemory/mutexmap/rulebasedsegment.go +++ b/storage/inmemory/mutexmap/rulebasedsegment.go @@ -53,6 +53,8 @@ func (r *RuleBasedSegmentsStorageImpl) SetChangeNumber(till int64) error { // ChangeNumber return the actual rule-based till func (r *RuleBasedSegmentsStorageImpl) ChangeNumber() int64 { + r.tillMutex.RLock() + defer r.tillMutex.RUnlock() return r.till } diff --git a/storage/interfaces.go b/storage/interfaces.go index 8a370077..47c3ae95 100644 --- a/storage/interfaces.go +++ b/storage/interfaces.go @@ -269,13 +269,13 @@ type LargeSegmentsStorage interface { // RuleBasedSegmentStorageProducer 
interface should be implemented by all structs that offer writing rule-based segments type RuleBasedSegmentStorageProducer interface { SetChangeNumber(name string, till int64) - Update(toAdd []dtos.RuleBasedSegmentDTO, toRemove []string, till int64) + Update(toAdd []dtos.RuleBasedSegmentDTO, toRemove []dtos.RuleBasedSegmentDTO, till int64) Clear() } // RuleBasedStorageConsumer interface should be implemented by all structs that ofer reading rule-based segments type RuleBasedSegmentStorageConsumer interface { - ChangeNumber(name string) int64 + ChangeNumber() int64 All() []dtos.RuleBasedSegmentDTO RuleBasedSegmentNames() []string Contains(ruleBasedSegmentNames []string) bool diff --git a/synchronizer/worker/split/split.go b/synchronizer/worker/split/split.go index 80b5dbf6..577ed6ca 100644 --- a/synchronizer/worker/split/split.go +++ b/synchronizer/worker/split/split.go @@ -19,6 +19,7 @@ import ( const ( matcherTypeInSegment = "IN_SEGMENT" matcherTypeInLargeSegment = "IN_LARGE_SEGMENT" + matcherTypeInRuleBasedSegment = "IN_RULE_BASED_SEGMENT" scRequestURITooLong = 414 onDemandFetchBackoffBase = int64(10) // backoff base starting at 10 seconds onDemandFetchBackoffMaxWait = 60 * time.Second // don't sleep for more than 1 minute @@ -56,6 +57,7 @@ type internalSplitSync struct { type UpdaterImpl struct { splitStorage storage.SplitStorage splitFetcher service.SplitFetcher + ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage logger logging.LoggerInterface runtimeTelemetry storage.TelemetryRuntimeProducer hcMonitor application.MonitorProducerInterface @@ -89,6 +91,9 @@ func (s *UpdaterImpl) processUpdate(splitChanges *dtos.SplitChangesDTO) { activeSplits, inactiveSplits := s.processFeatureFlagChanges(splitChanges) // Add/Update active splits s.splitStorage.Update(activeSplits, inactiveSplits, splitChanges.FeatureFlags.Till) + activeRB, inactiveRB := s.processRuleBasedSegmentChanges(splitChanges) + // Add/Update active rule-based + 
s.ruleBasedSegmentStorage.Update(activeRB, inactiveRB, splitChanges.RuleBasedSegments.Till) } // fetchUntil Hit endpoint, update storage and return when since==till. @@ -99,12 +104,14 @@ func (s *UpdaterImpl) fetchUntil(fetchOptions *service.FlagRequestParams) (*Upda largeSegmentReferences := make([]string, 0, 10) var err error var currentSince int64 + var currentRBSince int64 for { // Fetch until since==till currentSince, _ = s.splitStorage.ChangeNumber() + currentRBSince = s.ruleBasedSegmentStorage.ChangeNumber() before := time.Now() var splitChanges *dtos.SplitChangesDTO - splitChanges, err = s.splitFetcher.Fetch(fetchOptions.WithChangeNumber(currentSince)) + splitChanges, err = s.splitFetcher.Fetch(fetchOptions.WithChangeNumber(currentSince).WithChangeNumberRB(currentRBSince)) if err != nil { if httpError, ok := err.(*dtos.HTTPError); ok { if httpError.Code == scRequestURITooLong { @@ -154,7 +161,8 @@ func (s *UpdaterImpl) attemptSplitSync(fetchOptions *service.FlagRequestParams, func (s *UpdaterImpl) SynchronizeSplits(till *int64) (*UpdateResult, error) { s.hcMonitor.NotifyEvent(application.Splits) currentSince, _ := s.splitStorage.ChangeNumber() - if till != nil && *till < currentSince { // the passed till is less than change_number, no need to perform updates + currentRBSince := s.ruleBasedSegmentStorage.ChangeNumber() + if till != nil && *till < currentSince && *till < currentRBSince { // the passed till is less than change_number, no need to perform updates return &UpdateResult{}, nil } @@ -215,6 +223,19 @@ func appendLargeSegmentNames(dst []string, splitChanges *dtos.SplitChangesDTO) [ return dst } +func appendRuleBasedSegmentNames(dst []string, splitChanges *dtos.SplitChangesDTO) []string { + for _, split := range splitChanges.FeatureFlags.Splits { + for _, cond := range split.Conditions { + for _, matcher := range cond.MatcherGroup.Matchers { + if matcher.MatcherType == matcherTypeInRuleBasedSegment && matcher.UserDefinedSegment != nil { + dst = 
append(dst, matcher.UserDefinedSegment.SegmentName) + } + } + } + } + return dst +} + func (s *UpdaterImpl) processFeatureFlagChanges(splitChanges *dtos.SplitChangesDTO) ([]dtos.SplitDTO, []dtos.SplitDTO) { toRemove := make([]dtos.SplitDTO, 0, len(splitChanges.FeatureFlags.Splits)) toAdd := make([]dtos.SplitDTO, 0, len(splitChanges.FeatureFlags.Splits)) @@ -229,6 +250,20 @@ func (s *UpdaterImpl) processFeatureFlagChanges(splitChanges *dtos.SplitChangesD return toAdd, toRemove } +func (s *UpdaterImpl) processRuleBasedSegmentChanges(splitChanges *dtos.SplitChangesDTO) ([]dtos.RuleBasedSegmentDTO, []dtos.RuleBasedSegmentDTO) { + toRemove := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) + toAdd := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) + for idx := range splitChanges.RuleBasedSegments.RuleBasedSegments { + if splitChanges.RuleBasedSegments.RuleBasedSegments[idx].Status == Active { + validator.ProcessRBMatchers(&splitChanges.RuleBasedSegments.RuleBasedSegments[idx], s.logger) + toAdd = append(toAdd, splitChanges.RuleBasedSegments.RuleBasedSegments[idx]) + } else { + toRemove = append(toRemove, splitChanges.RuleBasedSegments.RuleBasedSegments[idx]) + } + } + return toAdd, toRemove +} + // LocalKill marks a spit as killed in local storage func (s *UpdaterImpl) LocalKill(splitName string, defaultTreatment string, changeNumber int64) { s.splitStorage.KillLocally(splitName, defaultTreatment, changeNumber) @@ -248,6 +283,7 @@ func (s *UpdaterImpl) processFFChange(ffChange dtos.SplitChangeUpdate) *UpdateRe segmentReferences := make([]string, 0, 10) updatedSplitNames := make([]string, 0, 1) largeSegmentReferences := make([]string, 0, 10) + ruleBasedSegmentReferences := make([]string, 0, 10) s.logger.Debug(fmt.Sprintf("updating feature flag %s", ffChange.FeatureFlag().Name)) featureFlags := make([]dtos.SplitDTO, 0, 1) featureFlags = append(featureFlags, *ffChange.FeatureFlag()) @@ 
-258,17 +294,23 @@ func (s *UpdaterImpl) processFFChange(ffChange dtos.SplitChangeUpdate) *UpdateRe updatedSplitNames = append(updatedSplitNames, ffChange.FeatureFlag().Name) segmentReferences = appendSegmentNames(segmentReferences, &featureFlagChange) largeSegmentReferences = appendLargeSegmentNames(largeSegmentReferences, &featureFlagChange) + ruleBasedSegmentReferences = appendRuleBasedSegmentNames(ruleBasedSegmentReferences, &featureFlagChange) + requiresFetch := false + if !s.ruleBasedSegmentStorage.Contains(ruleBasedSegmentReferences) { + requiresFetch = true + } return &UpdateResult{ UpdatedSplits: updatedSplitNames, ReferencedSegments: segmentReferences, NewChangeNumber: ffChange.BaseUpdate.ChangeNumber(), - RequiresFetch: false, + RequiresFetch: requiresFetch, ReferencedLargeSegments: largeSegmentReferences, } } s.logger.Debug("the feature flag was nil or the previous change number wasn't equal to the feature flag storage's change number") return &UpdateResult{RequiresFetch: true} } + func (s *UpdaterImpl) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*UpdateResult, error) { result := s.processFFChange(*ffChange) if result.RequiresFetch { From dcea4a56de45f3ee90ca7fb82f742f5dfbefb96b Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Tue, 12 Aug 2025 12:45:48 -0300 Subject: [PATCH 2/9] Add rule-based segment logic for streaming and split updates --- dtos/notification.go | 14 +- go.mod | 1 + go.sum | 81 ++++++ push/borrowed.go | 1 + push/manager.go | 2 +- push/mocks/sync.go | 5 + push/parser.go | 56 ++-- push/parser_test.go | 172 ++++++++++- push/processor.go | 24 +- push/rulebasedsegment.go | 78 +++++ push/rulebasedsegment_test.go | 127 +++++++++ service/commons.go | 12 +- storage/mocks/rulebasedsegment.go | 69 +++++ synchronizer/local.go | 12 +- synchronizer/local_test.go | 66 ++++- synchronizer/mocks/mocks.go | 6 + synchronizer/synchronizer.go | 14 +- synchronizer/synchronizer_test.go | 145 ++++++++-- .../rulebasedsegment/rulebasedsegment.go | 
122 ++++++++ .../rulebasedsegment/rulebasedsegment_test.go | 266 ++++++++++++++++++ synchronizer/worker/split/split.go | 111 ++++---- synchronizer/worker/split/split_test.go | 249 ++++++++++++++-- tasks/splitsync_test.go | 20 +- 23 files changed, 1511 insertions(+), 142 deletions(-) create mode 100644 push/rulebasedsegment.go create mode 100644 push/rulebasedsegment_test.go create mode 100644 storage/mocks/rulebasedsegment.go create mode 100644 synchronizer/worker/rulebasedsegment/rulebasedsegment.go create mode 100644 synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go diff --git a/dtos/notification.go b/dtos/notification.go index 5a10bc8a..f000b476 100644 --- a/dtos/notification.go +++ b/dtos/notification.go @@ -17,7 +17,7 @@ const ( UpdateTypeSplitChange = "SPLIT_UPDATE" UpdateTypeSplitKill = "SPLIT_KILL" UpdateTypeSegmentChange = "SEGMENT_UPDATE" - UpdateTypeContol = "CONTROL" + UpdateTypeControl = "CONTROL" UpdateTypeLargeSegmentChange = "LS_DEFINITION_UPDATE" UpdateTypeRuleBasedChange = "RB_SEGMENT_UPDATE" ) @@ -333,7 +333,7 @@ func (u *LargeSegmentChangeUpdate) String() string { u.Channel(), u.ChangeNumber(), len(u.LargeSegments), u.Timestamp()) } -// SplitChangeUpdate represents a SplitChange notification generated in the split servers +// RuleBasedChangeUpdate represents a RuleBasedChange notification generated in the split servers type RuleBasedChangeUpdate struct { BaseUpdate previousChangeNumber *int64 @@ -348,20 +348,20 @@ func NewRuleBasedChangeUpdate(baseUpdate BaseUpdate, pcn *int64, ruleBasedSegmen } } -// UpdateType is always UpdateTypeRuleBasedSegmentChange for Rule-based Segmet Updates +// UpdateType always returns UpdateTypeRuleBasedChange for RuleBasedUpdate messages func (u *RuleBasedChangeUpdate) UpdateType() string { return UpdateTypeRuleBasedChange } -// String returns the string representation of a segment update notification +// String returns the String representation of a split change notification func (u 
*RuleBasedChangeUpdate) String() string { - return fmt.Sprintf("LargeSegmentChange(channel=%s,changeNumber=%d,timestamp=%d)", + return fmt.Sprintf("SplitChange(channel=%s,changeNumber=%d,timestamp=%d)", u.Channel(), u.ChangeNumber(), u.Timestamp()) } // PreviousChangeNumber returns previous change number func (u *RuleBasedChangeUpdate) PreviousChangeNumber() *int64 { return u.previousChangeNumber } -// RuleBasedSegment returns rule-based segment definiiton or nil -func (u *RuleBasedChangeUpdate) RuleBasedsegment() *RuleBasedSegmentDTO { return u.ruleBasedSegment } +// FeatureFlag returns feature flag definiiton or nil +func (u *RuleBasedChangeUpdate) RuleBasedSegment() *RuleBasedSegmentDTO { return u.ruleBasedSegment } // Compile-type assertions of interface requirements var _ Event = &AblyError{} diff --git a/go.mod b/go.mod index bb816c4a..03b3ea1e 100644 --- a/go.mod +++ b/go.mod @@ -21,5 +21,6 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/redis/go-redis/v9 v9.0.4 // indirect + github.com/splitio/go-toolkit/v3 v3.0.1 golang.org/x/exp v0.0.0-20231006140011-7918f672742d ) diff --git a/go.sum b/go.sum index 192010e0..01902878 100644 --- a/go.sum +++ b/go.sum @@ -1,32 +1,113 @@ +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/bits-and-blooms/bitset v1.3.1 h1:y+qrlmq3XsWi+xZqSaueaE8ry8Y127iMxlMfqcK8p0g= github.com/bits-and-blooms/bitset v1.3.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bits-and-blooms/bloom/v3 v3.3.1 h1:K2+A19bXT8gJR5mU7y+1yW6hsKfNCjcP2uNfLFKncjQ= github.com/bits-and-blooms/bloom/v3 v3.3.1/go.mod h1:bhUUknWd5khVbTe4UgMCSiOOVJzr3tMoijSK3WwvW90= github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= 
github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/redis/go-redis/v9 v9.0.4 h1:FC82T+CHJ/Q/PdyLW++GeCO+Ol59Y4T7R4jbgjvktgc= github.com/redis/go-redis/v9 v9.0.4/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= +github.com/splitio/go-toolkit/v3 v3.0.1 h1:/H2wytH9r4GT4FpVmMWe7wUX99Y67b15fSbfIT1lIt8= +github.com/splitio/go-toolkit/v3 v3.0.1/go.mod h1:HGgawLnM2RlM84zVRbATpPMjF7H6u9CUYG6RlpwOlOk= github.com/splitio/go-toolkit/v5 v5.4.0 h1:g5WFpRhQomnXCmvfsNOWV4s5AuUrWIZ+amM68G8NBKM= github.com/splitio/go-toolkit/v5 v5.4.0/go.mod h1:xYhUvV1gga9/1029Wbp5pjnR6Cy8nvBpjw99wAbsMko= +github.com/stretchr/objx 
v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= +go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 
v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/push/borrowed.go b/push/borrowed.go index d5211f0c..788a7159 100644 --- a/push/borrowed.go +++ b/push/borrowed.go @@ -6,6 +6,7 @@ import "github.com/splitio/go-split-commons/v6/dtos" type synchronizerInterface interface { SyncAll() error SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error + SynchronizeRuleBasedSegments(ruleBasedChange *dtos.RuleBasedChangeUpdate) error LocalKill(splitName string, defaultTreatment string, changeNumber int64) SynchronizeSegment(segmentName string, till *int64) error StartPeriodicFetching() diff --git a/push/manager.go b/push/manager.go index 47554030..7a777f05 100644 --- a/push/manager.go +++ b/push/manager.go @@ -78,7 +78,7 @@ func NewManager( statusTracker := NewStatusTracker(logger, runtimeTelemetry) parser := NewNotificationParserImpl(logger, processor.ProcessSplitChangeUpdate, processor.ProcessSplitKillUpdate, processor.ProcessSegmentChangeUpdate, - statusTracker.HandleControl, statusTracker.HandleOccupancy, statusTracker.HandleAblyError, processor.ProcessLargeSegmentChangeUpdate) + statusTracker.HandleControl, statusTracker.HandleOccupancy, statusTracker.HandleAblyError, processor.ProcessLargeSegmentChangeUpdate, processor.ProcessorRuleBasedSegmentChangeUpdate) manager := &ManagerImpl{ authAPI: authAPI, diff --git a/push/mocks/sync.go b/push/mocks/sync.go index af5ce506..57157cbf 100644 --- a/push/mocks/sync.go +++ b/push/mocks/sync.go @@ -7,6 +7,7 @@ import ( type LocalSyncMock struct { SyncAllCall func() error SynchronizeFeatureFlagsCall func(ffChange *dtos.SplitChangeUpdate) error + 
SynchronizeRuleBasedSegmentsCall func(rbChange *dtos.RuleBasedChangeUpdate) error LocalKillCall func(splitName string, defaultTreatment string, changeNumber int64) SynchronizeSegmentCall func(segmentName string, till *int64) error StartPeriodicFetchingCall func() @@ -49,6 +50,10 @@ func (l *LocalSyncMock) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate return l.SynchronizeFeatureFlagsCall(ffChange) } +func (l *LocalSyncMock) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { + return l.SynchronizeRuleBasedSegmentsCall(rbChange) +} + func (l *LocalSyncMock) SynchronizeLargeSegment(name string, till *int64) error { return l.SynchronizeLargeSegmentCall(name, till) } diff --git a/push/parser.go b/push/parser.go index 752c27fd..d2ac3714 100644 --- a/push/parser.go +++ b/push/parser.go @@ -34,15 +34,16 @@ type NotificationParser interface { // NotificationParserImpl implementas the NotificationParser interface type NotificationParserImpl struct { - dataUtils DataUtils - logger logging.LoggerInterface - onSplitUpdate func(*dtos.SplitChangeUpdate) error - onSplitKill func(*dtos.SplitKillUpdate) error - onSegmentUpdate func(*dtos.SegmentChangeUpdate) error - onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error - onControlUpdate func(*dtos.ControlUpdate) *int64 - onOccupancyMesage func(*dtos.OccupancyMessage) *int64 - onAblyError func(*dtos.AblyError) *int64 + dataUtils DataUtils + logger logging.LoggerInterface + onSplitUpdate func(*dtos.SplitChangeUpdate) error + onSplitKill func(*dtos.SplitKillUpdate) error + onSegmentUpdate func(*dtos.SegmentChangeUpdate) error + onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error + onRuleBasedsegmentUpdate func(*dtos.RuleBasedChangeUpdate) error + onControlUpdate func(*dtos.ControlUpdate) *int64 + onOccupancyMesage func(*dtos.OccupancyMessage) *int64 + onAblyError func(*dtos.AblyError) *int64 } func NewNotificationParserImpl( @@ -53,17 +54,19 @@ func NewNotificationParserImpl( 
onControlUpdate func(*dtos.ControlUpdate) *int64, onOccupancyMessage func(*dtos.OccupancyMessage) *int64, onAblyError func(*dtos.AblyError) *int64, - onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error) *NotificationParserImpl { + onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error, + onRuleBasedSegmentUpdate func(*dtos.RuleBasedChangeUpdate) error) *NotificationParserImpl { return &NotificationParserImpl{ - dataUtils: NewDataUtilsImpl(), - logger: loggerInterface, - onSplitUpdate: onSplitUpdate, - onSplitKill: onSplitKill, - onSegmentUpdate: onSegmentUpdate, - onControlUpdate: onControlUpdate, - onOccupancyMesage: onOccupancyMessage, - onAblyError: onAblyError, - onLargeSegmentUpdate: onLargeSegmentUpdate, + dataUtils: NewDataUtilsImpl(), + logger: loggerInterface, + onSplitUpdate: onSplitUpdate, + onSplitKill: onSplitKill, + onSegmentUpdate: onSegmentUpdate, + onControlUpdate: onControlUpdate, + onOccupancyMesage: onOccupancyMessage, + onAblyError: onAblyError, + onLargeSegmentUpdate: onLargeSegmentUpdate, + onRuleBasedsegmentUpdate: onRuleBasedSegmentUpdate, } } @@ -138,15 +141,14 @@ func (p *NotificationParserImpl) parseUpdate(data *genericData, nested *genericM case dtos.UpdateTypeLargeSegmentChange: largeSegments := p.processLargeSegmentMessage(nested) return nil, p.onLargeSegmentUpdate(dtos.NewLargeSegmentChangeUpdate(base, largeSegments)) - case dtos.UpdateTypeContol: + case dtos.UpdateTypeControl: return p.onControlUpdate(dtos.NewControlUpdate(base.BaseMessage, nested.ControlType)), nil - //case dtos.UpdateTypeRuleBasedChange: - //ruleBased := p.processRuleBasedMessage(nested) - //to do: - // if ruleBased == nil { - // return nil, p.onSplitUpdate(dtos.NewRuleBasedChangeUpdate(base, nil, nil)) - // } - // return nil, p.onSplitUpdate(dtos.NewRuleBasedChangeUpdate(base, &nested.PreviousChangeNumber, ruleBased)) + case dtos.UpdateTypeRuleBasedChange: + ruleBased := p.processRuleBasedMessage(nested) + if ruleBased == nil { + return nil, 
p.onRuleBasedsegmentUpdate(dtos.NewRuleBasedChangeUpdate(base, nil, nil)) + } + return nil, p.onRuleBasedsegmentUpdate(dtos.NewRuleBasedChangeUpdate(base, &nested.PreviousChangeNumber, ruleBased)) default: // TODO: log full event in debug mode return nil, fmt.Errorf("invalid update type: %s", nested.Type) diff --git a/push/parser_test.go b/push/parser_test.go index d37f1525..2b5dbce0 100644 --- a/push/parser_test.go +++ b/push/parser_test.go @@ -1,6 +1,7 @@ package push import ( + "encoding/base64" "encoding/json" "testing" @@ -19,6 +20,174 @@ const FF_SHOULD_BE_MAURO_JAVA = "feature flag should be mauro_java" const FF_DEFINITION_ZLIB = "eJzMk99u2kwQxV8lOtdryQZj8N6hD5QPlThSTVNVEUKDPYZt1jZar1OlyO9emf8lVFWv2ss5zJyd82O8hTWUZSqZvW04opwhUVdsIKBSSKR+10vS1HWW7pIdz2NyBjRwHS8IXEopTLgbQqDYT+ZUm3LxlV4J4mg81LpMyKqygPRc94YeM6eQTtjphp4fegLVXvD6Qdjt9wPXF6gs2bqCxPC/2eRpDIEXpXXblpGuWCDljGptZ4bJ5lxYSJRZBoFkTcWKozpfsoH0goHfCXpB6PfcngDpVQnZEUjKIlOr2uwWqiC3zU5L1aF+3p7LFhUkPv8/mY2nk3gGgZxssmZzb8p6A9n25ktVtA9iGI3ODXunQ3HDp+AVWT6F+rZWlrWq7MN+YkSWWvuTDvkMSnNV7J6oTdl6qKTEvGnmjcCGjL2IYC/ovPYgUKnvvPtbmrmApiVryLM7p2jE++AfH6fTx09/HvuF32LWnNjStM0Xh3c8ukZcsZlEi3h8/zCObsBpJ0acqYLTmFdtqitK1V6NzrfpdPBbLmVx4uK26e27izpDu/r5yf/16AXun2Cr4u6w591xw7+LfDidLj6Mv8TXwP8xbofv/c7UmtHMmx8BAAD//0fclvU=" const FF_DEFINITION_GZIP = "H4sIAAAAAAAA/8yT327aTBDFXyU612vJxoTgvUMfKB8qcaSapqoihAZ7DNusvWi9TpUiv3tl/pdQVb1qL+cwc3bOj/EGzlKeq3T6tuaYCoZEXbGFgMogkXXDIM0y31v4C/aCgMnrU9/3gl7Pp4yilMMIAuVusqDamvlXeiWIg/FAa5OSU6aEDHz/ip4wZ5Be1AmjoBsFAtVOCO56UXh31/O7ApUjV1eQGPw3HT+NIPCitG7bctIVC2ScU63d1DK5gksHCZPnEEhXVC45rosFW8ig1++GYej3g85tJEB6aSA7Aqkpc7Ws7XahCnLTbLVM7evnzalsUUHi8//j6WgyTqYQKMilK7b31tRryLa3WKiyfRCDeHhq2Dntiys+JS/J8THUt5VyrFXlHnYTQ3LU2h91yGdQVqhy+0RtTeuhUoNZ08wagTVZdxbBndF5vYVApb7z9m9pZgKaFqwhT+6coRHvg398nEweP/157Bd+S1hz6oxtm88O73B0jbhgM47nyej+YRRfgdNODDlXJWcJL9tUF5SqnRqfbtPr4LdcTHnk4rfp3buLOkG7+Pmp++vRM9w/wVblzX7Pm8OGfxf5YDKZfxh9SS6B/2Pc9t/7ja01o5k1PwIAAP//uTipVskEAAA=" +func TestParseRuleBasedSegmentUpdate(t 
*testing.T) { + event := &sseMocks.RawEventMock{ + IDCall: func() string { return "abc" }, + EventCall: func() string { return dtos.SSEEventTypeMessage }, + DataCall: func() string { + ruleBasedSegment := dtos.RuleBasedSegmentDTO{ + Name: "rb1", + Status: "ACTIVE", + Conditions: []dtos.RuleBasedConditionDTO{ + { + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + { + MatcherType: "IN_SEGMENT", + UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ + SegmentName: "segment1", + }, + }, + }, + }, + }, + }, + } + ruleBasedJSON, _ := json.Marshal(ruleBasedSegment) + def := base64.StdEncoding.EncodeToString(ruleBasedJSON) + compressType := 0 + updateJSON, _ := json.Marshal(genericMessageData{ + Type: dtos.UpdateTypeRuleBasedChange, + ChangeNumber: 123, + Definition: &def, + CompressType: &compressType, + }) + mainJSON, _ := json.Marshal(genericData{ + Timestamp: 123, + Data: string(updateJSON), + Channel: "sarasa_rule_based_segments", + }) + return string(mainJSON) + }, + IsErrorCall: func() bool { return false }, + IsEmptyCall: func() bool { return false }, + RetryCall: func() int64 { return 0 }, + } + + logger := logging.NewLogger(nil) + parser := &NotificationParserImpl{ + dataUtils: NewDataUtilsImpl(), + logger: logger, + onRuleBasedsegmentUpdate: func(u *dtos.RuleBasedChangeUpdate) error { + if u.ChangeNumber() != 123 { + t.Error("Change number should be 123. Got:", u.ChangeNumber()) + } + if u.Channel() != "sarasa_rule_based_segments" { + t.Error("Channel should be sarasa_rule_based_segments. Got:", u.Channel()) + } + if u.RuleBasedSegment() == nil { + t.Error("Rule-based segment should not be nil") + } + if u.RuleBasedSegment().Name != "rb1" { + t.Error("Rule-based segment name should be rb1. Got:", u.RuleBasedSegment().Name) + } + if len(u.RuleBasedSegment().Conditions) != 1 { + t.Error("Rule-based segment should have 1 condition. 
Got:", len(u.RuleBasedSegment().Conditions)) + } + return nil + }, + } + + _, err := parser.ParseAndForward(event) + if err != nil { + t.Error("No error should have been returned. Got:", err) + } +} + +func TestParseRuleBasedSegmentUpdateWithPreviousChangeNumber(t *testing.T) { + event := &sseMocks.RawEventMock{ + IDCall: func() string { return "abc" }, + EventCall: func() string { return dtos.SSEEventTypeMessage }, + DataCall: func() string { + ruleBasedSegment := dtos.RuleBasedSegmentDTO{ + Name: "rb1", + Status: "ACTIVE", + } + var previousChangeNumber int64 = 100 + ruleBasedJSON, _ := json.Marshal(ruleBasedSegment) + def := base64.StdEncoding.EncodeToString(ruleBasedJSON) + compressType := 0 + updateJSON, _ := json.Marshal(genericMessageData{ + Type: dtos.UpdateTypeRuleBasedChange, + ChangeNumber: 123, + PreviousChangeNumber: previousChangeNumber, + Definition: &def, + CompressType: &compressType, + }) + mainJSON, _ := json.Marshal(genericData{ + Timestamp: 123, + Data: string(updateJSON), + Channel: "sarasa_rule_based_segments", + }) + return string(mainJSON) + }, + IsErrorCall: func() bool { return false }, + IsEmptyCall: func() bool { return false }, + RetryCall: func() int64 { return 0 }, + } + + logger := logging.NewLogger(nil) + parser := &NotificationParserImpl{ + dataUtils: NewDataUtilsImpl(), + logger: logger, + onRuleBasedsegmentUpdate: func(u *dtos.RuleBasedChangeUpdate) error { + if u.ChangeNumber() != 123 { + t.Error("Change number should be 123. Got:", u.ChangeNumber()) + } + if u.PreviousChangeNumber() == nil { + t.Error("Previous change number should not be nil") + } else if *u.PreviousChangeNumber() != 100 { + t.Error("Previous change number should be 100. Got:", *u.PreviousChangeNumber()) + } + return nil + }, + } + + _, err := parser.ParseAndForward(event) + if err != nil { + t.Error("No error should have been returned. 
Got:", err) + } +} + +func TestParseRuleBasedSegmentUpdateWithNilSegment(t *testing.T) { + event := &sseMocks.RawEventMock{ + IDCall: func() string { return "abc" }, + EventCall: func() string { return dtos.SSEEventTypeMessage }, + DataCall: func() string { + updateJSON, _ := json.Marshal(genericMessageData{ + Type: dtos.UpdateTypeRuleBasedChange, + ChangeNumber: 123, + }) + mainJSON, _ := json.Marshal(genericData{ + Timestamp: 123, + Data: string(updateJSON), + Channel: "sarasa_rule_based_segments", + }) + return string(mainJSON) + }, + IsErrorCall: func() bool { return false }, + IsEmptyCall: func() bool { return false }, + RetryCall: func() int64 { return 0 }, + } + + logger := logging.NewLogger(nil) + parser := &NotificationParserImpl{ + dataUtils: NewDataUtilsImpl(), + logger: logger, + onRuleBasedsegmentUpdate: func(u *dtos.RuleBasedChangeUpdate) error { + if u.RuleBasedSegment() != nil { + t.Error("Rule-based segment should be nil") + } + return nil + }, + } + + _, err := parser.ParseAndForward(event) + if err != nil { + t.Error("No error should have been returned. 
Got:", err) + } +} + func TestParseSplitUpdate(t *testing.T) { event := &sseMocks.RawEventMock{ IDCall: func() string { return "abc" }, @@ -407,7 +576,7 @@ func TestControl(t *testing.T) { EventCall: func() string { return dtos.SSEEventTypeMessage }, DataCall: func() string { updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeContol, + Type: dtos.UpdateTypeControl, ControlType: dtos.ControlTypeStreamingDisabled, }) mainJSON, _ := json.Marshal(genericData{ @@ -642,6 +811,7 @@ func TestNewNotificationParserImpl(t *testing.T) { return common.Int64Ref(123) }, nil, + nil, nil) if status, err := parser.ParseAndForward(event); *status != 123 || err != nil { diff --git a/push/processor.go b/push/processor.go index d393364b..ecf854d7 100644 --- a/push/processor.go +++ b/push/processor.go @@ -26,19 +26,21 @@ type Processor interface { ProcessSplitKillUpdate(update *dtos.SplitKillUpdate) error ProcessSegmentChangeUpdate(update *dtos.SegmentChangeUpdate) error ProcessLargeSegmentChangeUpdate(update *dtos.LargeSegmentChangeUpdate) error + ProcessorRuleBasedSegmentChangeUpdate(update *dtos.RuleBasedChangeUpdate) error StartWorkers() StopWorkers() } // ProcessorImpl struct for notification processor type ProcessorImpl struct { - segmentQueue chan dtos.SegmentChangeUpdate - splitQueue chan dtos.SplitChangeUpdate - splitWorker *SplitUpdateWorker - segmentWorker *SegmentUpdateWorker - synchronizer synchronizerInterface - logger logging.LoggerInterface - largeSegment *LargeSegment + segmentQueue chan dtos.SegmentChangeUpdate + splitQueue chan dtos.SplitChangeUpdate + ruleBasedQueue chan dtos.RuleBasedChangeUpdate + splitWorker *SplitUpdateWorker + segmentWorker *SegmentUpdateWorker + synchronizer synchronizerInterface + logger logging.LoggerInterface + largeSegment *LargeSegment } // NewProcessor creates new processor @@ -135,6 +137,14 @@ func (p *ProcessorImpl) ProcessLargeSegmentChangeUpdate(update *dtos.LargeSegmen return nil } +func (p *ProcessorImpl) 
ProcessorRuleBasedSegmentChangeUpdate(update *dtos.RuleBasedChangeUpdate) error { + if update == nil { + return errors.New("rule-based segment change update cannot be nil") + } + p.ruleBasedQueue <- *update + return nil +} + // StartWorkers enables split & segments workers func (p *ProcessorImpl) StartWorkers() { p.splitWorker.Start() diff --git a/push/rulebasedsegment.go b/push/rulebasedsegment.go new file mode 100644 index 00000000..33fac915 --- /dev/null +++ b/push/rulebasedsegment.go @@ -0,0 +1,78 @@ +package push + +import ( + "errors" + "fmt" + + "github.com/splitio/go-split-commons/v6/dtos" + "github.com/splitio/go-toolkit/v5/logging" + "github.com/splitio/go-toolkit/v5/struct/traits/lifecycle" +) + +// SplitUpdateWorker struct +type RuleBasedUpdateWorker struct { + ruleBasedQueue chan dtos.RuleBasedChangeUpdate + sync synchronizerInterface + logger logging.LoggerInterface + lifecycle lifecycle.Manager +} + +// NewRuleBasedUpdateWorker creates SplitRuleBasedWorker +func NewRuleBasedUpdateWorker( + ruleBasedQueue chan dtos.RuleBasedChangeUpdate, + synchronizer synchronizerInterface, + logger logging.LoggerInterface, +) (*RuleBasedUpdateWorker, error) { + if cap(ruleBasedQueue) < 5000 { + return nil, errors.New("") + } + + worker := &RuleBasedUpdateWorker{ + ruleBasedQueue: ruleBasedQueue, + sync: synchronizer, + logger: logger, + } + worker.lifecycle.Setup() + return worker, nil +} + +// Start starts worker +func (s *RuleBasedUpdateWorker) Start() { + if !s.lifecycle.BeginInitialization() { + s.logger.Info("Rule-based worker is already running") + return + } + + s.logger.Debug("Started RuleBasedUpdateWorker") + go func() { + defer s.lifecycle.ShutdownComplete() + s.lifecycle.InitializationComplete() + for { + select { + case ruleBasedUpdate := <-s.ruleBasedQueue: + s.logger.Debug("Received Rule-based update and proceding to perform fetch") + s.logger.Debug(fmt.Sprintf("ChangeNumber: %d", ruleBasedUpdate.ChangeNumber())) + err := 
s.sync.SynchronizeRuleBasedSegments(&ruleBasedUpdate) + if err != nil { + s.logger.Error(err) + } + case <-s.lifecycle.ShutdownRequested(): + return + } + } + }() +} + +// Stop stops worker +func (s *RuleBasedUpdateWorker) Stop() { + if !s.lifecycle.BeginShutdown() { + s.logger.Debug("Rule-based worker not runnning. Ignoring.") + return + } + s.lifecycle.AwaitShutdownComplete() +} + +// IsRunning indicates if worker is running or not +func (s *RuleBasedUpdateWorker) IsRunning() bool { + return s.lifecycle.IsRunning() +} diff --git a/push/rulebasedsegment_test.go b/push/rulebasedsegment_test.go new file mode 100644 index 00000000..cc75b188 --- /dev/null +++ b/push/rulebasedsegment_test.go @@ -0,0 +1,127 @@ +package push + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/splitio/go-split-commons/v6/dtos" + "github.com/splitio/go-toolkit/v5/logging" +) + +type mockSynchronizer struct { + syncCalled int64 + syncError atomic.Value +} + +func (m *mockSynchronizer) SyncAll() error { return nil } +func (m *mockSynchronizer) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error { + return nil +} +func (m *mockSynchronizer) SynchronizeRuleBasedSegments(update *dtos.RuleBasedChangeUpdate) error { + atomic.AddInt64(&m.syncCalled, 1) + if err := m.syncError.Load(); err != nil && err.(error) != nil { + return err.(error) + } + return nil +} +func (m *mockSynchronizer) LocalKill(splitName string, defaultTreatment string, changeNumber int64) {} +func (m *mockSynchronizer) SynchronizeSegment(segmentName string, till *int64) error { return nil } +func (m *mockSynchronizer) StartPeriodicFetching() {} +func (m *mockSynchronizer) StopPeriodicFetching() {} +func (m *mockSynchronizer) StartPeriodicDataRecording() {} +func (m *mockSynchronizer) StopPeriodicDataRecording() {} +func (m *mockSynchronizer) SynchronizeLargeSegment(name string, till *int64) error { return nil } +func (m *mockSynchronizer) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO 
*dtos.LargeSegmentRFDResponseDTO) error { + return nil +} + +func TestRuleBasedUpdateWorkerCreation(t *testing.T) { + // Test with invalid queue size + smallQueue := make(chan dtos.RuleBasedChangeUpdate, 100) + _, err := NewRuleBasedUpdateWorker(smallQueue, nil, nil) + if err == nil { + t.Error("Should return error for small queue") + } + + // Test with valid queue size + validQueue := make(chan dtos.RuleBasedChangeUpdate, 5000) + worker, err := NewRuleBasedUpdateWorker(validQueue, nil, logging.NewLogger(&logging.LoggerOptions{})) + if err != nil { + t.Error("Should not return error for valid queue") + } + if worker == nil { + t.Error("Should return valid worker") + } +} + +func TestRuleBasedUpdateWorkerStartStop(t *testing.T) { + queue := make(chan dtos.RuleBasedChangeUpdate, 5000) + synchronizer := &mockSynchronizer{} + worker, _ := NewRuleBasedUpdateWorker(queue, synchronizer, logging.NewLogger(&logging.LoggerOptions{})) + + if worker.IsRunning() { + t.Error("Worker should not be running before Start") + } + + worker.Start() + time.Sleep(100 * time.Millisecond) // Wait for initialization + if !worker.IsRunning() { + t.Error("Worker should be running after Start") + } + + // Try to start again + worker.Start() + if !worker.IsRunning() { + t.Error("Worker should still be running after second Start") + } + + worker.Stop() + if worker.IsRunning() { + t.Error("Worker should not be running after Stop") + } + + // Try to stop again + worker.Stop() + if worker.IsRunning() { + t.Error("Worker should still not be running after second Stop") + } +} + +func TestRuleBasedUpdateWorkerProcessing(t *testing.T) { + queue := make(chan dtos.RuleBasedChangeUpdate, 5000) + synchronizer := &mockSynchronizer{} + synchronizer.syncError.Store(errors.New("")) + worker, _ := NewRuleBasedUpdateWorker(queue, synchronizer, logging.NewLogger(&logging.LoggerOptions{})) + + worker.Start() + + // Test successful update + var changeNumber int64 = 123 + queue <- *dtos.NewRuleBasedChangeUpdate( + 
dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 123), + &changeNumber, + &dtos.RuleBasedSegmentDTO{Name: "test"}, + ) + + time.Sleep(100 * time.Millisecond) + if atomic.LoadInt64(&synchronizer.syncCalled) != 1 { + t.Error("Synchronizer should be called once") + } + + // Test update with error + synchronizer.syncError.Store(errors.New("some error")) + queue <- *dtos.NewRuleBasedChangeUpdate( + dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 124), + &changeNumber, + &dtos.RuleBasedSegmentDTO{Name: "test"}, + ) + + time.Sleep(100 * time.Millisecond) + if atomic.LoadInt64(&synchronizer.syncCalled) != 2 { + t.Error("Synchronizer should be called twice") + } + + worker.Stop() +} diff --git a/service/commons.go b/service/commons.go index e7e29250..bdd56681 100644 --- a/service/commons.go +++ b/service/commons.go @@ -1,6 +1,7 @@ package service import ( + "fmt" "net/http" "net/url" "strconv" @@ -98,6 +99,11 @@ func (s *FlagRequestParams) ChangeNumberRB() int64 { return s.changeNumberRB } +// Till returns the till value +func (s *FlagRequestParams) Till() *int64 { + return s.till +} + // Apply applies the request parameters func (s *FlagRequestParams) Apply(request *http.Request) error { if s.cacheControlHeaders { @@ -108,13 +114,13 @@ func (s *FlagRequestParams) Apply(request *http.Request) error { if s.specVersion != nil { queryParameters = append(queryParameters, queryParamater{key: spec, value: common.StringFromRef(s.specVersion)}) } - queryParameters = append(queryParameters, queryParamater{key: since, value: strconv.FormatInt(s.changeNumber, 10)}) - queryParameters = append(queryParameters, queryParamater{key: rbSince, value: strconv.FormatInt(s.changeNumberRB, 10)}) + queryParameters = append(queryParameters, queryParamater{key: since, value: fmt.Sprint(s.changeNumber)}) + queryParameters = append(queryParameters, queryParamater{key: rbSince, value: fmt.Sprint(s.changeNumberRB)}) if len(s.flagSetsFilter) > 0 { queryParameters = append(queryParameters, 
queryParamater{key: sets, value: s.flagSetsFilter})
 	}
 	if s.till != nil {
-		queryParameters = append(queryParameters, queryParamater{key: till, value: strconv.FormatInt(*s.till, 10)})
+		queryParameters = append(queryParameters, queryParamater{key: till, value: fmt.Sprint(*s.till)})
 	}
 
 	request.URL.RawQuery = encode(queryParameters)
diff --git a/storage/mocks/rulebasedsegment.go b/storage/mocks/rulebasedsegment.go
new file mode 100644
index 00000000..4ee0ddc7
--- /dev/null
+++ b/storage/mocks/rulebasedsegment.go
@@ -0,0 +1,69 @@
+package mocks
+
+import (
+	"github.com/splitio/go-split-commons/v6/dtos"
+)
+
+// MockRuleBasedSegmentStorage is a mocked implementation of Rule-Based Segment Storage
+type MockRuleBasedSegmentStorage struct {
+	ChangeNumberCall              func() int64
+	AllCall                       func() []dtos.RuleBasedSegmentDTO
+	RuleBasedSegmentNamesCall     func() []string
+	ContainsCall                  func(ruleBasedSegmentNames []string) bool
+	GetSegmentsCall               func() []string
+	CountCall                     func() int
+	GetRuleBasedSegmentByNameCall func(name string) (*dtos.RuleBasedSegmentDTO, error)
+	SetChangeNumberCall           func(name string, till int64)
+	UpdateCall                    func(toAdd []dtos.RuleBasedSegmentDTO, toRemove []dtos.RuleBasedSegmentDTO, till int64)
+	ClearCall                     func()
+}
+
+// ChangeNumber mock
+func (m MockRuleBasedSegmentStorage) ChangeNumber() int64 {
+	return m.ChangeNumberCall()
+}
+
+// All mock
+func (m MockRuleBasedSegmentStorage) All() []dtos.RuleBasedSegmentDTO {
+	return m.AllCall()
+}
+
+// RuleBasedSegmentNames mock
+func (m MockRuleBasedSegmentStorage) RuleBasedSegmentNames() []string {
+	return m.RuleBasedSegmentNamesCall()
+}
+
+// Contains mock
+func (m MockRuleBasedSegmentStorage) Contains(ruleBasedSegmentNames []string) bool {
+	return m.ContainsCall(ruleBasedSegmentNames)
+}
+
+// GetSegments mock
+func (m MockRuleBasedSegmentStorage) GetSegments() []string {
+	return m.GetSegmentsCall()
+}
+
+// Count mock
+func (m MockRuleBasedSegmentStorage) Count() int {
+	return m.CountCall()
+}
+
+// GetRuleBasedSegmentByName 
mock +func (m MockRuleBasedSegmentStorage) GetRuleBasedSegmentByName(name string) (*dtos.RuleBasedSegmentDTO, error) { + return m.GetRuleBasedSegmentByNameCall(name) +} + +// SetChangeNumber mock +func (m MockRuleBasedSegmentStorage) SetChangeNumber(name string, till int64) { + m.SetChangeNumberCall(name, till) +} + +// Update mock +func (m MockRuleBasedSegmentStorage) Update(toAdd []dtos.RuleBasedSegmentDTO, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + m.UpdateCall(toAdd, toRemove, till) +} + +// Clear mock +func (m MockRuleBasedSegmentStorage) Clear() { + m.ClearCall() +} diff --git a/synchronizer/local.go b/synchronizer/local.go index 347b0b2a..4f8ee55a 100644 --- a/synchronizer/local.go +++ b/synchronizer/local.go @@ -8,6 +8,7 @@ import ( "github.com/splitio/go-split-commons/v6/healthcheck/application" "github.com/splitio/go-split-commons/v6/service/api" "github.com/splitio/go-split-commons/v6/storage" + "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/tasks" @@ -32,10 +33,14 @@ type LocalConfig struct { } // NewLocal creates new Local -func NewLocal(cfg *LocalConfig, splitAPI *api.SplitAPI, splitStorage storage.SplitStorage, segmentStorage storage.SegmentStorage, logger logging.LoggerInterface, runtimeTelemetry storage.TelemetryRuntimeProducer, hcMonitor application.MonitorProducerInterface) Synchronizer { +func NewLocal(cfg *LocalConfig, splitAPI *api.SplitAPI, splitStorage storage.SplitStorage, segmentStorage storage.SegmentStorage, ruleBasedStorage storage.RuleBasedSegmentsStorage, ruleBasedSegmentUpdater rulebasedsegment.UpdaterImpl, logger logging.LoggerInterface, runtimeTelemetry storage.TelemetryRuntimeProducer, hcMonitor application.MonitorProducerInterface) Synchronizer { + splitUpdater := split.NewSplitUpdater(splitStorage, ruleBasedStorage, 
ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, runtimeTelemetry, hcMonitor, flagsets.NewFlagSetFilter(cfg.FlagSets))
+	splitUpdater.SetRuleBasedSegmentStorage(ruleBasedStorage)
+
 	workers := Workers{
-		SplitUpdater: split.NewSplitUpdater(splitStorage, splitAPI.SplitFetcher, logger, runtimeTelemetry, hcMonitor, flagsets.NewFlagSetFilter(cfg.FlagSets)),
+		SplitUpdater: splitUpdater,
 	}
+	workers.RuleBasedSegmentUpdater = rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedStorage, logger)
 	if cfg.SegmentDirectory != "" {
 		workers.SegmentUpdater = segment.NewSegmentUpdater(splitStorage, segmentStorage, splitAPI.SegmentFetcher, logger, runtimeTelemetry, hcMonitor)
 	}
@@ -121,3 +126,6 @@ func (s *Local) LocalKill(splitName string, defaultTreatment string, changeNumbe
 // SynchronizeFeatureFlags no logic attached for localhost mode
 func (s *Local) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error { return nil }
+
+// SynchronizeRuleBasedSegments no logic attached for localhost mode
+func (s *Local) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { return nil }
diff --git a/synchronizer/local_test.go b/synchronizer/local_test.go
index 5cc7a083..bdce1690 100644
--- a/synchronizer/local_test.go
+++ b/synchronizer/local_test.go
@@ -7,11 +7,14 @@ import (
 	"time"
 
 	"github.com/splitio/go-split-commons/v6/dtos"
+	"github.com/splitio/go-split-commons/v6/flagsets"
 	hcMock "github.com/splitio/go-split-commons/v6/healthcheck/mocks"
 	"github.com/splitio/go-split-commons/v6/service"
 	"github.com/splitio/go-split-commons/v6/service/api"
 	httpMocks "github.com/splitio/go-split-commons/v6/service/mocks"
 	"github.com/splitio/go-split-commons/v6/storage/mocks"
+	"github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment"
+	"github.com/splitio/go-split-commons/v6/synchronizer/worker/split"
 	"github.com/splitio/go-toolkit/v5/logging"
 )
 
@@ -33,14 +36,44 @@ func TestLocalSyncAllError(t *testing.T) {
 	splitMockStorage := 
mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { return -1, nil }, } - segmentMockStorage := mocks.MockSegmentStorage{} telemetryMockStorage := mocks.MockTelemetryStorage{} appMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) { atomic.AddInt64(¬ifyEventCalled, 1) }, } - syncForTest := NewLocal(&LocalConfig{}, &splitAPI, splitMockStorage, segmentMockStorage, logger, telemetryMockStorage, appMonitorMock) + + flagSetFilter := flagsets.NewFlagSetFilter(nil) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := split.NewSplitUpdater( + splitMockStorage, + ruleBasedSegmentMockStorage, + *ruleBasedSegmentUpdater, + splitAPI.SplitFetcher, + logger, + telemetryMockStorage, + appMonitorMock, + flagSetFilter, + ) + splitUpdater.SetRuleBasedSegmentStorage(ruleBasedSegmentMockStorage) + + workers := Workers{ + SplitUpdater: splitUpdater, + RuleBasedSegmentUpdater: rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logger), + } + + syncForTest := &Local{ + splitTasks: SplitTasks{}, + workers: workers, + logger: logger, + } + err := syncForTest.SyncAll() if err == nil { t.Error("It should return error") @@ -55,6 +88,7 @@ func TestLocalSyncAllError(t *testing.T) { func TestLocalSyncAllOk(t *testing.T) { var splitFetchCalled int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} logger := logging.NewLogger(&logging.LoggerOptions{}) @@ -86,6 +120,12 @@ func TestLocalSyncAllOk(t *testing.T) { } var notifyEventCalled int64 segmentMockStorage := mocks.MockSegmentStorage{} + ruleBasedSegmentMockStorage 
:= mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { return -1 }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, RecordSuccessfulSyncCall: func(resource int, when time.Time) {}, @@ -95,7 +135,10 @@ func TestLocalSyncAllOk(t *testing.T) { atomic.AddInt64(¬ifyEventCalled, 1) }, } - syncForTest := NewLocal(&LocalConfig{}, &splitAPI, splitMockStorage, segmentMockStorage, logger, telemetryMockStorage, appMonitorMock) + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + syncForTest := NewLocal(&LocalConfig{}, &splitAPI, splitMockStorage, segmentMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, logger, telemetryMockStorage, appMonitorMock) err := syncForTest.SyncAll() if err != nil { t.Error("It should not return error") @@ -106,10 +149,14 @@ func TestLocalSyncAllOk(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Errorf("It should be called once. 
Actual %d", notifyEventCalled) } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } func TestLocalPeriodicFetching(t *testing.T) { var splitFetchCalled int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} logger := logging.NewLogger(&logging.LoggerOptions{}) @@ -140,6 +187,12 @@ func TestLocalPeriodicFetching(t *testing.T) { }, } segmentMockStorage := mocks.MockSegmentStorage{} + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { return -1 }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, RecordSuccessfulSyncCall: func(resource int, when time.Time) {}, @@ -150,7 +203,9 @@ func TestLocalPeriodicFetching(t *testing.T) { atomic.AddInt64(¬ifyEventCalled, 1) }, } - syncForTest := NewLocal(&LocalConfig{RefreshEnabled: true, SplitPeriod: 1}, &splitAPI, splitMockStorage, segmentMockStorage, logger, telemetryMockStorage, appMonitorMock) + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + syncForTest := NewLocal(&LocalConfig{RefreshEnabled: true, SplitPeriod: 1}, &splitAPI, splitMockStorage, segmentMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, logger, telemetryMockStorage, appMonitorMock) syncForTest.StartPeriodicFetching() time.Sleep(time.Millisecond * 1500) if atomic.LoadInt64(&splitFetchCalled) != 1 { @@ -160,4 +215,7 @@ func TestLocalPeriodicFetching(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Errorf("It should be called once. 
Actual %d", notifyEventCalled) } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } diff --git a/synchronizer/mocks/mocks.go b/synchronizer/mocks/mocks.go index 4579a8bb..a7184893 100644 --- a/synchronizer/mocks/mocks.go +++ b/synchronizer/mocks/mocks.go @@ -19,6 +19,7 @@ type MockSynchronizer struct { RefreshRatesCall func() (time.Duration, time.Duration) SynchronizeLargeSegmentCall func(name string, till *int64) error SynchronizeLargeSegmentUpdateCall func(lsRFDResponseDTO *dtos.LargeSegmentRFDResponseDTO) error + SynchronizeRuleBasedSegmentsCall func(rbChange *dtos.RuleBasedChangeUpdate) error } // SyncAll mock @@ -75,3 +76,8 @@ func (m *MockSynchronizer) SynchronizeLargeSegment(name string, till *int64) err func (m *MockSynchronizer) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO *dtos.LargeSegmentRFDResponseDTO) error { return m.SynchronizeLargeSegmentUpdateCall(lsRFDResponseDTO) } + +// SynchronizeRuleBasedSegments call +func (m *MockSynchronizer) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { + return m.SynchronizeRuleBasedSegmentsCall(rbChange) +} diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 948c488d..b37d597a 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -9,6 +9,7 @@ import ( "github.com/splitio/go-split-commons/v6/synchronizer/worker/impression" "github.com/splitio/go-split-commons/v6/synchronizer/worker/impressionscount" "github.com/splitio/go-split-commons/v6/synchronizer/worker/largesegment" + "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/tasks" @@ -37,6 +38,7 @@ type Workers struct { SplitUpdater split.Updater SegmentUpdater segment.Updater LargeSegmentUpdater largesegment.Updater + RuleBasedSegmentUpdater 
rulebasedsegment.Updater TelemetryRecorder telemetry.TelemetrySynchronizer ImpressionRecorder impression.ImpressionRecorder EventRecorder event.EventRecorder @@ -47,6 +49,7 @@ type Workers struct { type Synchronizer interface { SyncAll() error SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error + SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error LocalKill(splitName string, defaultTreatment string, changeNumber int64) SynchronizeSegment(segmentName string, till *int64) error StartPeriodicFetching() @@ -233,11 +236,18 @@ func (s *SynchronizerImpl) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO *dtos. // SynchronizeFeatureFlags syncs featureFlags func (s *SynchronizerImpl) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error { result, err := s.workers.SplitUpdater.SynchronizeFeatureFlags(ffChange) - s.synchronizeSegmentsAfterSplitSync(result.ReferencedSegments) + s.synchronizeSegmentsAfterSplitAndRBSync(result.ReferencedSegments) s.synchronizeLargeSegmentsAfterSplitSync(result.ReferencedLargeSegments) return err } +// SynchronizeRuleBasedSegments syncs rule-based segments +func (s *SynchronizerImpl) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { + result, err := s.workers.RuleBasedSegmentUpdater.SynchronizeRuleBasedSegment(rbChange) + s.synchronizeSegmentsAfterSplitAndRBSync(result.ReferencedSegments) + return err +} + func (s *SynchronizerImpl) dataFlusher() { for { msg := <-s.inMememoryFullQueue @@ -268,7 +278,7 @@ func (s *SynchronizerImpl) filterCachedSegments(segmentsReferenced []string) []s return toRet } -func (s *SynchronizerImpl) synchronizeSegmentsAfterSplitSync(referencedSegments []string) { +func (s *SynchronizerImpl) synchronizeSegmentsAfterSplitAndRBSync(referencedSegments []string) { for _, segment := range s.filterCachedSegments(referencedSegments) { go s.SynchronizeSegment(segment, nil) // send segment to workerpool (queue is bypassed) } diff --git 
a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 704460f1..f896a9e0 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -16,10 +16,12 @@ import ( "github.com/splitio/go-split-commons/v6/service/api" httpMocks "github.com/splitio/go-split-commons/v6/service/mocks" "github.com/splitio/go-split-commons/v6/storage/inmemory" + "github.com/splitio/go-split-commons/v6/storage/mocks" storageMock "github.com/splitio/go-split-commons/v6/storage/mocks" syncMocks "github.com/splitio/go-split-commons/v6/synchronizer/mocks" "github.com/splitio/go-split-commons/v6/synchronizer/worker/event" "github.com/splitio/go-split-commons/v6/synchronizer/worker/impression" + "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/tasks" @@ -40,6 +42,18 @@ func validReqParams(t *testing.T, fetchOptions service.RequestParams) { } } +func createSplitUpdater(splitMockStorage storageMock.MockSplitStorage, splitAPI api.SplitAPI, logger logging.LoggerInterface, telemetryMockStorage storageMock.MockTelemetryStorage, appMonitorMock hcMock.MockApplicationMonitor) split.Updater { + ruleBasedSegmentMockStorage := storageMock.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { return -1 }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + return splitUpdater +} + func TestSyncAllErrorSplits(t *testing.T) { var splitFetchCalled int64 var 
notifyEventCalled int64 @@ -65,12 +79,21 @@ func TestSyncAllErrorSplits(t *testing.T) { }, } advanced := conf.AdvancedConfig{EventsQueueSize: 100, EventsBulkSize: 100, HTTPTimeout: 100, ImpressionsBulkSize: 100, ImpressionsQueueSize: 100, SegmentQueueSize: 50, SegmentWorkers: 5} + ruleBasedSegmentMockStorage := storageMock.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { return -1 }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), - SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, storageMock.MockSegmentStorage{}, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), - EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), - ImpressionRecorder: impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), - TelemetryRecorder: telemetry.NewTelemetrySynchronizer(telemetryMockStorage, nil, nil, nil, nil, dtos.Metadata{}, telemetryMockStorage), + SplitUpdater: splitUpdater, + SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, storageMock.MockSegmentStorage{}, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), + EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), + ImpressionRecorder: 
impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), + TelemetryRecorder: telemetry.NewTelemetrySynchronizer(telemetryMockStorage, nil, nil, nil, nil, dtos.Metadata{}, telemetryMockStorage), + RuleBasedSegmentUpdater: rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logger), } splitTasks := SplitTasks{ EventSyncTask: tasks.NewRecordEventsTask(workers.EventRecorder, advanced.EventsBulkSize, 10, logger), @@ -148,7 +171,7 @@ func TestSyncAllErrorInSegments(t *testing.T) { } advanced := conf.AdvancedConfig{EventsQueueSize: 100, EventsBulkSize: 100, HTTPTimeout: 100, ImpressionsBulkSize: 100, ImpressionsQueueSize: 100, SegmentQueueSize: 50, SegmentWorkers: 5} workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: createSplitUpdater(splitMockStorage, splitAPI, logger, telemetryMockStorage, appMonitorMock), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), ImpressionRecorder: impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), @@ -252,7 +275,7 @@ func TestSyncAllOk(t *testing.T) { advanced := conf.AdvancedConfig{EventsQueueSize: 100, EventsBulkSize: 100, HTTPTimeout: 100, ImpressionsBulkSize: 100, ImpressionsQueueSize: 100, SegmentQueueSize: 50, SegmentWorkers: 5} workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, 
flagsets.NewFlagSetFilter(nil)), + SplitUpdater: createSplitUpdater(splitMockStorage, splitAPI, logger, telemetryMockStorage, appMonitorMock), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), ImpressionRecorder: impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), @@ -356,7 +379,7 @@ func TestPeriodicFetching(t *testing.T) { } advanced := conf.AdvancedConfig{EventsQueueSize: 100, EventsBulkSize: 100, HTTPTimeout: 100, ImpressionsBulkSize: 100, ImpressionsQueueSize: 100, SegmentQueueSize: 50, SegmentWorkers: 5} workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: createSplitUpdater(splitMockStorage, splitAPI, logger, telemetryMockStorage, appMonitorMock), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), ImpressionRecorder: impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), @@ -480,7 +503,7 @@ func TestPeriodicRecording(t *testing.T) { } advanced := conf.AdvancedConfig{EventsQueueSize: 100, EventsBulkSize: 100, HTTPTimeout: 100, ImpressionsBulkSize: 100, ImpressionsQueueSize: 100, SegmentQueueSize: 50, SegmentWorkers: 5} workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, 
splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: createSplitUpdater(splitMockStorage, splitAPI, logger, telemetryMockStorage, appMonitorMock), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), EventRecorder: event.NewEventRecorderSingle(eventMockStorage, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), ImpressionRecorder: impression.NewRecorderSingle(impressionMockStorage, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), @@ -542,8 +565,16 @@ func TestSplitUpdateWorkerCNGreaterThanFFChange(t *testing.T) { telemetryMockStorage := storageMock.MockTelemetryStorage{} appMonitorMock := hcMock.MockApplicationMonitor{} + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), } splitTasks := SplitTasks{ @@ -595,8 +626,16 @@ func TestSplitUpdateWorkerStorageCNEqualsFFCN(t *testing.T) { telemetryMockStorage := storageMock.MockTelemetryStorage{} appMonitorMock := hcMock.MockApplicationMonitor{} + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: 
func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), } splitTasks := SplitTasks{ @@ -655,8 +694,16 @@ func TestSplitUpdateWorkerFFPcnEqualsFFNotNil(t *testing.T) { telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryStorage, appMonitorMock), } splitTasks := SplitTasks{ @@ -695,6 +742,7 @@ func TestSplitUpdateWorkerFFPcnEqualsFFNotNil(t *testing.T) { func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { var splitFetchCalled int64 var updateCalled int64 + var updateRBCalled 
int64 logger := logging.NewLogger(&logging.LoggerOptions{}) splitAPI := api.SplitAPI{ SplitFetcher: httpMocks.MockSplitFetcher{ @@ -737,8 +785,19 @@ func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { NotifyEventCall: func(counterType int) {}, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, hcMonitorMock), } splitTasks := SplitTasks{ @@ -768,11 +827,15 @@ func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { if u := atomic.LoadInt64(&updateCalled); u != 1 { t.Error("should have been called once. 
got: ", u) } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } func TestSplitUpdateWorkerFFIsNil(t *testing.T) { var splitFetchCalled int64 var updateCalled int64 + var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) splitAPI := api.SplitAPI{ SplitFetcher: httpMocks.MockSplitFetcher{ @@ -805,9 +868,19 @@ func TestSplitUpdateWorkerFFIsNil(t *testing.T) { hcMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) {}, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, hcMonitorMock), } splitTasks := SplitTasks{ @@ -836,11 +909,15 @@ func TestSplitUpdateWorkerFFIsNil(t *testing.T) { if u := atomic.LoadInt64(&updateCalled); u != 1 { t.Error("should have been called once. 
got: ", u) } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { var splitFetchCalled int64 var updateCalled int64 + var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) splitAPI := api.SplitAPI{ SplitFetcher: httpMocks.MockSplitFetcher{ @@ -859,8 +936,8 @@ func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { return 1, nil }, UpdateCall: func(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) { - if changeNumber != 2 { - t.Error("It should be 2") + if changeNumber != 5 { + t.Error("It should be 5") } atomic.AddInt64(&updateCalled, 1) }, @@ -869,13 +946,25 @@ func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { telemetryMockStorage := storageMock.MockTelemetryStorage{ RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, RecordSuccessfulSyncCall: func(resource int, when time.Time) {}, + RecordUpdatesFromSSECall: func(updateType int) {}, } hcMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) {}, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, 
splitAPI.SegmentFetcher, logger, telemetryMockStorage, hcMonitorMock), } splitTasks := SplitTasks{ @@ -899,7 +988,7 @@ func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { t.Error("It should be running") } - if c := atomic.LoadInt64(&splitFetchCalled); c != 1 { + if c := atomic.LoadInt64(&splitFetchCalled); c != 0 { t.Error("should have been called once. got: ", c) } if u := atomic.LoadInt64(&updateCalled); u != 1 { @@ -923,8 +1012,16 @@ func TestLocalKill(t *testing.T) { } }, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, storageMock.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, storageMock.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)), } splitTasks := SplitTasks{ SplitSyncTask: tasks.NewFetchSplitsTask(workers.SplitUpdater, 1, logger), @@ -996,8 +1093,16 @@ func TestSplitUpdateWithReferencedSegments(t *testing.T) { NotifyEventCall: func(counterType int) {}, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, 
*ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), TelemetryRecorder: telemetry.NewTelemetrySynchronizer(telemetryMockStorage, nil, nil, nil, nil, dtos.Metadata{}, telemetryMockStorage), diff --git a/synchronizer/worker/rulebasedsegment/rulebasedsegment.go b/synchronizer/worker/rulebasedsegment/rulebasedsegment.go new file mode 100644 index 00000000..ec59f925 --- /dev/null +++ b/synchronizer/worker/rulebasedsegment/rulebasedsegment.go @@ -0,0 +1,122 @@ +package rulebasedsegment + +import ( + "github.com/splitio/go-split-commons/v6/dtos" + "github.com/splitio/go-split-commons/v6/engine/validator" + "github.com/splitio/go-split-commons/v6/storage" + "github.com/splitio/go-toolkit/v5/logging" +) + +const ( + Active = "ACTIVE" + Archived = "ARCHIVED" + TypeStandard = "standard" + TypeRuleBased = "rule-based" + TypeLarge = "large" + matcherTypeInSegment = "IN_SEGMENT" +) + +// Updater interface +type Updater interface { + SynchronizeRuleBasedSegment(rbChange *dtos.RuleBasedChangeUpdate) (*UpdateResult, error) +} + +// UpdateResult encapsulates information regarding the split update performed +type UpdateResult struct { + ReferencedSegments []string + NewChangeNumber int64 + RequiresFetch bool +} + +// UpdaterImpl struct for split sync +type UpdaterImpl struct { + ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage + logger logging.LoggerInterface +} + +// NewRuleBasedSegmentUpdater creates a new rule-based segment synchronizer for processing rule-based updates
func NewRuleBasedSegmentUpdater( + ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage, + logger logging.LoggerInterface, +) *UpdaterImpl { + return
&UpdaterImpl{ + ruleBasedSegmentStorage: ruleBasedSegmentStorage, + logger: logger, + } +} + +func (s *UpdaterImpl) SynchronizeRuleBasedSegment(ruleBasedChange *dtos.RuleBasedChangeUpdate) (*UpdateResult, error) { + result := s.processRuleBasedChangeUpdate(ruleBasedChange) + return result, nil +} + +func (s *UpdaterImpl) ProcessUpdate(splitChanges *dtos.SplitChangesDTO) []string { + activeRB, inactiveRB, segments := s.processRuleBasedSegmentChanges(splitChanges) + // Add/Update active rule-based segments + s.ruleBasedSegmentStorage.Update(activeRB, inactiveRB, splitChanges.RuleBasedSegments.Till) + return segments +} + +func (s *UpdaterImpl) processRuleBasedSegmentChanges(splitChanges *dtos.SplitChangesDTO) ([]dtos.RuleBasedSegmentDTO, []dtos.RuleBasedSegmentDTO, []string) { + toRemove := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) + toAdd := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) + segments := make([]string, 0) + for _, rbSegment := range splitChanges.RuleBasedSegments.RuleBasedSegments { + if rbSegment.Status == Active { + validator.ProcessRBMatchers(&rbSegment, s.logger) + toAdd = append(toAdd, rbSegment) + segments = append(segments, s.getSegments(&rbSegment)...)
+ } else { + toRemove = append(toRemove, rbSegment) + } + } + return toAdd, toRemove, segments +} + +func addIfNotExists(segments []string, seen map[string]struct{}, name string) []string { + if _, exists := seen[name]; !exists { + seen[name] = struct{}{} + segments = append(segments, name) + } + return segments +} + +func (s *UpdaterImpl) getSegments(ruleBasedSegment *dtos.RuleBasedSegmentDTO) []string { + seen := make(map[string]struct{}) + segments := make([]string, 0) + + for _, segment := range ruleBasedSegment.Excluded.Segments { + if segment.Type == TypeStandard { + segments = addIfNotExists(segments, seen, segment.Name) + } + } + + for _, cond := range ruleBasedSegment.Conditions { + for _, matcher := range cond.MatcherGroup.Matchers { + if matcher.MatcherType == matcherTypeInSegment && matcher.UserDefinedSegment != nil { + segments = addIfNotExists(segments, seen, matcher.UserDefinedSegment.SegmentName) + } + } + } + + return segments +} + +func (s *UpdaterImpl) processRuleBasedChangeUpdate(ruleBasedChange *dtos.RuleBasedChangeUpdate) *UpdateResult { + changeNumber := s.ruleBasedSegmentStorage.ChangeNumber() + if changeNumber >= ruleBasedChange.BaseUpdate.ChangeNumber() { + s.logger.Debug("the rule-based segment it's already updated") + return &UpdateResult{RequiresFetch: true} + } + ruleBasedSegments := make([]dtos.RuleBasedSegmentDTO, 0, 1) + ruleBasedSegments = append(ruleBasedSegments, *ruleBasedChange.RuleBasedSegment()) + splitChanges := dtos.SplitChangesDTO{RuleBasedSegments: dtos.RuleBasedSegmentsDTO{RuleBasedSegments: ruleBasedSegments}} + toAdd, toRemove, segments := s.processRuleBasedSegmentChanges(&splitChanges) + s.ruleBasedSegmentStorage.Update(toAdd, toRemove, ruleBasedChange.BaseUpdate.ChangeNumber()) + + return &UpdateResult{ + ReferencedSegments: segments, + NewChangeNumber: ruleBasedChange.BaseUpdate.ChangeNumber(), + RequiresFetch: false, + } +} diff --git a/synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go
b/synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go new file mode 100644 index 00000000..c638ca86 --- /dev/null +++ b/synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go @@ -0,0 +1,266 @@ +package rulebasedsegment + +import ( + "sync/atomic" + "testing" + + "github.com/splitio/go-split-commons/v6/dtos" + "github.com/splitio/go-split-commons/v6/storage/mocks" + "github.com/splitio/go-toolkit/v5/logging" +) + +func TestProcessUpdate(t *testing.T) { + var updateCalled int64 + mockedRB1 := dtos.RuleBasedSegmentDTO{ + Name: "rb1", + Status: Active, + Conditions: []dtos.RuleBasedConditionDTO{ + { + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + { + MatcherType: matcherTypeInSegment, + UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ + SegmentName: "segment1", + }, + }, + }, + }, + }, + }, + Excluded: dtos.ExcludedDTO{ + Segments: []dtos.ExcluededSegmentDTO{ + { + Name: "segment2", + Type: TypeStandard, + }, + }, + }, + } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateCalled, 1) + }, + } + + ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitChanges := &dtos.SplitChangesDTO{ + RuleBasedSegments: dtos.RuleBasedSegmentsDTO{ + RuleBasedSegments: []dtos.RuleBasedSegmentDTO{mockedRB1}, + Since: 1, + Till: 2, + }, + } + + segments := ruleBasedSegmentUpdater.ProcessUpdate(splitChanges) + + if atomic.LoadInt64(&updateCalled) != 1 { + t.Error("Update should be called once") + } + + if len(segments) != 2 { + t.Error("Should return 2 segments") + } + + found1, found2 := false, false + for _, segment := range segments { + if segment == "segment1" { + found1 = true + } + if segment == "segment2" { + found2 = true + } + } + + if !found1 || !found2 { + t.Error("Should return both segments") + } +} + +func 
TestProcessUpdateArchivedRB(t *testing.T) { + var updateCalled int64 + mockedRB1 := dtos.RuleBasedSegmentDTO{ + Name: "rb1", + Status: Archived, + Conditions: []dtos.RuleBasedConditionDTO{ + { + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + { + MatcherType: matcherTypeInSegment, + UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ + SegmentName: "segment1", + }, + }, + }, + }, + }, + }, + } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateCalled, 1) + }, + } + + ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitChanges := &dtos.SplitChangesDTO{ + RuleBasedSegments: dtos.RuleBasedSegmentsDTO{ + RuleBasedSegments: []dtos.RuleBasedSegmentDTO{mockedRB1}, + Since: 1, + Till: 2, + }, + } + + segments := ruleBasedSegmentUpdater.ProcessUpdate(splitChanges) + + if atomic.LoadInt64(&updateCalled) != 1 { + t.Error("Update should be called once") + } + + if len(segments) != 0 { + t.Error("Should return no segments") + } +} + +func TestSynchronizeRuleBasedSegment(t *testing.T) { + var updateCalled int64 + var changeNumberCalled int64 + + mockedRB1 := dtos.RuleBasedSegmentDTO{ + Name: "rb1", + Status: Active, + Conditions: []dtos.RuleBasedConditionDTO{ + { + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + { + MatcherType: matcherTypeInSegment, + UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ + SegmentName: "segment1", + }, + }, + }, + }, + }, + }, + } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateCalled, 1) + }, + ChangeNumberCall: func() int64 { + atomic.AddInt64(&changeNumberCalled, 1) + return 0 + }, + } + + ruleBasedSegmentUpdater := 
NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + var changeNumber int64 = 2 + result, err := ruleBasedSegmentUpdater.SynchronizeRuleBasedSegment(dtos.NewRuleBasedChangeUpdate( + dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), + &changeNumber, + &mockedRB1, + )) + + if err != nil { + t.Error("Should not return error") + } + + if atomic.LoadInt64(&updateCalled) != 1 { + t.Error("Update should be called once") + } + + if atomic.LoadInt64(&changeNumberCalled) != 1 { + t.Error("ChangeNumber should be called once") + } + + if len(result.ReferencedSegments) != 1 { + t.Error("Should return 1 segment") + } + + if result.ReferencedSegments[0] != "segment1" { + t.Error("Should return segment1") + } + + if result.NewChangeNumber != 2 { + t.Error("Should return change number 2") + } + + if result.RequiresFetch { + t.Error("Should not require fetch") + } +} + +func TestSynchronizeRuleBasedSegmentNoUpdate(t *testing.T) { + var updateCalled int64 + var changeNumberCalled int64 + + mockedRB1 := dtos.RuleBasedSegmentDTO{ + Name: "rb1", + Status: Active, + Conditions: []dtos.RuleBasedConditionDTO{ + { + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + { + MatcherType: matcherTypeInSegment, + UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ + SegmentName: "segment1", + }, + }, + }, + }, + }, + }, + } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateCalled, 1) + }, + ChangeNumberCall: func() int64 { + atomic.AddInt64(&changeNumberCalled, 1) + return 3 + }, + } + + ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + var changeNumber int64 = 2 + result, err := ruleBasedSegmentUpdater.SynchronizeRuleBasedSegment(dtos.NewRuleBasedChangeUpdate( + dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 
2), + &changeNumber, + &mockedRB1, + )) + + if err != nil { + t.Error("Should not return error") + } + + if atomic.LoadInt64(&updateCalled) != 0 { + t.Error("Update should not be called") + } + + if atomic.LoadInt64(&changeNumberCalled) != 1 { + t.Error("ChangeNumber should be called once") + } + + if len(result.ReferencedSegments) != 0 { + t.Error("Should return no segments") + } + + if result.NewChangeNumber != 0 { + t.Error("Should return change number 0") + } + + if !result.RequiresFetch { + t.Error("Should require fetch") + } +} diff --git a/synchronizer/worker/split/split.go b/synchronizer/worker/split/split.go index 577ed6ca..27631886 100644 --- a/synchronizer/worker/split/split.go +++ b/synchronizer/worker/split/split.go @@ -10,6 +10,7 @@ import ( "github.com/splitio/go-split-commons/v6/healthcheck/application" "github.com/splitio/go-split-commons/v6/service" "github.com/splitio/go-split-commons/v6/storage" + "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/backoff" "github.com/splitio/go-toolkit/v5/common" @@ -44,6 +45,7 @@ type UpdateResult struct { ReferencedSegments []string ReferencedLargeSegments []string NewChangeNumber int64 + NewRBChangeNumber int64 RequiresFetch bool } @@ -58,6 +60,7 @@ type UpdaterImpl struct { splitStorage storage.SplitStorage splitFetcher service.SplitFetcher ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage + ruleBasedSegmentUpdater rulebasedsegment.UpdaterImpl logger logging.LoggerInterface runtimeTelemetry storage.TelemetryRuntimeProducer hcMonitor application.MonitorProducerInterface @@ -69,6 +72,8 @@ type UpdaterImpl struct { // NewSplitUpdater creates new split synchronizer for processing split updates func NewSplitUpdater( splitStorage storage.SplitStorage, + ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage, + ruleBasedSegmentUpdater rulebasedsegment.UpdaterImpl, splitFetcher 
service.SplitFetcher, logger logging.LoggerInterface, runtimeTelemetry storage.TelemetryRuntimeProducer, @@ -84,16 +89,19 @@ func NewSplitUpdater( onDemandFetchBackoffBase: onDemandFetchBackoffBase, onDemandFetchBackoffMaxWait: onDemandFetchBackoffMaxWait, flagSetsFilter: flagSetsFilter, + ruleBasedSegmentStorage: ruleBasedSegmentStorage, + ruleBasedSegmentUpdater: ruleBasedSegmentUpdater, } } +func (s *UpdaterImpl) SetRuleBasedSegmentStorage(storage storage.RuleBasedSegmentsStorage) { + s.ruleBasedSegmentStorage = storage +} + func (s *UpdaterImpl) processUpdate(splitChanges *dtos.SplitChangesDTO) { activeSplits, inactiveSplits := s.processFeatureFlagChanges(splitChanges) // Add/Update active splits s.splitStorage.Update(activeSplits, inactiveSplits, splitChanges.FeatureFlags.Till) - activeRB, inactiveRB := s.processRuleBasedSegmentChanges(splitChanges) - // Add/Update active rule-based - s.ruleBasedSegmentStorage.Update(activeRB, inactiveRB, splitChanges.RuleBasedSegments.Till) } // fetchUntil Hit endpoint, update storage and return when since==till. 
@@ -122,12 +130,14 @@ func (s *UpdaterImpl) fetchUntil(fetchOptions *service.FlagRequestParams) (*Upda break } currentSince = splitChanges.FeatureFlags.Till + currentRBSince = splitChanges.RuleBasedSegments.Till s.runtimeTelemetry.RecordSyncLatency(telemetry.SplitSync, time.Since(before)) s.processUpdate(splitChanges) + segmentReferences = s.ruleBasedSegmentUpdater.ProcessUpdate(splitChanges) segmentReferences = appendSegmentNames(segmentReferences, splitChanges) updatedSplitNames = appendSplitNames(updatedSplitNames, splitChanges) largeSegmentReferences = appendLargeSegmentNames(largeSegmentReferences, splitChanges) - if currentSince == splitChanges.FeatureFlags.Since { + if currentSince == splitChanges.FeatureFlags.Since && currentRBSince == splitChanges.RuleBasedSegments.Since { s.runtimeTelemetry.RecordSuccessfulSync(telemetry.SplitSync, time.Now().UTC()) break } @@ -136,6 +146,7 @@ func (s *UpdaterImpl) fetchUntil(fetchOptions *service.FlagRequestParams) (*Upda UpdatedSplits: common.DedupeStringSlice(updatedSplitNames), ReferencedSegments: common.DedupeStringSlice(segmentReferences), NewChangeNumber: currentSince, + NewRBChangeNumber: currentRBSince, ReferencedLargeSegments: common.DedupeStringSlice(largeSegmentReferences), }, err } @@ -150,7 +161,7 @@ func (s *UpdaterImpl) attemptSplitSync(fetchOptions *service.FlagRequestParams, if err != nil || remainingAttempts <= 0 { return internalSplitSync{updateResult: updateResult, successfulSync: false, attempt: remainingAttempts}, err } - if till == nil || *till <= updateResult.NewChangeNumber { + if till == nil || *till <= updateResult.NewChangeNumber || *till <= updateResult.NewRBChangeNumber { return internalSplitSync{updateResult: updateResult, successfulSync: true, attempt: remainingAttempts}, nil } howLong := internalBackoff.Next() @@ -223,12 +234,26 @@ func appendLargeSegmentNames(dst []string, splitChanges *dtos.SplitChangesDTO) [ return dst } +func addIfNotExists(dst []string, seen map[string]struct{}, name 
string) []string { + if _, exists := seen[name]; !exists { + seen[name] = struct{}{} + dst = append(dst, name) + } + return dst +} + +func appendRuleBasedSegmentNames(dst []string, splitChanges *dtos.SplitChangesDTO) []string { + seen := make(map[string]struct{}) + // Seed the set with the names already present in dst so we don't duplicate those either + for _, name := range dst { + seen[name] = struct{}{} + } + for _, split := range splitChanges.FeatureFlags.Splits { + for _, cond := range split.Conditions { + for _, matcher := range cond.MatcherGroup.Matchers { + if matcher.MatcherType == matcherTypeInRuleBasedSegment && matcher.UserDefinedSegment != nil { - dst = append(dst, matcher.UserDefinedSegment.SegmentName) + dst = addIfNotExists(dst, seen, matcher.UserDefinedSegment.SegmentName) + } + } + } @@ -250,20 +275,6 @@ func (s *UpdaterImpl) processFeatureFlagChanges(splitChanges *dtos.SplitChangesD return toAdd, toRemove } -func (s *UpdaterImpl) processRuleBasedSegmentChanges(splitChanges *dtos.SplitChangesDTO) ([]dtos.RuleBasedSegmentDTO, []dtos.RuleBasedSegmentDTO) { - toRemove := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) - toAdd := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) - for idx := range splitChanges.RuleBasedSegments.RuleBasedSegments { - if splitChanges.RuleBasedSegments.RuleBasedSegments[idx].Status == Active { - validator.ProcessRBMatchers(&splitChanges.RuleBasedSegments.RuleBasedSegments[idx], s.logger) - toAdd = append(toAdd, splitChanges.RuleBasedSegments.RuleBasedSegments[idx]) - } else { - toRemove = append(toRemove, splitChanges.RuleBasedSegments.RuleBasedSegments[idx]) - } - } - return toAdd, toRemove -} - // LocalKill marks a spit as killed in local storage func (s *UpdaterImpl) LocalKill(splitName string, defaultTreatment string, changeNumber int64) { s.splitStorage.KillLocally(splitName, defaultTreatment, changeNumber) @@ -279,36 +290,38 @@ func (s *UpdaterImpl)
processFFChange(ffChange dtos.SplitChangeUpdate) *UpdateRe s.logger.Debug("the feature flag it's already updated") return &UpdateResult{RequiresFetch: false} } - if ffChange.FeatureFlag() != nil && *ffChange.PreviousChangeNumber() == changeNumber { - segmentReferences := make([]string, 0, 10) - updatedSplitNames := make([]string, 0, 1) - largeSegmentReferences := make([]string, 0, 10) - ruleBasedSegmentReferences := make([]string, 0, 10) - s.logger.Debug(fmt.Sprintf("updating feature flag %s", ffChange.FeatureFlag().Name)) - featureFlags := make([]dtos.SplitDTO, 0, 1) - featureFlags = append(featureFlags, *ffChange.FeatureFlag()) - featureFlagChange := dtos.SplitChangesDTO{FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - activeFFs, inactiveFFs := s.processFeatureFlagChanges(&featureFlagChange) - s.splitStorage.Update(activeFFs, inactiveFFs, ffChange.BaseUpdate.ChangeNumber()) - s.runtimeTelemetry.RecordUpdatesFromSSE(telemetry.SplitUpdate) - updatedSplitNames = append(updatedSplitNames, ffChange.FeatureFlag().Name) - segmentReferences = appendSegmentNames(segmentReferences, &featureFlagChange) - largeSegmentReferences = appendLargeSegmentNames(largeSegmentReferences, &featureFlagChange) - ruleBasedSegmentReferences = appendRuleBasedSegmentNames(ruleBasedSegmentReferences, &featureFlagChange) - requiresFetch := false - if !s.ruleBasedSegmentStorage.Contains(ruleBasedSegmentReferences) { - requiresFetch = true - } - return &UpdateResult{ - UpdatedSplits: updatedSplitNames, - ReferencedSegments: segmentReferences, - NewChangeNumber: ffChange.BaseUpdate.ChangeNumber(), - RequiresFetch: requiresFetch, - ReferencedLargeSegments: largeSegmentReferences, - } + if ffChange.FeatureFlag() == nil { + s.logger.Debug("the feature flag was nil") + return &UpdateResult{RequiresFetch: true} + } + + // If we have a feature flag, update it + segmentReferences := make([]string, 0, 10) + updatedSplitNames := make([]string, 0, 1) + largeSegmentReferences := make([]string, 0, 
10) + ruleBasedSegmentReferences := make([]string, 0, 10) + s.logger.Debug(fmt.Sprintf("updating feature flag %s", ffChange.FeatureFlag().Name)) + featureFlags := make([]dtos.SplitDTO, 0, 1) + featureFlags = append(featureFlags, *ffChange.FeatureFlag()) + featureFlagChange := dtos.SplitChangesDTO{FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} + activeFFs, inactiveFFs := s.processFeatureFlagChanges(&featureFlagChange) + s.splitStorage.Update(activeFFs, inactiveFFs, ffChange.BaseUpdate.ChangeNumber()) + s.runtimeTelemetry.RecordUpdatesFromSSE(telemetry.SplitUpdate) + updatedSplitNames = append(updatedSplitNames, ffChange.FeatureFlag().Name) + segmentReferences = appendSegmentNames(segmentReferences, &featureFlagChange) + largeSegmentReferences = appendLargeSegmentNames(largeSegmentReferences, &featureFlagChange) + ruleBasedSegmentReferences = appendRuleBasedSegmentNames(ruleBasedSegmentReferences, &featureFlagChange) + requiresFetch := false + if len(ruleBasedSegmentReferences) > 0 && !s.ruleBasedSegmentStorage.Contains(ruleBasedSegmentReferences) { + requiresFetch = true + } + return &UpdateResult{ + UpdatedSplits: updatedSplitNames, + ReferencedSegments: segmentReferences, + NewChangeNumber: ffChange.BaseUpdate.ChangeNumber(), + RequiresFetch: requiresFetch, + ReferencedLargeSegments: largeSegmentReferences, } - s.logger.Debug("the feature flag was nil or the previous change number wasn't equal to the feature flag storage's change number") - return &UpdateResult{RequiresFetch: true} } func (s *UpdaterImpl) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*UpdateResult, error) { diff --git a/synchronizer/worker/split/split_test.go b/synchronizer/worker/split/split_test.go index e173490e..27a039ea 100644 --- a/synchronizer/worker/split/split_test.go +++ b/synchronizer/worker/split/split_test.go @@ -16,6 +16,7 @@ import ( "github.com/splitio/go-split-commons/v6/storage/inmemory" "github.com/splitio/go-split-commons/v6/storage/inmemory/mutexmap" 
"github.com/splitio/go-split-commons/v6/storage/mocks" + "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/common" "github.com/splitio/go-toolkit/v5/logging" @@ -48,6 +49,14 @@ func TestSplitSynchronizerError(t *testing.T) { }, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncErrorCall: func(resource, status int) { if resource != telemetry.SplitSync { @@ -65,7 +74,7 @@ func TestSplitSynchronizerError(t *testing.T) { }, } - splitUpdater := NewSplitUpdater(splitMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) _, err := splitUpdater.SynchronizeSplits(nil) if err == nil { @@ -104,6 +113,13 @@ func TestSplitSynchronizerErrorScRequestURITooLong(t *testing.T) { } }, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) appMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) { @@ -111,7 +127,7 @@ func TestSplitSynchronizerErrorScRequestURITooLong(t *testing.T) { }, } - splitUpdater := NewSplitUpdater(splitMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), 
telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) _, err := splitUpdater.SynchronizeSplits(nil) if err == nil { @@ -131,6 +147,7 @@ func TestSplitSynchronizer(t *testing.T) { mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: true, Status: "INACTIVE", TrafficTypeName: "one"} var notifyEventCalled int64 + var updateRBCalled int64 splitMockStorage := mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { @@ -196,7 +213,18 @@ func TestSplitSynchronizer(t *testing.T) { }, } - splitUpdater := NewSplitUpdater(splitMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) _, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -205,11 +233,15 @@ func TestSplitSynchronizer(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Error("It should be called once") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should be called once") + } } func TestSplitSyncProcess(t 
*testing.T) { var call int64 var notifyEventCalled int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: true, Status: "INACTIVE", TrafficTypeName: "one"} @@ -260,7 +292,18 @@ func TestSplitSyncProcess(t *testing.T) { splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - splitUpdater := NewSplitUpdater(splitStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) res, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -321,12 +364,18 @@ func TestSplitSyncProcess(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 2 { t.Error("It should be called twice") } + if atomic.LoadInt64(&updateRBCalled) != 2 { + t.Error("It should be called twice") + } } func TestSplitTill(t *testing.T) { var call int64 var notifyEventCalled int64 + var updateRBCalled int64 + var changeNumberRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} + mockedRuleBased1 := dtos.RuleBasedSegmentDTO{Name: "rb1", Status: "ACTIVE"} 
splitMockFetcher := fetcherMock.MockSplitFetcher{ FetchCall: func(fetchOptions *service.FlagRequestParams) (*dtos.SplitChangesDTO, error) { @@ -335,6 +384,9 @@ func TestSplitTill(t *testing.T) { FeatureFlags: dtos.FeatureFlagsDTO{Splits: []dtos.SplitDTO{mockedSplit1}, Since: 2, Till: 2}, + RuleBasedSegments: dtos.RuleBasedSegmentsDTO{RuleBasedSegments: []dtos.RuleBasedSegmentDTO{mockedRuleBased1}, + Since: 2, + Till: 2}, }, nil }, } @@ -349,7 +401,22 @@ func TestSplitTill(t *testing.T) { splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - splitUpdater := NewSplitUpdater(splitStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + atomic.AddInt64(&changeNumberRBCalled, 1) + if changeNumberRBCalled == 1 { + return -1 + } + return changeNumberRBCalled + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) var till int64 = 1 _, err := splitUpdater.SynchronizeSplits(&till) @@ -366,11 +433,18 @@ func TestSplitTill(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 2 { t.Error("It should be called twice") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should be called once") + } + if atomic.LoadInt64(&changeNumberRBCalled) != 3 { + t.Error("It should be called twice") + } } func TestByPassingCDN(t *testing.T) { var call int64 var notifyEventCalled int64 + var 
updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} splitMockFetcher := fetcherMock.MockSplitFetcher{ @@ -417,8 +491,18 @@ func TestByPassingCDN(t *testing.T) { splitStorage := mutexmap.NewMMSplitStorage(flagsets.NewFlagSetFilter(nil)) splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } - splitUpdater := NewSplitUpdater(splitStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitUpdater.onDemandFetchBackoffBase = 1 splitUpdater.onDemandFetchBackoffMaxWait = 10 * time.Nanosecond @@ -433,11 +517,16 @@ func TestByPassingCDN(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Error("It should be called twice instead of", atomic.LoadInt64(¬ifyEventCalled)) } + if atomic.LoadInt64(&updateRBCalled) != 12 { + t.Error("It should be called twice instead of", atomic.LoadInt64(&updateRBCalled)) + } + } func TestByPassingCDNLimit(t *testing.T) { var call int64 var notifyEventCalled int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} splitMockFetcher := fetcherMock.MockSplitFetcher{ @@ -484,8 +573,18 @@ func TestByPassingCDNLimit(t *testing.T) { splitStorage := 
mutexmap.NewMMSplitStorage(flagsets.NewFlagSetFilter(nil)) splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } - splitUpdater := NewSplitUpdater(splitStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitUpdater.onDemandFetchBackoffBase = 1 splitUpdater.onDemandFetchBackoffMaxWait = 10 * time.Nanosecond @@ -500,6 +599,9 @@ func TestByPassingCDNLimit(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Error("It should be called twice instead of", atomic.LoadInt64(¬ifyEventCalled)) } + if atomic.LoadInt64(&updateRBCalled) != 21 { + t.Error("It should be called twenty one times instead of", atomic.LoadInt64(&updateRBCalled)) + } } func TestProcessFFChange(t *testing.T) { @@ -516,10 +618,18 @@ func TestProcessFFChange(t *testing.T) { return nil, nil }, } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - fetcher := NewSplitUpdater(ffStorageMock, splitMockFetcher, logger, 
telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) result, _ := fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 12), nil, nil, @@ -534,6 +644,7 @@ func TestProcessFFChange(t *testing.T) { func TestAddOrUpdateFeatureFlagNil(t *testing.T) { var fetchCallCalled int64 + var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) ffStorageMock := mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { @@ -556,7 +667,17 @@ func TestAddOrUpdateFeatureFlagNil(t *testing.T) { NotifyEventCall: func(counterType int) {}, } - fetcher := NewSplitUpdater(ffStorageMock, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), nil, nil, @@ -564,6 +685,9 @@ func TestAddOrUpdateFeatureFlagNil(t *testing.T) { if atomic.LoadInt64(&fetchCallCalled) != 1 { t.Error("Fetch should be called once") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("Fetch should be called once") + } } func TestAddOrUpdateFeatureFlagPcnEquals(t *testing.T) { @@ -593,9 +717,16 @@ func 
TestAddOrUpdateFeatureFlagPcnEquals(t *testing.T) { return nil, nil }, } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - fetcher := NewSplitUpdater(ffStorageMock, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) featureFlag := dtos.SplitDTO{ChangeNumber: 4, Status: Active} @@ -637,9 +768,17 @@ func TestAddOrUpdateFeatureFlagArchive(t *testing.T) { return nil, nil }, } + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - fetcher := NewSplitUpdater(ffStorageMock, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) featureFlag := dtos.SplitDTO{ChangeNumber: 4, Status: Archived} fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( @@ -656,6 +795,7 @@ func TestAddOrUpdateFeatureFlagArchive(t *testing.T) { func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { var fetchCallCalled int64 var updateCalled int64 + var updateRBCalled int64 logger := 
logging.NewLogger(&logging.LoggerOptions{}) ffStorageMock := mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { @@ -682,7 +822,17 @@ func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { appMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) {}, } - fetcher := NewSplitUpdater(ffStorageMock, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), nil, nil, @@ -693,6 +843,9 @@ func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { if atomic.LoadInt64(&updateCalled) != 1 { t.Error("It should update the storage") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } func TestGetActiveFF(t *testing.T) { @@ -701,7 +854,14 @@ func TestGetActiveFF(t *testing.T) { featureFlags = append(featureFlags, dtos.SplitDTO{Status: Active}) featureFlagChanges := &dtos.SplitChangesDTO{FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - s := NewSplitUpdater(mocks.MockSplitStorage{}, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + ruleBasedSegmentUpdater := 
rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) if len(actives) != 2 { @@ -719,7 +879,15 @@ func TestGetInactiveFF(t *testing.T) { featureFlags = append(featureFlags, dtos.SplitDTO{Status: Archived}) featureFlagChanges := &dtos.SplitChangesDTO{FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - s := NewSplitUpdater(mocks.MockSplitStorage{}, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) if len(actives) != 0 { @@ -738,7 +906,15 @@ func TestGetActiveAndInactiveFF(t *testing.T) { featureFlagChanges := &dtos.SplitChangesDTO{ FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - s := NewSplitUpdater(mocks.MockSplitStorage{}, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + + ruleBasedSegmentUpdater := 
rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) if len(actives) != 1 { @@ -752,6 +928,7 @@ func TestGetActiveAndInactiveFF(t *testing.T) { func TestSplitSyncWithSets(t *testing.T) { var call int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set1", "set2"}} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set4"}} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set5", "set1"}} @@ -783,7 +960,18 @@ func TestSplitSyncWithSets(t *testing.T) { splitStorage := mutexmap.NewMMSplitStorage(flagsets.NewFlagSetFilter(nil)) splitStorage.Update([]dtos.SplitDTO{}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - splitUpdater := NewSplitUpdater(splitStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter([]string{"set1", "set2", "set3"})) + + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, 
logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter([]string{"set1", "set2", "set3"})) res, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -803,10 +991,14 @@ func TestSplitSyncWithSets(t *testing.T) { if splitStorage.Split("split3") == nil { t.Error("split3 should be present") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } func TestSplitSyncWithSetsInConfig(t *testing.T) { var call int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set1"}} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set4"}} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set5", "set2"}} @@ -841,7 +1033,18 @@ func TestSplitSyncWithSetsInConfig(t *testing.T) { splitStorage := mutexmap.NewMMSplitStorage(flagSetFilter) splitStorage.Update([]dtos.SplitDTO{}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - splitUpdater := NewSplitUpdater(splitStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagSetFilter) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagSetFilter) res, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ 
-868,10 +1071,20 @@ func TestSplitSyncWithSetsInConfig(t *testing.T) { if s4 == nil { t.Error("split4 should be present") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } func TestProcessMatchers(t *testing.T) { - splitUpdater := NewSplitUpdater(mocks.MockSplitStorage{}, fetcherMock.MockSplitFetcher{}, logging.NewLogger(nil), mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + } + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, logging.NewLogger(nil), mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) splitChange := &dtos.SplitChangesDTO{ FeatureFlags: dtos.FeatureFlagsDTO{Till: 1, Since: 1, Splits: []dtos.SplitDTO{ { diff --git a/tasks/splitsync_test.go b/tasks/splitsync_test.go index 1e83c5fc..0e3bcb08 100644 --- a/tasks/splitsync_test.go +++ b/tasks/splitsync_test.go @@ -12,6 +12,7 @@ import ( "github.com/splitio/go-split-commons/v6/service" fetcherMock "github.com/splitio/go-split-commons/v6/service/mocks" "github.com/splitio/go-split-commons/v6/storage/mocks" + "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/logging" @@ -20,6 +21,7 @@ import ( func TestSplitSyncTask(t *testing.T) { var call int64 var notifyEventCalled int64 + var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: 
"split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} @@ -90,8 +92,21 @@ func TestSplitSyncTask(t *testing.T) { }, } + ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ + ChangeNumberCall: func() int64 { + return -1 + }, + UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + atomic.AddInt64(&updateRBCalled, 1) + }, + } + + ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + + splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitTask := NewFetchSplitsTask( - split.NewSplitUpdater(splitMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + splitUpdater, 1, logging.NewLogger(&logging.LoggerOptions{}), ) @@ -113,4 +128,7 @@ func TestSplitSyncTask(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) < 1 { t.Error("It should be called at least once") } + if atomic.LoadInt64(&updateRBCalled) != 1 { + t.Error("It should update the storage") + } } From 78fd9d5826bb720904e6d91f99afd2b8d635b6c5 Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Tue, 12 Aug 2025 17:02:21 -0300 Subject: [PATCH 3/9] Update imports --- go.mod | 3 +- go.sum | 81 +------------------ .../rulebasedsegment/rulebasedsegment.go | 2 +- 3 files changed, 6 insertions(+), 80 deletions(-) diff --git a/go.mod b/go.mod index 03b3ea1e..3fd8fbe6 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,10 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/kr/pretty v0.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect ) require ( @@ -21,6 
+23,5 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/redis/go-redis/v9 v9.0.4 // indirect - github.com/splitio/go-toolkit/v3 v3.0.1 golang.org/x/exp v0.0.0-20231006140011-7918f672742d ) diff --git a/go.sum b/go.sum index 01902878..06b033a3 100644 --- a/go.sum +++ b/go.sum @@ -1,113 +1,38 @@ -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/bits-and-blooms/bitset v1.3.1 h1:y+qrlmq3XsWi+xZqSaueaE8ry8Y127iMxlMfqcK8p0g= github.com/bits-and-blooms/bitset v1.3.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bits-and-blooms/bloom/v3 v3.3.1 h1:K2+A19bXT8gJR5mU7y+1yW6hsKfNCjcP2uNfLFKncjQ= github.com/bits-and-blooms/bloom/v3 v3.3.1/go.mod h1:bhUUknWd5khVbTe4UgMCSiOOVJzr3tMoijSK3WwvW90= github.com/bsm/ginkgo/v2 v2.7.0 h1:ItPMPH90RbmZJt5GtkcNvIRuGEdwlBItdNVoyzaNQao= github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-redis/redis/v8 v8.0.0/go.mod h1:isLoQT/NFSP7V67lyvM9GmdvLdyZ7pEhsXvvyQtnQTo= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/redis/go-redis/v9 v9.0.4 h1:FC82T+CHJ/Q/PdyLW++GeCO+Ol59Y4T7R4jbgjvktgc= github.com/redis/go-redis/v9 v9.0.4/go.mod h1:WqMKv5vnQbRuZstUwxQI195wHy+t4PuXDOjzMvcuQHk= -github.com/splitio/go-toolkit/v3 v3.0.1 h1:/H2wytH9r4GT4FpVmMWe7wUX99Y67b15fSbfIT1lIt8= -github.com/splitio/go-toolkit/v3 v3.0.1/go.mod h1:HGgawLnM2RlM84zVRbATpPMjF7H6u9CUYG6RlpwOlOk= github.com/splitio/go-toolkit/v5 v5.4.0 h1:g5WFpRhQomnXCmvfsNOWV4s5AuUrWIZ+amM68G8NBKM= github.com/splitio/go-toolkit/v5 v5.4.0/go.mod h1:xYhUvV1gga9/1029Wbp5pjnR6Cy8nvBpjw99wAbsMko= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= github.com/twmb/murmur3 v1.1.6/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -go.opentelemetry.io/otel v0.11.0/go.mod 
h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/synchronizer/worker/rulebasedsegment/rulebasedsegment.go b/synchronizer/worker/rulebasedsegment/rulebasedsegment.go index ec59f925..8a8b98c4 100644 --- 
a/synchronizer/worker/rulebasedsegment/rulebasedsegment.go +++ b/synchronizer/worker/rulebasedsegment/rulebasedsegment.go @@ -4,7 +4,7 @@ import ( "github.com/splitio/go-split-commons/v6/dtos" "github.com/splitio/go-split-commons/v6/engine/validator" "github.com/splitio/go-split-commons/v6/storage" - "github.com/splitio/go-toolkit/v3/logging" + "github.com/splitio/go-toolkit/v5/logging" ) const ( From 5059b9ae604ccfeb58dba087f3081e5cb9d234ed Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Wed, 13 Aug 2025 13:56:05 -0300 Subject: [PATCH 4/9] Rule-based segment update using the ff queue --- dtos/notification.go | 21 +- push/borrowed.go | 1 - push/manager.go | 2 +- push/mocks/sync.go | 5 - push/parser.go | 45 ++- push/parser_test.go | 170 ----------- push/rulebasedsegment.go | 78 ----- push/rulebasedsegment_test.go | 127 --------- synchronizer/local.go | 9 +- synchronizer/local_test.go | 14 +- synchronizer/mocks/mocks.go | 6 - synchronizer/synchronizer.go | 10 - synchronizer/synchronizer_test.go | 51 +--- .../rulebasedsegment/rulebasedsegment.go | 122 -------- .../rulebasedsegment/rulebasedsegment_test.go | 266 ------------------ synchronizer/worker/split/split.go | 79 +++++- synchronizer/worker/split/split_test.go | 69 ++--- tasks/splitsync_test.go | 5 +- 18 files changed, 155 insertions(+), 925 deletions(-) delete mode 100644 push/rulebasedsegment.go delete mode 100644 push/rulebasedsegment_test.go delete mode 100644 synchronizer/worker/rulebasedsegment/rulebasedsegment.go delete mode 100644 synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go diff --git a/dtos/notification.go b/dtos/notification.go index f000b476..1375d099 100644 --- a/dtos/notification.go +++ b/dtos/notification.go @@ -202,6 +202,7 @@ type SplitChangeUpdate struct { BaseUpdate previousChangeNumber *int64 featureFlag *SplitDTO + ruleBasedSegment *RuleBasedSegmentDTO } func NewSplitChangeUpdate(baseUpdate BaseUpdate, pcn *int64, featureFlag *SplitDTO) *SplitChangeUpdate { @@ -212,8 +213,26 
@@ func NewSplitChangeUpdate(baseUpdate BaseUpdate, pcn *int64, featureFlag *SplitD } } +func NewRuleBasedSegmentChangeUpdate(baseUpdate BaseUpdate, pcn *int64, ruleBasedSegment *RuleBasedSegmentDTO) *SplitChangeUpdate { + return &SplitChangeUpdate{ + BaseUpdate: baseUpdate, + previousChangeNumber: pcn, + ruleBasedSegment: ruleBasedSegment, + } +} + // UpdateType always returns UpdateTypeSplitChange for SplitUpdate messages -func (u *SplitChangeUpdate) UpdateType() string { return UpdateTypeSplitChange } +func (u *SplitChangeUpdate) UpdateType() string { + if u.ruleBasedSegment != nil { + return TypeRuleBased + } + return UpdateTypeSplitChange +} + +// GetRuleBased returns rule-based segment +func (u *SplitChangeUpdate) RuleBasedSegment() *RuleBasedSegmentDTO { + return u.ruleBasedSegment +} // String returns the String representation of a split change notification func (u *SplitChangeUpdate) String() string { diff --git a/push/borrowed.go b/push/borrowed.go index 788a7159..d5211f0c 100644 --- a/push/borrowed.go +++ b/push/borrowed.go @@ -6,7 +6,6 @@ import "github.com/splitio/go-split-commons/v6/dtos" type synchronizerInterface interface { SyncAll() error SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error - SynchronizeRuleBasedSegments(ruleBasedChange *dtos.RuleBasedChangeUpdate) error LocalKill(splitName string, defaultTreatment string, changeNumber int64) SynchronizeSegment(segmentName string, till *int64) error StartPeriodicFetching() diff --git a/push/manager.go b/push/manager.go index 7a777f05..47554030 100644 --- a/push/manager.go +++ b/push/manager.go @@ -78,7 +78,7 @@ func NewManager( statusTracker := NewStatusTracker(logger, runtimeTelemetry) parser := NewNotificationParserImpl(logger, processor.ProcessSplitChangeUpdate, processor.ProcessSplitKillUpdate, processor.ProcessSegmentChangeUpdate, - statusTracker.HandleControl, statusTracker.HandleOccupancy, statusTracker.HandleAblyError, processor.ProcessLargeSegmentChangeUpdate, 
processor.ProcessorRuleBasedSegmentChangeUpdate) + statusTracker.HandleControl, statusTracker.HandleOccupancy, statusTracker.HandleAblyError, processor.ProcessLargeSegmentChangeUpdate) manager := &ManagerImpl{ authAPI: authAPI, diff --git a/push/mocks/sync.go b/push/mocks/sync.go index 57157cbf..af5ce506 100644 --- a/push/mocks/sync.go +++ b/push/mocks/sync.go @@ -7,7 +7,6 @@ import ( type LocalSyncMock struct { SyncAllCall func() error SynchronizeFeatureFlagsCall func(ffChange *dtos.SplitChangeUpdate) error - SynchronizeRuleBasedSegmentsCall func(rbChange *dtos.RuleBasedChangeUpdate) error LocalKillCall func(splitName string, defaultTreatment string, changeNumber int64) SynchronizeSegmentCall func(segmentName string, till *int64) error StartPeriodicFetchingCall func() @@ -50,10 +49,6 @@ func (l *LocalSyncMock) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate return l.SynchronizeFeatureFlagsCall(ffChange) } -func (l *LocalSyncMock) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { - return l.SynchronizeRuleBasedSegmentsCall(rbChange) -} - func (l *LocalSyncMock) SynchronizeLargeSegment(name string, till *int64) error { return l.SynchronizeLargeSegmentCall(name, till) } diff --git a/push/parser.go b/push/parser.go index d2ac3714..290372b7 100644 --- a/push/parser.go +++ b/push/parser.go @@ -34,16 +34,15 @@ type NotificationParser interface { // NotificationParserImpl implementas the NotificationParser interface type NotificationParserImpl struct { - dataUtils DataUtils - logger logging.LoggerInterface - onSplitUpdate func(*dtos.SplitChangeUpdate) error - onSplitKill func(*dtos.SplitKillUpdate) error - onSegmentUpdate func(*dtos.SegmentChangeUpdate) error - onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error - onRuleBasedsegmentUpdate func(*dtos.RuleBasedChangeUpdate) error - onControlUpdate func(*dtos.ControlUpdate) *int64 - onOccupancyMesage func(*dtos.OccupancyMessage) *int64 - onAblyError func(*dtos.AblyError) *int64 + 
dataUtils DataUtils + logger logging.LoggerInterface + onSplitUpdate func(*dtos.SplitChangeUpdate) error + onSplitKill func(*dtos.SplitKillUpdate) error + onSegmentUpdate func(*dtos.SegmentChangeUpdate) error + onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error + onControlUpdate func(*dtos.ControlUpdate) *int64 + onOccupancyMesage func(*dtos.OccupancyMessage) *int64 + onAblyError func(*dtos.AblyError) *int64 } func NewNotificationParserImpl( @@ -54,19 +53,17 @@ func NewNotificationParserImpl( onControlUpdate func(*dtos.ControlUpdate) *int64, onOccupancyMessage func(*dtos.OccupancyMessage) *int64, onAblyError func(*dtos.AblyError) *int64, - onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error, - onRuleBasedSegmentUpdate func(*dtos.RuleBasedChangeUpdate) error) *NotificationParserImpl { + onLargeSegmentUpdate func(*dtos.LargeSegmentChangeUpdate) error) *NotificationParserImpl { return &NotificationParserImpl{ - dataUtils: NewDataUtilsImpl(), - logger: loggerInterface, - onSplitUpdate: onSplitUpdate, - onSplitKill: onSplitKill, - onSegmentUpdate: onSegmentUpdate, - onControlUpdate: onControlUpdate, - onOccupancyMesage: onOccupancyMessage, - onAblyError: onAblyError, - onLargeSegmentUpdate: onLargeSegmentUpdate, - onRuleBasedsegmentUpdate: onRuleBasedSegmentUpdate, + dataUtils: NewDataUtilsImpl(), + logger: loggerInterface, + onSplitUpdate: onSplitUpdate, + onSplitKill: onSplitKill, + onSegmentUpdate: onSegmentUpdate, + onControlUpdate: onControlUpdate, + onOccupancyMesage: onOccupancyMessage, + onAblyError: onAblyError, + onLargeSegmentUpdate: onLargeSegmentUpdate, } } @@ -146,9 +143,9 @@ func (p *NotificationParserImpl) parseUpdate(data *genericData, nested *genericM case dtos.UpdateTypeRuleBasedChange: ruleBased := p.processRuleBasedMessage(nested) if ruleBased == nil { - return nil, p.onRuleBasedsegmentUpdate(dtos.NewRuleBasedChangeUpdate(base, nil, nil)) + return nil, p.onSplitUpdate(dtos.NewRuleBasedSegmentChangeUpdate(base, nil, nil)) } - 
return nil, p.onRuleBasedsegmentUpdate(dtos.NewRuleBasedChangeUpdate(base, &nested.PreviousChangeNumber, ruleBased)) + return nil, p.onSplitUpdate(dtos.NewRuleBasedSegmentChangeUpdate(base, &nested.PreviousChangeNumber, ruleBased)) default: // TODO: log full event in debug mode return nil, fmt.Errorf("invalid update type: %s", nested.Type) diff --git a/push/parser_test.go b/push/parser_test.go index 2b5dbce0..4591fc1d 100644 --- a/push/parser_test.go +++ b/push/parser_test.go @@ -1,7 +1,6 @@ package push import ( - "encoding/base64" "encoding/json" "testing" @@ -20,174 +19,6 @@ const FF_SHOULD_BE_MAURO_JAVA = "feature flag should be mauro_java" const FF_DEFINITION_ZLIB = "eJzMk99u2kwQxV8lOtdryQZj8N6hD5QPlThSTVNVEUKDPYZt1jZar1OlyO9emf8lVFWv2ss5zJyd82O8hTWUZSqZvW04opwhUVdsIKBSSKR+10vS1HWW7pIdz2NyBjRwHS8IXEopTLgbQqDYT+ZUm3LxlV4J4mg81LpMyKqygPRc94YeM6eQTtjphp4fegLVXvD6Qdjt9wPXF6gs2bqCxPC/2eRpDIEXpXXblpGuWCDljGptZ4bJ5lxYSJRZBoFkTcWKozpfsoH0goHfCXpB6PfcngDpVQnZEUjKIlOr2uwWqiC3zU5L1aF+3p7LFhUkPv8/mY2nk3gGgZxssmZzb8p6A9n25ktVtA9iGI3ODXunQ3HDp+AVWT6F+rZWlrWq7MN+YkSWWvuTDvkMSnNV7J6oTdl6qKTEvGnmjcCGjL2IYC/ovPYgUKnvvPtbmrmApiVryLM7p2jE++AfH6fTx09/HvuF32LWnNjStM0Xh3c8ukZcsZlEi3h8/zCObsBpJ0acqYLTmFdtqitK1V6NzrfpdPBbLmVx4uK26e27izpDu/r5yf/16AXun2Cr4u6w591xw7+LfDidLj6Mv8TXwP8xbofv/c7UmtHMmx8BAAD//0fclvU=" const FF_DEFINITION_GZIP = "H4sIAAAAAAAA/8yT327aTBDFXyU612vJxoTgvUMfKB8qcaSapqoihAZ7DNusvWi9TpUiv3tl/pdQVb1qL+cwc3bOj/EGzlKeq3T6tuaYCoZEXbGFgMogkXXDIM0y31v4C/aCgMnrU9/3gl7Pp4yilMMIAuVusqDamvlXeiWIg/FAa5OSU6aEDHz/ip4wZ5Be1AmjoBsFAtVOCO56UXh31/O7ApUjV1eQGPw3HT+NIPCitG7bctIVC2ScU63d1DK5gksHCZPnEEhXVC45rosFW8ig1++GYej3g85tJEB6aSA7Aqkpc7Ws7XahCnLTbLVM7evnzalsUUHi8//j6WgyTqYQKMilK7b31tRryLa3WKiyfRCDeHhq2Dntiys+JS/J8THUt5VyrFXlHnYTQ3LU2h91yGdQVqhy+0RtTeuhUoNZ08wagTVZdxbBndF5vYVApb7z9m9pZgKaFqwhT+6coRHvg398nEweP/157Bd+S1hz6oxtm88O73B0jbhgM47nyej+YRRfgdNODDlXJWcJL9tUF5SqnRqfbtPr4LdcTHnk4rfp3buLOkG7+Pmp++vRM9w/wVblzX7Pm8OGfxf5YDKZfxh9SS6B/2Pc9t/7ja01o5k1PwIAAP//uTipVskEAAA=" -func 
TestParseRuleBasedSegmentUpdate(t *testing.T) { - event := &sseMocks.RawEventMock{ - IDCall: func() string { return "abc" }, - EventCall: func() string { return dtos.SSEEventTypeMessage }, - DataCall: func() string { - ruleBasedSegment := dtos.RuleBasedSegmentDTO{ - Name: "rb1", - Status: "ACTIVE", - Conditions: []dtos.RuleBasedConditionDTO{ - { - MatcherGroup: dtos.MatcherGroupDTO{ - Matchers: []dtos.MatcherDTO{ - { - MatcherType: "IN_SEGMENT", - UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ - SegmentName: "segment1", - }, - }, - }, - }, - }, - }, - } - ruleBasedJSON, _ := json.Marshal(ruleBasedSegment) - def := base64.StdEncoding.EncodeToString(ruleBasedJSON) - compressType := 0 - updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeRuleBasedChange, - ChangeNumber: 123, - Definition: &def, - CompressType: &compressType, - }) - mainJSON, _ := json.Marshal(genericData{ - Timestamp: 123, - Data: string(updateJSON), - Channel: "sarasa_rule_based_segments", - }) - return string(mainJSON) - }, - IsErrorCall: func() bool { return false }, - IsEmptyCall: func() bool { return false }, - RetryCall: func() int64 { return 0 }, - } - - logger := logging.NewLogger(nil) - parser := &NotificationParserImpl{ - dataUtils: NewDataUtilsImpl(), - logger: logger, - onRuleBasedsegmentUpdate: func(u *dtos.RuleBasedChangeUpdate) error { - if u.ChangeNumber() != 123 { - t.Error("Change number should be 123. Got:", u.ChangeNumber()) - } - if u.Channel() != "sarasa_rule_based_segments" { - t.Error("Channel should be sarasa_rule_based_segments. Got:", u.Channel()) - } - if u.RuleBasedSegment() == nil { - t.Error("Rule-based segment should not be nil") - } - if u.RuleBasedSegment().Name != "rb1" { - t.Error("Rule-based segment name should be rb1. Got:", u.RuleBasedSegment().Name) - } - if len(u.RuleBasedSegment().Conditions) != 1 { - t.Error("Rule-based segment should have 1 condition. 
Got:", len(u.RuleBasedSegment().Conditions)) - } - return nil - }, - } - - _, err := parser.ParseAndForward(event) - if err != nil { - t.Error("No error should have been returned. Got:", err) - } -} - -func TestParseRuleBasedSegmentUpdateWithPreviousChangeNumber(t *testing.T) { - event := &sseMocks.RawEventMock{ - IDCall: func() string { return "abc" }, - EventCall: func() string { return dtos.SSEEventTypeMessage }, - DataCall: func() string { - ruleBasedSegment := dtos.RuleBasedSegmentDTO{ - Name: "rb1", - Status: "ACTIVE", - } - var previousChangeNumber int64 = 100 - ruleBasedJSON, _ := json.Marshal(ruleBasedSegment) - def := base64.StdEncoding.EncodeToString(ruleBasedJSON) - compressType := 0 - updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeRuleBasedChange, - ChangeNumber: 123, - PreviousChangeNumber: previousChangeNumber, - Definition: &def, - CompressType: &compressType, - }) - mainJSON, _ := json.Marshal(genericData{ - Timestamp: 123, - Data: string(updateJSON), - Channel: "sarasa_rule_based_segments", - }) - return string(mainJSON) - }, - IsErrorCall: func() bool { return false }, - IsEmptyCall: func() bool { return false }, - RetryCall: func() int64 { return 0 }, - } - - logger := logging.NewLogger(nil) - parser := &NotificationParserImpl{ - dataUtils: NewDataUtilsImpl(), - logger: logger, - onRuleBasedsegmentUpdate: func(u *dtos.RuleBasedChangeUpdate) error { - if u.ChangeNumber() != 123 { - t.Error("Change number should be 123. Got:", u.ChangeNumber()) - } - if u.PreviousChangeNumber() == nil { - t.Error("Previous change number should not be nil") - } else if *u.PreviousChangeNumber() != 100 { - t.Error("Previous change number should be 100. Got:", *u.PreviousChangeNumber()) - } - return nil - }, - } - - _, err := parser.ParseAndForward(event) - if err != nil { - t.Error("No error should have been returned. 
Got:", err) - } -} - -func TestParseRuleBasedSegmentUpdateWithNilSegment(t *testing.T) { - event := &sseMocks.RawEventMock{ - IDCall: func() string { return "abc" }, - EventCall: func() string { return dtos.SSEEventTypeMessage }, - DataCall: func() string { - updateJSON, _ := json.Marshal(genericMessageData{ - Type: dtos.UpdateTypeRuleBasedChange, - ChangeNumber: 123, - }) - mainJSON, _ := json.Marshal(genericData{ - Timestamp: 123, - Data: string(updateJSON), - Channel: "sarasa_rule_based_segments", - }) - return string(mainJSON) - }, - IsErrorCall: func() bool { return false }, - IsEmptyCall: func() bool { return false }, - RetryCall: func() int64 { return 0 }, - } - - logger := logging.NewLogger(nil) - parser := &NotificationParserImpl{ - dataUtils: NewDataUtilsImpl(), - logger: logger, - onRuleBasedsegmentUpdate: func(u *dtos.RuleBasedChangeUpdate) error { - if u.RuleBasedSegment() != nil { - t.Error("Rule-based segment should be nil") - } - return nil - }, - } - - _, err := parser.ParseAndForward(event) - if err != nil { - t.Error("No error should have been returned. 
Got:", err) - } -} - func TestParseSplitUpdate(t *testing.T) { event := &sseMocks.RawEventMock{ IDCall: func() string { return "abc" }, @@ -811,7 +642,6 @@ func TestNewNotificationParserImpl(t *testing.T) { return common.Int64Ref(123) }, nil, - nil, nil) if status, err := parser.ParseAndForward(event); *status != 123 || err != nil { diff --git a/push/rulebasedsegment.go b/push/rulebasedsegment.go deleted file mode 100644 index 33fac915..00000000 --- a/push/rulebasedsegment.go +++ /dev/null @@ -1,78 +0,0 @@ -package push - -import ( - "errors" - "fmt" - - "github.com/splitio/go-split-commons/v6/dtos" - "github.com/splitio/go-toolkit/v5/logging" - "github.com/splitio/go-toolkit/v5/struct/traits/lifecycle" -) - -// SplitUpdateWorker struct -type RuleBasedUpdateWorker struct { - ruleBasedQueue chan dtos.RuleBasedChangeUpdate - sync synchronizerInterface - logger logging.LoggerInterface - lifecycle lifecycle.Manager -} - -// NewRuleBasedUpdateWorker creates SplitRuleBasedWorker -func NewRuleBasedUpdateWorker( - ruleBasedQueue chan dtos.RuleBasedChangeUpdate, - synchronizer synchronizerInterface, - logger logging.LoggerInterface, -) (*RuleBasedUpdateWorker, error) { - if cap(ruleBasedQueue) < 5000 { - return nil, errors.New("") - } - - worker := &RuleBasedUpdateWorker{ - ruleBasedQueue: ruleBasedQueue, - sync: synchronizer, - logger: logger, - } - worker.lifecycle.Setup() - return worker, nil -} - -// Start starts worker -func (s *RuleBasedUpdateWorker) Start() { - if !s.lifecycle.BeginInitialization() { - s.logger.Info("Rule-based worker is already running") - return - } - - s.logger.Debug("Started RuleBasedUpdateWorker") - go func() { - defer s.lifecycle.ShutdownComplete() - s.lifecycle.InitializationComplete() - for { - select { - case ruleBasedUpdate := <-s.ruleBasedQueue: - s.logger.Debug("Received Rule-based update and proceding to perform fetch") - s.logger.Debug(fmt.Sprintf("ChangeNumber: %d", ruleBasedUpdate.ChangeNumber())) - err := 
s.sync.SynchronizeRuleBasedSegments(&ruleBasedUpdate) - if err != nil { - s.logger.Error(err) - } - case <-s.lifecycle.ShutdownRequested(): - return - } - } - }() -} - -// Stop stops worker -func (s *RuleBasedUpdateWorker) Stop() { - if !s.lifecycle.BeginShutdown() { - s.logger.Debug("Rule-based worker not runnning. Ignoring.") - return - } - s.lifecycle.AwaitShutdownComplete() -} - -// IsRunning indicates if worker is running or not -func (s *RuleBasedUpdateWorker) IsRunning() bool { - return s.lifecycle.IsRunning() -} diff --git a/push/rulebasedsegment_test.go b/push/rulebasedsegment_test.go deleted file mode 100644 index cc75b188..00000000 --- a/push/rulebasedsegment_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package push - -import ( - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/splitio/go-split-commons/v6/dtos" - "github.com/splitio/go-toolkit/v5/logging" -) - -type mockSynchronizer struct { - syncCalled int64 - syncError atomic.Value -} - -func (m *mockSynchronizer) SyncAll() error { return nil } -func (m *mockSynchronizer) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error { - return nil -} -func (m *mockSynchronizer) SynchronizeRuleBasedSegments(update *dtos.RuleBasedChangeUpdate) error { - atomic.AddInt64(&m.syncCalled, 1) - if err := m.syncError.Load(); err != nil && err.(error) != nil { - return err.(error) - } - return nil -} -func (m *mockSynchronizer) LocalKill(splitName string, defaultTreatment string, changeNumber int64) {} -func (m *mockSynchronizer) SynchronizeSegment(segmentName string, till *int64) error { return nil } -func (m *mockSynchronizer) StartPeriodicFetching() {} -func (m *mockSynchronizer) StopPeriodicFetching() {} -func (m *mockSynchronizer) StartPeriodicDataRecording() {} -func (m *mockSynchronizer) StopPeriodicDataRecording() {} -func (m *mockSynchronizer) SynchronizeLargeSegment(name string, till *int64) error { return nil } -func (m *mockSynchronizer) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO 
*dtos.LargeSegmentRFDResponseDTO) error { - return nil -} - -func TestRuleBasedUpdateWorkerCreation(t *testing.T) { - // Test with invalid queue size - smallQueue := make(chan dtos.RuleBasedChangeUpdate, 100) - _, err := NewRuleBasedUpdateWorker(smallQueue, nil, nil) - if err == nil { - t.Error("Should return error for small queue") - } - - // Test with valid queue size - validQueue := make(chan dtos.RuleBasedChangeUpdate, 5000) - worker, err := NewRuleBasedUpdateWorker(validQueue, nil, logging.NewLogger(&logging.LoggerOptions{})) - if err != nil { - t.Error("Should not return error for valid queue") - } - if worker == nil { - t.Error("Should return valid worker") - } -} - -func TestRuleBasedUpdateWorkerStartStop(t *testing.T) { - queue := make(chan dtos.RuleBasedChangeUpdate, 5000) - synchronizer := &mockSynchronizer{} - worker, _ := NewRuleBasedUpdateWorker(queue, synchronizer, logging.NewLogger(&logging.LoggerOptions{})) - - if worker.IsRunning() { - t.Error("Worker should not be running before Start") - } - - worker.Start() - time.Sleep(100 * time.Millisecond) // Wait for initialization - if !worker.IsRunning() { - t.Error("Worker should be running after Start") - } - - // Try to start again - worker.Start() - if !worker.IsRunning() { - t.Error("Worker should still be running after second Start") - } - - worker.Stop() - if worker.IsRunning() { - t.Error("Worker should not be running after Stop") - } - - // Try to stop again - worker.Stop() - if worker.IsRunning() { - t.Error("Worker should still not be running after second Stop") - } -} - -func TestRuleBasedUpdateWorkerProcessing(t *testing.T) { - queue := make(chan dtos.RuleBasedChangeUpdate, 5000) - synchronizer := &mockSynchronizer{} - synchronizer.syncError.Store(errors.New("")) - worker, _ := NewRuleBasedUpdateWorker(queue, synchronizer, logging.NewLogger(&logging.LoggerOptions{})) - - worker.Start() - - // Test successful update - var changeNumber int64 = 123 - queue <- *dtos.NewRuleBasedChangeUpdate( - 
dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 123), - &changeNumber, - &dtos.RuleBasedSegmentDTO{Name: "test"}, - ) - - time.Sleep(100 * time.Millisecond) - if atomic.LoadInt64(&synchronizer.syncCalled) != 1 { - t.Error("Synchronizer should be called once") - } - - // Test update with error - synchronizer.syncError.Store(errors.New("some error")) - queue <- *dtos.NewRuleBasedChangeUpdate( - dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 124), - &changeNumber, - &dtos.RuleBasedSegmentDTO{Name: "test"}, - ) - - time.Sleep(100 * time.Millisecond) - if atomic.LoadInt64(&synchronizer.syncCalled) != 2 { - t.Error("Synchronizer should be called twice") - } - - worker.Stop() -} diff --git a/synchronizer/local.go b/synchronizer/local.go index 4f8ee55a..004a6012 100644 --- a/synchronizer/local.go +++ b/synchronizer/local.go @@ -8,7 +8,6 @@ import ( "github.com/splitio/go-split-commons/v6/healthcheck/application" "github.com/splitio/go-split-commons/v6/service/api" "github.com/splitio/go-split-commons/v6/storage" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/tasks" @@ -33,14 +32,13 @@ type LocalConfig struct { } // NewLocal creates new Local -func NewLocal(cfg *LocalConfig, splitAPI *api.SplitAPI, splitStorage storage.SplitStorage, segmentStorage storage.SegmentStorage, ruleBasedStorage storage.RuleBasedSegmentsStorage, ruleBasedSegmentUpdater rulebasedsegment.UpdaterImpl, logger logging.LoggerInterface, runtimeTelemetry storage.TelemetryRuntimeProducer, hcMonitor application.MonitorProducerInterface) Synchronizer { - splitUpdater := split.NewSplitUpdater(splitStorage, ruleBasedStorage, ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, runtimeTelemetry, hcMonitor, flagsets.NewFlagSetFilter(cfg.FlagSets)) +func NewLocal(cfg *LocalConfig, splitAPI 
*api.SplitAPI, splitStorage storage.SplitStorage, segmentStorage storage.SegmentStorage, ruleBasedStorage storage.RuleBasedSegmentsStorage, logger logging.LoggerInterface, runtimeTelemetry storage.TelemetryRuntimeProducer, hcMonitor application.MonitorProducerInterface) Synchronizer { + splitUpdater := split.NewSplitUpdater(splitStorage, ruleBasedStorage, splitAPI.SplitFetcher, logger, runtimeTelemetry, hcMonitor, flagsets.NewFlagSetFilter(cfg.FlagSets)) splitUpdater.SetRuleBasedSegmentStorage(ruleBasedStorage) workers := Workers{ SplitUpdater: splitUpdater, } - workers.RuleBasedSegmentUpdater = rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedStorage, logger) if cfg.SegmentDirectory != "" { workers.SegmentUpdater = segment.NewSegmentUpdater(splitStorage, segmentStorage, splitAPI.SegmentFetcher, logger, runtimeTelemetry, hcMonitor) } @@ -126,6 +124,3 @@ func (s *Local) LocalKill(splitName string, defaultTreatment string, changeNumbe // SynchronizeFeatureFlags no logic attached for localhost mode func (s *Local) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error { return nil } - -// SynchronizeRuleBasedsegments no logic attached for localhost mode -func (s *Local) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { return nil } diff --git a/synchronizer/local_test.go b/synchronizer/local_test.go index bdce1690..ef89ed92 100644 --- a/synchronizer/local_test.go +++ b/synchronizer/local_test.go @@ -13,7 +13,6 @@ import ( "github.com/splitio/go-split-commons/v6/service/api" httpMocks "github.com/splitio/go-split-commons/v6/service/mocks" "github.com/splitio/go-split-commons/v6/storage/mocks" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-toolkit/v5/logging" ) @@ -49,12 +48,9 @@ func TestLocalSyncAllError(t *testing.T) { return -1 }, } - ruleBasedSegmentUpdater := 
rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - splitUpdater := split.NewSplitUpdater( splitMockStorage, ruleBasedSegmentMockStorage, - *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, @@ -64,8 +60,7 @@ func TestLocalSyncAllError(t *testing.T) { splitUpdater.SetRuleBasedSegmentStorage(ruleBasedSegmentMockStorage) workers := Workers{ - SplitUpdater: splitUpdater, - RuleBasedSegmentUpdater: rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logger), + SplitUpdater: splitUpdater, } syncForTest := &Local{ @@ -136,9 +131,7 @@ func TestLocalSyncAllOk(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - syncForTest := NewLocal(&LocalConfig{}, &splitAPI, splitMockStorage, segmentMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, logger, telemetryMockStorage, appMonitorMock) + syncForTest := NewLocal(&LocalConfig{}, &splitAPI, splitMockStorage, segmentMockStorage, ruleBasedSegmentMockStorage, logger, telemetryMockStorage, appMonitorMock) err := syncForTest.SyncAll() if err != nil { t.Error("It should not return error") @@ -203,9 +196,8 @@ func TestLocalPeriodicFetching(t *testing.T) { atomic.AddInt64(¬ifyEventCalled, 1) }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - syncForTest := NewLocal(&LocalConfig{RefreshEnabled: true, SplitPeriod: 1}, &splitAPI, splitMockStorage, segmentMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, logger, telemetryMockStorage, appMonitorMock) + syncForTest := NewLocal(&LocalConfig{RefreshEnabled: true, SplitPeriod: 1}, &splitAPI, splitMockStorage, segmentMockStorage, ruleBasedSegmentMockStorage, logger, telemetryMockStorage, appMonitorMock) 
syncForTest.StartPeriodicFetching() time.Sleep(time.Millisecond * 1500) if atomic.LoadInt64(&splitFetchCalled) != 1 { diff --git a/synchronizer/mocks/mocks.go b/synchronizer/mocks/mocks.go index a7184893..4579a8bb 100644 --- a/synchronizer/mocks/mocks.go +++ b/synchronizer/mocks/mocks.go @@ -19,7 +19,6 @@ type MockSynchronizer struct { RefreshRatesCall func() (time.Duration, time.Duration) SynchronizeLargeSegmentCall func(name string, till *int64) error SynchronizeLargeSegmentUpdateCall func(lsRFDResponseDTO *dtos.LargeSegmentRFDResponseDTO) error - SynchronizeRuleBasedSegmentsCall func(rbChange *dtos.RuleBasedChangeUpdate) error } // SyncAll mock @@ -76,8 +75,3 @@ func (m *MockSynchronizer) SynchronizeLargeSegment(name string, till *int64) err func (m *MockSynchronizer) SynchronizeLargeSegmentUpdate(lsRFDResponseDTO *dtos.LargeSegmentRFDResponseDTO) error { return m.SynchronizeLargeSegmentUpdateCall(lsRFDResponseDTO) } - -// SynchronizeRuleBasedSegments call -func (m *MockSynchronizer) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { - return m.SynchronizeRuleBasedSegmentsCall(rbChange) -} diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index b37d597a..07124df2 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -9,7 +9,6 @@ import ( "github.com/splitio/go-split-commons/v6/synchronizer/worker/impression" "github.com/splitio/go-split-commons/v6/synchronizer/worker/impressionscount" "github.com/splitio/go-split-commons/v6/synchronizer/worker/largesegment" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/tasks" @@ -38,7 +37,6 @@ type Workers struct { SplitUpdater split.Updater SegmentUpdater segment.Updater LargeSegmentUpdater largesegment.Updater - RuleBasedSegmentUpdater 
rulebasedsegment.Updater TelemetryRecorder telemetry.TelemetrySynchronizer ImpressionRecorder impression.ImpressionRecorder EventRecorder event.EventRecorder @@ -49,7 +47,6 @@ type Workers struct { type Synchronizer interface { SyncAll() error SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) error - SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error LocalKill(splitName string, defaultTreatment string, changeNumber int64) SynchronizeSegment(segmentName string, till *int64) error StartPeriodicFetching() @@ -241,13 +238,6 @@ func (s *SynchronizerImpl) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpd return err } -// SynchronizeRuleBasedSegments syncs rule-based segments -func (s *SynchronizerImpl) SynchronizeRuleBasedSegments(rbChange *dtos.RuleBasedChangeUpdate) error { - result, err := s.workers.RuleBasedSegmentUpdater.SynchronizeRuleBasedSegment(rbChange) - s.synchronizeSegmentsAfterSplitAndRBSync(result.ReferencedSegments) - return err -} - func (s *SynchronizerImpl) dataFlusher() { for { msg := <-s.inMememoryFullQueue diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index f896a9e0..5ffa5052 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -21,7 +21,6 @@ import ( syncMocks "github.com/splitio/go-split-commons/v6/synchronizer/mocks" "github.com/splitio/go-split-commons/v6/synchronizer/worker/event" "github.com/splitio/go-split-commons/v6/synchronizer/worker/impression" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/segment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/tasks" @@ -48,9 +47,8 @@ func createSplitUpdater(splitMockStorage storageMock.MockSplitStorage, splitAPI UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { }, } - ruleBasedSegmentUpdater := 
rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) return splitUpdater } @@ -83,17 +81,14 @@ func TestSyncAllErrorSplits(t *testing.T) { ChangeNumberCall: func() int64 { return -1 }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) workers := Workers{ - SplitUpdater: splitUpdater, - SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, storageMock.MockSegmentStorage{}, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), - EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), - ImpressionRecorder: impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), - TelemetryRecorder: telemetry.NewTelemetrySynchronizer(telemetryMockStorage, nil, nil, nil, nil, dtos.Metadata{}, telemetryMockStorage), - RuleBasedSegmentUpdater: 
rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logger), + SplitUpdater: splitUpdater, + SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, storageMock.MockSegmentStorage{}, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), + EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), + ImpressionRecorder: impression.NewRecorderSingle(storageMock.MockImpressionStorage{}, splitAPI.ImpressionRecorder, logger, dtos.Metadata{}, conf.ImpressionsModeDebug, telemetryMockStorage), + TelemetryRecorder: telemetry.NewTelemetrySynchronizer(telemetryMockStorage, nil, nil, nil, nil, dtos.Metadata{}, telemetryMockStorage), } splitTasks := SplitTasks{ EventSyncTask: tasks.NewRecordEventsTask(workers.EventRecorder, advanced.EventsBulkSize, 10, logger), @@ -571,10 +566,8 @@ func TestSplitUpdateWorkerCNGreaterThanFFChange(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), } splitTasks := SplitTasks{ @@ -632,10 +625,8 @@ func TestSplitUpdateWorkerStorageCNEqualsFFCN(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - 
SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), } splitTasks := SplitTasks{ @@ -700,10 +691,8 @@ func TestSplitUpdateWorkerFFPcnEqualsFFNotNil(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryStorage, appMonitorMock), } splitTasks := SplitTasks{ @@ -794,10 +783,8 @@ func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, 
telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, hcMonitorMock), } splitTasks := SplitTasks{ @@ -877,10 +864,8 @@ func TestSplitUpdateWorkerFFIsNil(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, hcMonitorMock), } splitTasks := SplitTasks{ @@ -961,10 +946,8 @@ func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, hcMonitorMock), } splitTasks := SplitTasks{ @@ -1018,10 +1001,8 @@ func TestLocalKill(t *testing.T) { }, } - ruleBasedSegmentUpdater 
:= rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, storageMock.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, storageMock.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)), } splitTasks := SplitTasks{ SplitSyncTask: tasks.NewFetchSplitsTask(workers.SplitUpdater, 1, logger), @@ -1099,10 +1080,8 @@ func TestSplitUpdateWithReferencedSegments(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - workers := Workers{ - SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), + SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), SegmentUpdater: segment.NewSegmentUpdater(splitMockStorage, segmentMockStorage, splitAPI.SegmentFetcher, logger, telemetryMockStorage, appMonitorMock), EventRecorder: event.NewEventRecorderSingle(storageMock.MockEventStorage{}, splitAPI.EventRecorder, logger, dtos.Metadata{}, telemetryMockStorage), TelemetryRecorder: telemetry.NewTelemetrySynchronizer(telemetryMockStorage, nil, nil, nil, nil, dtos.Metadata{}, telemetryMockStorage), diff --git a/synchronizer/worker/rulebasedsegment/rulebasedsegment.go b/synchronizer/worker/rulebasedsegment/rulebasedsegment.go deleted file mode 100644 index 8a8b98c4..00000000 --- 
a/synchronizer/worker/rulebasedsegment/rulebasedsegment.go +++ /dev/null @@ -1,122 +0,0 @@ -package rulebasedsegment - -import ( - "github.com/splitio/go-split-commons/v6/dtos" - "github.com/splitio/go-split-commons/v6/engine/validator" - "github.com/splitio/go-split-commons/v6/storage" - "github.com/splitio/go-toolkit/v5/logging" -) - -const ( - Active = "ACTIVE" - Archived = "ARCHIVED" - TypeStandard = "standard" - TypeRuleBased = "rule-based" - TypeLarge = "large" - matcherTypeInSegment = "IN_SEGMENT" -) - -// Updater interface -type Updater interface { - SynchronizeRuleBasedSegment(rbChange *dtos.RuleBasedChangeUpdate) (*UpdateResult, error) -} - -// UpdateResult encapsulates information regarding the split update performed -type UpdateResult struct { - ReferencedSegments []string - NewChangeNumber int64 - RequiresFetch bool -} - -// UpdaterImpl struct for split sync -type UpdaterImpl struct { - ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage - logger logging.LoggerInterface -} - -// NewRuleBasedUpdater creates new split synchronizer for processing rule-based updates -func NewRuleBasedSegmentUpdater( - ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage, - logger logging.LoggerInterface, -) *UpdaterImpl { - return &UpdaterImpl{ - ruleBasedSegmentStorage: ruleBasedSegmentStorage, - logger: logger, - } -} - -func (s *UpdaterImpl) SynchronizeRuleBasedSegment(ruleBasedChange *dtos.RuleBasedChangeUpdate) (*UpdateResult, error) { - result := s.processRuleBasedChangeUpdate(ruleBasedChange) - return result, nil -} - -func (s *UpdaterImpl) ProcessUpdate(splitChanges *dtos.SplitChangesDTO) []string { - activeRB, inactiveRB, segments := s.processRuleBasedSegmentChanges(splitChanges) - // Add/Update active splits - s.ruleBasedSegmentStorage.Update(activeRB, inactiveRB, splitChanges.RuleBasedSegments.Till) - return segments -} - -func (s *UpdaterImpl) processRuleBasedSegmentChanges(splitChanges *dtos.SplitChangesDTO) ([]dtos.RuleBasedSegmentDTO, 
[]dtos.RuleBasedSegmentDTO, []string) { - toRemove := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) - toAdd := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) - segments := make([]string, 0) - for _, rbSegment := range splitChanges.RuleBasedSegments.RuleBasedSegments { - if rbSegment.Status == Active { - validator.ProcessRBMatchers(&rbSegment, s.logger) - toAdd = append(toAdd, rbSegment) - segments = append(segments, s.getSegments(&rbSegment)...) - } else { - toRemove = append(toRemove, rbSegment) - } - } - return toAdd, toRemove, segments -} - -func addIfNotExists(segments []string, seen map[string]struct{}, name string) []string { - if _, exists := seen[name]; !exists { - seen[name] = struct{}{} - segments = append(segments, name) - } - return segments -} - -func (s *UpdaterImpl) getSegments(ruleBasedSegment *dtos.RuleBasedSegmentDTO) []string { - seen := make(map[string]struct{}) - segments := make([]string, 0) - - for _, segment := range ruleBasedSegment.Excluded.Segments { - if segment.Type == TypeStandard { - segments = addIfNotExists(segments, seen, segment.Name) - } - } - - for _, cond := range ruleBasedSegment.Conditions { - for _, matcher := range cond.MatcherGroup.Matchers { - if matcher.MatcherType == matcherTypeInSegment && matcher.UserDefinedSegment != nil { - segments = addIfNotExists(segments, seen, matcher.UserDefinedSegment.SegmentName) - } - } - } - - return segments -} - -func (s *UpdaterImpl) processRuleBasedChangeUpdate(ruleBasedChange *dtos.RuleBasedChangeUpdate) *UpdateResult { - changeNumber := s.ruleBasedSegmentStorage.ChangeNumber() - if changeNumber >= ruleBasedChange.BaseUpdate.ChangeNumber() { - s.logger.Debug("the rule-based segment it's already updated") - return &UpdateResult{RequiresFetch: true} - } - ruleBasedSegments := make([]dtos.RuleBasedSegmentDTO, 0, 1) - ruleBasedSegments = append(ruleBasedSegments, *ruleBasedChange.RuleBasedSegment()) - 
splitChanges := dtos.SplitChangesDTO{RuleBasedSegments: dtos.RuleBasedSegmentsDTO{RuleBasedSegments: ruleBasedSegments}} - toRemove, toAdd, segments := s.processRuleBasedSegmentChanges(&splitChanges) - s.ruleBasedSegmentStorage.Update(toAdd, toRemove, ruleBasedChange.BaseUpdate.ChangeNumber()) - - return &UpdateResult{ - ReferencedSegments: segments, - NewChangeNumber: ruleBasedChange.BaseUpdate.ChangeNumber(), - RequiresFetch: false, - } -} diff --git a/synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go b/synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go deleted file mode 100644 index c638ca86..00000000 --- a/synchronizer/worker/rulebasedsegment/rulebasedsegment_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package rulebasedsegment - -import ( - "sync/atomic" - "testing" - - "github.com/splitio/go-split-commons/v6/dtos" - "github.com/splitio/go-split-commons/v6/storage/mocks" - "github.com/splitio/go-toolkit/v5/logging" -) - -func TestProcessUpdate(t *testing.T) { - var updateCalled int64 - mockedRB1 := dtos.RuleBasedSegmentDTO{ - Name: "rb1", - Status: Active, - Conditions: []dtos.RuleBasedConditionDTO{ - { - MatcherGroup: dtos.MatcherGroupDTO{ - Matchers: []dtos.MatcherDTO{ - { - MatcherType: matcherTypeInSegment, - UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ - SegmentName: "segment1", - }, - }, - }, - }, - }, - }, - Excluded: dtos.ExcludedDTO{ - Segments: []dtos.ExcluededSegmentDTO{ - { - Name: "segment2", - Type: TypeStandard, - }, - }, - }, - } - - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateCalled, 1) - }, - } - - ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitChanges := &dtos.SplitChangesDTO{ - RuleBasedSegments: dtos.RuleBasedSegmentsDTO{ - RuleBasedSegments: []dtos.RuleBasedSegmentDTO{mockedRB1}, - Since: 1, - 
Till: 2, - }, - } - - segments := ruleBasedSegmentUpdater.ProcessUpdate(splitChanges) - - if atomic.LoadInt64(&updateCalled) != 1 { - t.Error("Update should be called once") - } - - if len(segments) != 2 { - t.Error("Should return 2 segments") - } - - found1, found2 := false, false - for _, segment := range segments { - if segment == "segment1" { - found1 = true - } - if segment == "segment2" { - found2 = true - } - } - - if !found1 || !found2 { - t.Error("Should return both segments") - } -} - -func TestProcessUpdateArchivedRB(t *testing.T) { - var updateCalled int64 - mockedRB1 := dtos.RuleBasedSegmentDTO{ - Name: "rb1", - Status: Archived, - Conditions: []dtos.RuleBasedConditionDTO{ - { - MatcherGroup: dtos.MatcherGroupDTO{ - Matchers: []dtos.MatcherDTO{ - { - MatcherType: matcherTypeInSegment, - UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ - SegmentName: "segment1", - }, - }, - }, - }, - }, - }, - } - - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateCalled, 1) - }, - } - - ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitChanges := &dtos.SplitChangesDTO{ - RuleBasedSegments: dtos.RuleBasedSegmentsDTO{ - RuleBasedSegments: []dtos.RuleBasedSegmentDTO{mockedRB1}, - Since: 1, - Till: 2, - }, - } - - segments := ruleBasedSegmentUpdater.ProcessUpdate(splitChanges) - - if atomic.LoadInt64(&updateCalled) != 1 { - t.Error("Update should be called once") - } - - if len(segments) != 0 { - t.Error("Should return no segments") - } -} - -func TestSynchronizeRuleBasedSegment(t *testing.T) { - var updateCalled int64 - var changeNumberCalled int64 - - mockedRB1 := dtos.RuleBasedSegmentDTO{ - Name: "rb1", - Status: Active, - Conditions: []dtos.RuleBasedConditionDTO{ - { - MatcherGroup: dtos.MatcherGroupDTO{ - Matchers: []dtos.MatcherDTO{ - { - MatcherType: 
matcherTypeInSegment, - UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ - SegmentName: "segment1", - }, - }, - }, - }, - }, - }, - } - - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateCalled, 1) - }, - ChangeNumberCall: func() int64 { - atomic.AddInt64(&changeNumberCalled, 1) - return 0 - }, - } - - ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - var changeNumber int64 = 2 - result, err := ruleBasedSegmentUpdater.SynchronizeRuleBasedSegment(dtos.NewRuleBasedChangeUpdate( - dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), - &changeNumber, - &mockedRB1, - )) - - if err != nil { - t.Error("Should not return error") - } - - if atomic.LoadInt64(&updateCalled) != 1 { - t.Error("Update should be called once") - } - - if atomic.LoadInt64(&changeNumberCalled) != 1 { - t.Error("ChangeNumber should be called once") - } - - if len(result.ReferencedSegments) != 1 { - t.Error("Should return 1 segment") - } - - if result.ReferencedSegments[0] != "segment1" { - t.Error("Should return segment1") - } - - if result.NewChangeNumber != 2 { - t.Error("Should return change number 2") - } - - if result.RequiresFetch { - t.Error("Should not require fetch") - } -} - -func TestSynchronizeRuleBasedSegmentNoUpdate(t *testing.T) { - var updateCalled int64 - var changeNumberCalled int64 - - mockedRB1 := dtos.RuleBasedSegmentDTO{ - Name: "rb1", - Status: Active, - Conditions: []dtos.RuleBasedConditionDTO{ - { - MatcherGroup: dtos.MatcherGroupDTO{ - Matchers: []dtos.MatcherDTO{ - { - MatcherType: matcherTypeInSegment, - UserDefinedSegment: &dtos.UserDefinedSegmentMatcherDataDTO{ - SegmentName: "segment1", - }, - }, - }, - }, - }, - }, - } - - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till 
int64) { - atomic.AddInt64(&updateCalled, 1) - }, - ChangeNumberCall: func() int64 { - atomic.AddInt64(&changeNumberCalled, 1) - return 3 - }, - } - - ruleBasedSegmentUpdater := NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - var changeNumber int64 = 2 - result, err := ruleBasedSegmentUpdater.SynchronizeRuleBasedSegment(dtos.NewRuleBasedChangeUpdate( - dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), - &changeNumber, - &mockedRB1, - )) - - if err != nil { - t.Error("Should not return error") - } - - if atomic.LoadInt64(&updateCalled) != 0 { - t.Error("Update should not be called") - } - - if atomic.LoadInt64(&changeNumberCalled) != 1 { - t.Error("ChangeNumber should be called once") - } - - if len(result.ReferencedSegments) != 0 { - t.Error("Should return no segments") - } - - if result.NewChangeNumber != 0 { - t.Error("Should return change number 0") - } - - if !result.RequiresFetch { - t.Error("Should require fetch") - } -} diff --git a/synchronizer/worker/split/split.go b/synchronizer/worker/split/split.go index 27631886..cc809160 100644 --- a/synchronizer/worker/split/split.go +++ b/synchronizer/worker/split/split.go @@ -10,7 +10,6 @@ import ( "github.com/splitio/go-split-commons/v6/healthcheck/application" "github.com/splitio/go-split-commons/v6/service" "github.com/splitio/go-split-commons/v6/storage" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/backoff" "github.com/splitio/go-toolkit/v5/common" @@ -21,6 +20,9 @@ const ( matcherTypeInSegment = "IN_SEGMENT" matcherTypeInLargeSegment = "IN_LARGE_SEGMENT" matcherTypeInRuleBasedSegment = "IN_RULE_BASED_SEGMENT" + UpdateTypeSplitChange = "SPLIT_UPDATE" + UpdateTypeRuleBasedChange = "RB_SEGMENT_UPDATE" + TypeStandard = "standard" scRequestURITooLong = 414 onDemandFetchBackoffBase = int64(10) // backoff base starting at 10 seconds 
onDemandFetchBackoffMaxWait = 60 * time.Second // don't sleep for more than 1 minute @@ -60,7 +62,6 @@ type UpdaterImpl struct { splitStorage storage.SplitStorage splitFetcher service.SplitFetcher ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage - ruleBasedSegmentUpdater rulebasedsegment.UpdaterImpl logger logging.LoggerInterface runtimeTelemetry storage.TelemetryRuntimeProducer hcMonitor application.MonitorProducerInterface @@ -73,7 +74,6 @@ type UpdaterImpl struct { func NewSplitUpdater( splitStorage storage.SplitStorage, ruleBasedSegmentStorage storage.RuleBasedSegmentsStorage, - ruleBasedSegmentUpdater rulebasedsegment.UpdaterImpl, splitFetcher service.SplitFetcher, logger logging.LoggerInterface, runtimeTelemetry storage.TelemetryRuntimeProducer, @@ -90,7 +90,6 @@ func NewSplitUpdater( onDemandFetchBackoffMaxWait: onDemandFetchBackoffMaxWait, flagSetsFilter: flagSetsFilter, ruleBasedSegmentStorage: ruleBasedSegmentStorage, - ruleBasedSegmentUpdater: ruleBasedSegmentUpdater, } } @@ -104,6 +103,13 @@ func (s *UpdaterImpl) processUpdate(splitChanges *dtos.SplitChangesDTO) { s.splitStorage.Update(activeSplits, inactiveSplits, splitChanges.FeatureFlags.Till) } +func (s *UpdaterImpl) processRuleBasedUpdate(splitChanges *dtos.SplitChangesDTO) []string { + activeRB, inactiveRB, segments := s.processRuleBasedSegmentChanges(splitChanges) + // Add/Update active rule-based segments + s.ruleBasedSegmentStorage.Update(activeRB, inactiveRB, splitChanges.RuleBasedSegments.Till) + return segments +} + // fetchUntil Hit endpoint, update storage and return when since==till. 
func (s *UpdaterImpl) fetchUntil(fetchOptions *service.FlagRequestParams) (*UpdateResult, error) { // just guessing sizes so the we don't realloc immediately @@ -133,7 +139,7 @@ func (s *UpdaterImpl) fetchUntil(fetchOptions *service.FlagRequestParams) (*Upda currentRBSince = splitChanges.RuleBasedSegments.Till s.runtimeTelemetry.RecordSyncLatency(telemetry.SplitSync, time.Since(before)) s.processUpdate(splitChanges) - segmentReferences = s.ruleBasedSegmentUpdater.ProcessUpdate(splitChanges) + segmentReferences = s.processRuleBasedUpdate(splitChanges) segmentReferences = appendSegmentNames(segmentReferences, splitChanges) updatedSplitNames = appendSplitNames(updatedSplitNames, splitChanges) largeSegmentReferences = appendLargeSegmentNames(largeSegmentReferences, splitChanges) @@ -324,8 +330,69 @@ func (s *UpdaterImpl) processFFChange(ffChange dtos.SplitChangeUpdate) *UpdateRe } } +func (s *UpdaterImpl) getSegments(ruleBasedSegment *dtos.RuleBasedSegmentDTO) []string { + seen := make(map[string]struct{}) + segments := make([]string, 0) + + for _, segment := range ruleBasedSegment.Excluded.Segments { + if segment.Type == TypeStandard { + segments = addIfNotExists(segments, seen, segment.Name) + } + } + + for _, cond := range ruleBasedSegment.Conditions { + for _, matcher := range cond.MatcherGroup.Matchers { + if matcher.MatcherType == matcherTypeInSegment && matcher.UserDefinedSegment != nil { + segments = addIfNotExists(segments, seen, matcher.UserDefinedSegment.SegmentName) + } + } + } + + return segments +} + +func (s *UpdaterImpl) processRuleBasedSegmentChanges(splitChanges *dtos.SplitChangesDTO) ([]dtos.RuleBasedSegmentDTO, []dtos.RuleBasedSegmentDTO, []string) { + toRemove := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) + toAdd := make([]dtos.RuleBasedSegmentDTO, 0, len(splitChanges.RuleBasedSegments.RuleBasedSegments)) + segments := make([]string, 0) + for _, rbSegment := range 
splitChanges.RuleBasedSegments.RuleBasedSegments { + if rbSegment.Status == Active { + validator.ProcessRBMatchers(&rbSegment, s.logger) + toAdd = append(toAdd, rbSegment) + segments = append(segments, s.getSegments(&rbSegment)...) + } else { + toRemove = append(toRemove, rbSegment) + } + } + return toAdd, toRemove, segments +} + +func (s *UpdaterImpl) processRuleBasedChangeUpdate(ruleBasedChange dtos.SplitChangeUpdate) *UpdateResult { + changeNumber := s.ruleBasedSegmentStorage.ChangeNumber() + if changeNumber >= ruleBasedChange.BaseUpdate.ChangeNumber() { + s.logger.Debug("the rule-based segment it's already updated") + return &UpdateResult{RequiresFetch: true} + } + ruleBasedSegments := make([]dtos.RuleBasedSegmentDTO, 0, 1) + ruleBasedSegments = append(ruleBasedSegments, *ruleBasedChange.RuleBasedSegment()) + splitChanges := dtos.SplitChangesDTO{RuleBasedSegments: dtos.RuleBasedSegmentsDTO{RuleBasedSegments: ruleBasedSegments}} + toAdd, toRemove, segments := s.processRuleBasedSegmentChanges(&splitChanges) + s.ruleBasedSegmentStorage.Update(toAdd, toRemove, ruleBasedChange.BaseUpdate.ChangeNumber()) + + return &UpdateResult{ + ReferencedSegments: segments, + NewChangeNumber: ruleBasedChange.BaseUpdate.ChangeNumber(), + RequiresFetch: false, + } +} + func (s *UpdaterImpl) SynchronizeFeatureFlags(ffChange *dtos.SplitChangeUpdate) (*UpdateResult, error) { - result := s.processFFChange(*ffChange) + var result *UpdateResult + if ffChange.UpdateType() == UpdateTypeSplitChange { + result = s.processFFChange(*ffChange) + } else { + result = s.processRuleBasedChangeUpdate(*ffChange) + } if result.RequiresFetch { return s.SynchronizeSplits(common.Int64Ref(ffChange.BaseUpdate.ChangeNumber())) } diff --git a/synchronizer/worker/split/split_test.go b/synchronizer/worker/split/split_test.go index 27a039ea..a153fb42 100644 --- a/synchronizer/worker/split/split_test.go +++ b/synchronizer/worker/split/split_test.go @@ -16,7 +16,6 @@ import ( 
"github.com/splitio/go-split-commons/v6/storage/inmemory" "github.com/splitio/go-split-commons/v6/storage/inmemory/mutexmap" "github.com/splitio/go-split-commons/v6/storage/mocks" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/common" "github.com/splitio/go-toolkit/v5/logging" @@ -55,8 +54,6 @@ func TestSplitSynchronizerError(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncErrorCall: func(resource, status int) { if resource != telemetry.SplitSync { @@ -74,7 +71,7 @@ func TestSplitSynchronizerError(t *testing.T) { }, } - splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) _, err := splitUpdater.SynchronizeSplits(nil) if err == nil { @@ -118,16 +115,13 @@ func TestSplitSynchronizerErrorScRequestURITooLong(t *testing.T) { return -1 }, } - - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - appMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) { atomic.AddInt64(¬ifyEventCalled, 1) }, } - splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := 
NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) _, err := splitUpdater.SynchronizeSplits(nil) if err == nil { @@ -222,9 +216,7 @@ func TestSplitSynchronizer(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) _, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -300,10 +292,7 @@ func TestSplitSyncProcess(t *testing.T) { atomic.AddInt64(&updateRBCalled, 1) }, } - - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) res, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -413,10 +402,7 @@ func TestSplitTill(t *testing.T) { atomic.AddInt64(&updateRBCalled, 1) }, } - - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - 
splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) var till int64 = 1 _, err := splitUpdater.SynchronizeSplits(&till) @@ -500,9 +486,7 @@ func TestByPassingCDN(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitUpdater.onDemandFetchBackoffBase = 1 splitUpdater.onDemandFetchBackoffMaxWait = 10 * time.Nanosecond @@ -582,9 +566,7 @@ func TestByPassingCDNLimit(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitUpdater.onDemandFetchBackoffBase = 1 splitUpdater.onDemandFetchBackoffMaxWait = 
10 * time.Nanosecond @@ -625,11 +607,10 @@ func TestProcessFFChange(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) result, _ := fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 12), nil, nil, @@ -675,9 +656,8 @@ func TestAddOrUpdateFeatureFlagNil(t *testing.T) { atomic.AddInt64(&updateRBCalled, 1) }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), nil, nil, @@ -723,10 +703,10 @@ func TestAddOrUpdateFeatureFlagPcnEquals(t *testing.T) { return -1 }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) + telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, 
*ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) featureFlag := dtos.SplitDTO{ChangeNumber: 4, Status: Active} @@ -777,8 +757,7 @@ func TestAddOrUpdateFeatureFlagArchive(t *testing.T) { telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) featureFlag := dtos.SplitDTO{ChangeNumber: 4, Status: Archived} fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( @@ -831,8 +810,7 @@ func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { atomic.AddInt64(&updateRBCalled, 1) }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( dtos.NewBaseUpdate(dtos.NewBaseMessage(0, "some"), 2), nil, nil, @@ -859,9 +837,8 @@ func TestGetActiveFF(t *testing.T) { return -1 }, } - ruleBasedSegmentUpdater := 
rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) if len(actives) != 2 { @@ -885,9 +862,7 @@ func TestGetInactiveFF(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) if len(actives) != 0 { @@ -912,9 +887,7 @@ func TestGetActiveAndInactiveFF(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, nil, 
mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) if len(actives) != 1 { @@ -969,9 +942,8 @@ func TestSplitSyncWithSets(t *testing.T) { atomic.AddInt64(&updateRBCalled, 1) }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter([]string{"set1", "set2", "set3"})) + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter([]string{"set1", "set2", "set3"})) res, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -1042,9 +1014,7 @@ func TestSplitSyncWithSetsInConfig(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagSetFilter) + splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagSetFilter) res, err := splitUpdater.SynchronizeSplits(nil) if err != nil { @@ -1082,9 +1052,8 @@ func TestProcessMatchers(t *testing.T) { return -1 }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - splitUpdater := NewSplitUpdater(mocks.MockSplitStorage{}, 
ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, fetcherMock.MockSplitFetcher{}, logging.NewLogger(nil), mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) + splitUpdater := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, logging.NewLogger(nil), mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) splitChange := &dtos.SplitChangesDTO{ FeatureFlags: dtos.FeatureFlagsDTO{Till: 1, Since: 1, Splits: []dtos.SplitDTO{ { diff --git a/tasks/splitsync_test.go b/tasks/splitsync_test.go index 0e3bcb08..452f2c98 100644 --- a/tasks/splitsync_test.go +++ b/tasks/splitsync_test.go @@ -12,7 +12,6 @@ import ( "github.com/splitio/go-split-commons/v6/service" fetcherMock "github.com/splitio/go-split-commons/v6/service/mocks" "github.com/splitio/go-split-commons/v6/storage/mocks" - "github.com/splitio/go-split-commons/v6/synchronizer/worker/rulebasedsegment" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/logging" @@ -101,9 +100,7 @@ func TestSplitSyncTask(t *testing.T) { }, } - ruleBasedSegmentUpdater := rulebasedsegment.NewRuleBasedSegmentUpdater(ruleBasedSegmentMockStorage, logging.NewLogger(&logging.LoggerOptions{})) - - splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, *ruleBasedSegmentUpdater, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) + splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitTask := NewFetchSplitsTask( splitUpdater, From 97b738833c96fc07d9197cbde6a5fe7d0f87e82d Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Wed, 13 Aug 2025 
14:00:14 -0300 Subject: [PATCH 5/9] Remove rule-based segment update --- dtos/notification.go | 32 -------------------------------- push/processor.go | 24 +++++++----------------- 2 files changed, 7 insertions(+), 49 deletions(-) diff --git a/dtos/notification.go b/dtos/notification.go index 1375d099..0d98174e 100644 --- a/dtos/notification.go +++ b/dtos/notification.go @@ -352,36 +352,6 @@ func (u *LargeSegmentChangeUpdate) String() string { u.Channel(), u.ChangeNumber(), len(u.LargeSegments), u.Timestamp()) } -// RuleBasedChangeUpdate represents a RuleBasedChange notification generated in the split servers -type RuleBasedChangeUpdate struct { - BaseUpdate - previousChangeNumber *int64 - ruleBasedSegment *RuleBasedSegmentDTO -} - -func NewRuleBasedChangeUpdate(baseUpdate BaseUpdate, pcn *int64, ruleBasedSegment *RuleBasedSegmentDTO) *RuleBasedChangeUpdate { - return &RuleBasedChangeUpdate{ - BaseUpdate: baseUpdate, - previousChangeNumber: pcn, - ruleBasedSegment: ruleBasedSegment, - } -} - -// UpdateType always returns UpdateTypeRuleBasedChange for RuleBasedUpdate messages -func (u *RuleBasedChangeUpdate) UpdateType() string { return UpdateTypeRuleBasedChange } - -// String returns the String representation of a split change notification -func (u *RuleBasedChangeUpdate) String() string { - return fmt.Sprintf("SplitChange(channel=%s,changeNumber=%d,timestamp=%d)", - u.Channel(), u.ChangeNumber(), u.Timestamp()) -} - -// PreviousChangeNumber returns previous change number -func (u *RuleBasedChangeUpdate) PreviousChangeNumber() *int64 { return u.previousChangeNumber } - -// FeatureFlag returns feature flag definiiton or nil -func (u *RuleBasedChangeUpdate) RuleBasedSegment() *RuleBasedSegmentDTO { return u.ruleBasedSegment } - // Compile-type assertions of interface requirements var _ Event = &AblyError{} var _ Message = &OccupancyMessage{} @@ -390,9 +360,7 @@ var _ Message = &SplitKillUpdate{} var _ Message = &SegmentChangeUpdate{} var _ Message = &ControlUpdate{} 
var _ Message = &LargeSegmentChangeUpdate{} -var _ Message = &RuleBasedChangeUpdate{} var _ Update = &SplitChangeUpdate{} var _ Update = &SplitKillUpdate{} var _ Update = &SegmentChangeUpdate{} var _ Update = &LargeSegmentChangeUpdate{} -var _ Update = &RuleBasedChangeUpdate{} diff --git a/push/processor.go b/push/processor.go index ecf854d7..d393364b 100644 --- a/push/processor.go +++ b/push/processor.go @@ -26,21 +26,19 @@ type Processor interface { ProcessSplitKillUpdate(update *dtos.SplitKillUpdate) error ProcessSegmentChangeUpdate(update *dtos.SegmentChangeUpdate) error ProcessLargeSegmentChangeUpdate(update *dtos.LargeSegmentChangeUpdate) error - ProcessorRuleBasedSegmentChangeUpdate(update *dtos.RuleBasedChangeUpdate) error StartWorkers() StopWorkers() } // ProcessorImpl struct for notification processor type ProcessorImpl struct { - segmentQueue chan dtos.SegmentChangeUpdate - splitQueue chan dtos.SplitChangeUpdate - ruleBasedQueue chan dtos.RuleBasedChangeUpdate - splitWorker *SplitUpdateWorker - segmentWorker *SegmentUpdateWorker - synchronizer synchronizerInterface - logger logging.LoggerInterface - largeSegment *LargeSegment + segmentQueue chan dtos.SegmentChangeUpdate + splitQueue chan dtos.SplitChangeUpdate + splitWorker *SplitUpdateWorker + segmentWorker *SegmentUpdateWorker + synchronizer synchronizerInterface + logger logging.LoggerInterface + largeSegment *LargeSegment } // NewProcessor creates new processor @@ -137,14 +135,6 @@ func (p *ProcessorImpl) ProcessLargeSegmentChangeUpdate(update *dtos.LargeSegmen return nil } -func (p *ProcessorImpl) ProcessorRuleBasedSegmentChangeUpdate(update *dtos.RuleBasedChangeUpdate) error { - if update == nil { - return errors.New("rule-based segment change update cannot be nil") - } - p.ruleBasedQueue <- *update - return nil -} - // StartWorkers enables split & segments workers func (p *ProcessorImpl) StartWorkers() { p.splitWorker.Start() From e3f472f10f3ce26de682cfb6bc3bd28da45a15e3 Mon Sep 17 00:00:00 2001 
From: Nadia Mayor Date: Fri, 15 Aug 2025 12:03:44 -0300 Subject: [PATCH 6/9] Update test cases --- storage/mocks/rulebasedsegment.go | 65 ++++---- synchronizer/local_test.go | 35 ++-- synchronizer/synchronizer_test.go | 93 +++-------- synchronizer/worker/split/split_test.go | 211 ++++++------------------ tasks/splitsync_test.go | 16 +- 5 files changed, 123 insertions(+), 297 deletions(-) diff --git a/storage/mocks/rulebasedsegment.go b/storage/mocks/rulebasedsegment.go index 4ee0ddc7..e8fb7e7e 100644 --- a/storage/mocks/rulebasedsegment.go +++ b/storage/mocks/rulebasedsegment.go @@ -2,68 +2,73 @@ package mocks import ( "github.com/splitio/go-split-commons/v6/dtos" + "github.com/splitio/go-split-commons/v6/storage" + "github.com/stretchr/testify/mock" ) // MockSegmentStorage is a mocked implementation of Segment Storage type MockRuleBasedSegmentStorage struct { - ChangeNumberCall func() int64 - AllCall func() []dtos.RuleBasedSegmentDTO - RuleBasedSegmentNamesCall func() []string - ContainsCall func(ruleBasedSegmentNames []string) bool - GetSegmentsCall func() []string - CountCall func() int - GetRuleBasedSegmentByNameCall func(name string) (*dtos.RuleBasedSegmentDTO, error) - SetChangeNumberCall func(name string, till int64) - UpdateCall func(toAdd []dtos.RuleBasedSegmentDTO, toRemove []dtos.RuleBasedSegmentDTO, till int64) - ClearCall func() + mock.Mock } // ChangeNumber mock -func (m MockRuleBasedSegmentStorage) ChangeNumber() int64 { - return m.ChangeNumberCall() +func (m *MockRuleBasedSegmentStorage) ChangeNumber() int64 { + args := m.Called() + return int64(args.Int(0)) } // All mock -func (m MockRuleBasedSegmentStorage) All() []dtos.RuleBasedSegmentDTO { - return m.AllCall() +func (m *MockRuleBasedSegmentStorage) All() []dtos.RuleBasedSegmentDTO { + args := m.Called() + return args.Get(0).([]dtos.RuleBasedSegmentDTO) } // RuleBasedSegmentNames mock -func (m MockRuleBasedSegmentStorage) RuleBasedSegmentNames() []string { - return m.RuleBasedSegmentNamesCall() 
+func (m *MockRuleBasedSegmentStorage) RuleBasedSegmentNames() []string { + args := m.Called() + return args.Get(0).([]string) } // Contains mock -func (m MockRuleBasedSegmentStorage) Contains(ruleBasedSegmentNames []string) bool { - return m.ContainsCall(ruleBasedSegmentNames) +func (m *MockRuleBasedSegmentStorage) Contains(ruleBasedSegmentNames []string) bool { + args := m.Called(ruleBasedSegmentNames) + return bool(args.Bool(0)) } // GetSegments mock -func (m MockRuleBasedSegmentStorage) GetSegments() []string { - return m.GetSegmentsCall() +func (m *MockRuleBasedSegmentStorage) GetSegments() []string { + args := m.Called() + return args.Get(0).([]string) } // Count mock -func (m MockRuleBasedSegmentStorage) Count() int { - return m.CountCall() +func (m *MockRuleBasedSegmentStorage) Count() int { + args := m.Called() + return int(args.Int(0)) } // GetRuleBasedSegmentByName mock -func (m MockRuleBasedSegmentStorage) GetRuleBasedSegmentByName(name string) (*dtos.RuleBasedSegmentDTO, error) { - return m.GetRuleBasedSegmentByNameCall(name) +func (m *MockRuleBasedSegmentStorage) GetRuleBasedSegmentByName(name string) (*dtos.RuleBasedSegmentDTO, error) { + args := m.Called(name) + return args.Get(0).(*dtos.RuleBasedSegmentDTO), args.Error(1) } // SetChangeNumber mock -func (m MockRuleBasedSegmentStorage) SetChangeNumber(name string, till int64) { - m.SetChangeNumberCall(name, till) +func (m *MockRuleBasedSegmentStorage) SetChangeNumber(name string, till int64) { + m.Called(name, till) + return } // Update mock -func (m MockRuleBasedSegmentStorage) Update(toAdd []dtos.RuleBasedSegmentDTO, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - m.UpdateCall(toAdd, toRemove, till) +func (m *MockRuleBasedSegmentStorage) Update(toAdd []dtos.RuleBasedSegmentDTO, toRemove []dtos.RuleBasedSegmentDTO, till int64) { + m.Called(toAdd, toRemove, till) + return } // Clear mock -func (m MockRuleBasedSegmentStorage) Clear() { - m.ClearCall() +func (m *MockRuleBasedSegmentStorage) 
Clear() { + m.Called() + return } + +var _ storage.RuleBasedSegmentsStorage = (*MockRuleBasedSegmentStorage)(nil) diff --git a/synchronizer/local_test.go b/synchronizer/local_test.go index ef89ed92..d690c420 100644 --- a/synchronizer/local_test.go +++ b/synchronizer/local_test.go @@ -15,6 +15,7 @@ import ( "github.com/splitio/go-split-commons/v6/storage/mocks" "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-toolkit/v5/logging" + "github.com/stretchr/testify/mock" ) func TestLocalSyncAllError(t *testing.T) { @@ -43,11 +44,9 @@ func TestLocalSyncAllError(t *testing.T) { } flagSetFilter := flagsets.NewFlagSetFilter(nil) - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + splitUpdater := split.NewSplitUpdater( splitMockStorage, ruleBasedSegmentMockStorage, @@ -83,7 +82,6 @@ func TestLocalSyncAllError(t *testing.T) { func TestLocalSyncAllOk(t *testing.T) { var splitFetchCalled int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} logger := logging.NewLogger(&logging.LoggerOptions{}) @@ -115,12 +113,9 @@ func TestLocalSyncAllOk(t *testing.T) { } var notifyEventCalled int64 segmentMockStorage := mocks.MockSegmentStorage{} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { return -1 }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", 
mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, RecordSuccessfulSyncCall: func(resource int, when time.Time) {}, @@ -142,14 +137,10 @@ func TestLocalSyncAllOk(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Errorf("It should be called once. Actual %d", notifyEventCalled) } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } func TestLocalPeriodicFetching(t *testing.T) { var splitFetchCalled int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} logger := logging.NewLogger(&logging.LoggerOptions{}) @@ -180,12 +171,9 @@ func TestLocalPeriodicFetching(t *testing.T) { }, } segmentMockStorage := mocks.MockSegmentStorage{} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { return -1 }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, RecordSuccessfulSyncCall: func(resource int, when time.Time) {}, @@ -207,7 +195,4 @@ func TestLocalPeriodicFetching(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Errorf("It should be called once. 
Actual %d", notifyEventCalled) } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 5ffa5052..1db85731 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -28,6 +28,7 @@ import ( "github.com/splitio/go-toolkit/v5/common" "github.com/splitio/go-toolkit/v5/datastructures/set" "github.com/splitio/go-toolkit/v5/logging" + "github.com/stretchr/testify/mock" ) func validReqParams(t *testing.T, fetchOptions service.RequestParams) { @@ -42,11 +43,9 @@ func validReqParams(t *testing.T, fetchOptions service.RequestParams) { } func createSplitUpdater(splitMockStorage storageMock.MockSplitStorage, splitAPI api.SplitAPI, logger logging.LoggerInterface, telemetryMockStorage storageMock.MockTelemetryStorage, appMonitorMock hcMock.MockApplicationMonitor) split.Updater { - ruleBasedSegmentMockStorage := storageMock.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { return -1 }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(-1) splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) return splitUpdater @@ -77,9 +76,8 @@ func TestSyncAllErrorSplits(t *testing.T) { }, } advanced := conf.AdvancedConfig{EventsQueueSize: 100, EventsBulkSize: 100, HTTPTimeout: 100, ImpressionsBulkSize: 100, ImpressionsQueueSize: 100, SegmentQueueSize: 50, SegmentWorkers: 5} - ruleBasedSegmentMockStorage := storageMock.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { return -1 }, - } + 
ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) @@ -560,11 +558,8 @@ func TestSplitUpdateWorkerCNGreaterThanFFChange(t *testing.T) { telemetryMockStorage := storageMock.MockTelemetryStorage{} appMonitorMock := hcMock.MockApplicationMonitor{} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), @@ -619,11 +614,8 @@ func TestSplitUpdateWorkerStorageCNEqualsFFCN(t *testing.T) { telemetryMockStorage := storageMock.MockTelemetryStorage{} appMonitorMock := hcMock.MockApplicationMonitor{} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), @@ -685,11 +677,8 @@ func TestSplitUpdateWorkerFFPcnEqualsFFNotNil(t *testing.T) { telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := 
&mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), @@ -731,7 +720,6 @@ func TestSplitUpdateWorkerFFPcnEqualsFFNotNil(t *testing.T) { func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { var splitFetchCalled int64 var updateCalled int64 - var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) splitAPI := api.SplitAPI{ SplitFetcher: httpMocks.MockSplitFetcher{ @@ -774,14 +762,9 @@ func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { NotifyEventCall: func(counterType int) {}, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), @@ -814,15 +797,11 @@ func TestSplitUpdateWorkerGetCNFromStorageError(t *testing.T) { if u := atomic.LoadInt64(&updateCalled); u != 1 { t.Error("should have been called once. 
got: ", u) } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } func TestSplitUpdateWorkerFFIsNil(t *testing.T) { var splitFetchCalled int64 var updateCalled int64 - var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) splitAPI := api.SplitAPI{ SplitFetcher: httpMocks.MockSplitFetcher{ @@ -855,14 +834,9 @@ func TestSplitUpdateWorkerFFIsNil(t *testing.T) { hcMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) {}, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), @@ -894,15 +868,11 @@ func TestSplitUpdateWorkerFFIsNil(t *testing.T) { if u := atomic.LoadInt64(&updateCalled); u != 1 { t.Error("should have been called once. 
got: ", u) } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { var splitFetchCalled int64 var updateCalled int64 - var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) splitAPI := api.SplitAPI{ SplitFetcher: httpMocks.MockSplitFetcher{ @@ -937,14 +907,9 @@ func TestSplitUpdateWorkerFFPcnDifferentStorageCN(t *testing.T) { NotifyEventCall: func(counterType int) {}, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, hcMonitorMock, flagsets.NewFlagSetFilter(nil)), @@ -995,11 +960,8 @@ func TestLocalKill(t *testing.T) { } }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, storageMock.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)), @@ -1010,7 +972,6 @@ func TestLocalKill(t *testing.T) { syncForTest := NewSynchronizer(conf.AdvancedConfig{}, splitTasks, workers, logger, nil) syncForTest.LocalKill("split", "default_treatment", 123456789) } - func 
TestSplitUpdateWithReferencedSegments(t *testing.T) { var ffUpdateCalled int64 var segmentUpdateCalled int64 @@ -1054,6 +1015,7 @@ func TestSplitUpdateWithReferencedSegments(t *testing.T) { t.Error("the segment name should be segment1") } return -1, nil + }, UpdateCall: func(name string, toAdd *set.ThreadUnsafeSet, toRemove *set.ThreadUnsafeSet, changeNumber int64) error { atomic.AddInt64(&segmentUpdateCalled, 1) @@ -1074,11 +1036,8 @@ func TestSplitUpdateWithReferencedSegments(t *testing.T) { NotifyEventCall: func(counterType int) {}, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Maybe().Return(-1) workers := Workers{ SplitUpdater: split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitAPI.SplitFetcher, logger, telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)), diff --git a/synchronizer/worker/split/split_test.go b/synchronizer/worker/split/split_test.go index a153fb42..1e376e57 100644 --- a/synchronizer/worker/split/split_test.go +++ b/synchronizer/worker/split/split_test.go @@ -19,6 +19,7 @@ import ( "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/common" "github.com/splitio/go-toolkit/v5/logging" + "github.com/stretchr/testify/mock" ) func validReqParams(t *testing.T, fetchOptions service.RequestParams, till string) { @@ -48,11 +49,8 @@ func TestSplitSynchronizerError(t *testing.T) { }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) telemetryMockStorage := mocks.MockTelemetryStorage{ RecordSyncErrorCall: func(resource, status int) { @@ -110,11 +108,8 @@ func 
TestSplitSynchronizerErrorScRequestURITooLong(t *testing.T) { } }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) appMonitorMock := hcMock.MockApplicationMonitor{ NotifyEventCall: func(counterType int) { atomic.AddInt64(¬ifyEventCalled, 1) @@ -141,7 +136,6 @@ func TestSplitSynchronizer(t *testing.T) { mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: true, Status: "INACTIVE", TrafficTypeName: "one"} var notifyEventCalled int64 - var updateRBCalled int64 splitMockStorage := mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { @@ -206,15 +200,9 @@ func TestSplitSynchronizer(t *testing.T) { atomic.AddInt64(¬ifyEventCalled, 1) }, } - - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) splitUpdater := NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) @@ -225,15 +213,11 @@ func TestSplitSynchronizer(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Error("It should be called once") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should be called once") - } } func TestSplitSyncProcess(t *testing.T) { var call int64 var notifyEventCalled int64 - 
var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: true, Status: "INACTIVE", TrafficTypeName: "one"} @@ -284,14 +268,9 @@ func TestSplitSyncProcess(t *testing.T) { splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Times(3).Return(-1) splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) res, err := splitUpdater.SynchronizeSplits(nil) @@ -353,16 +332,11 @@ func TestSplitSyncProcess(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 2 { t.Error("It should be called twice") } - if atomic.LoadInt64(&updateRBCalled) != 2 { - t.Error("It should be called twice") - } } func TestSplitTill(t *testing.T) { var call int64 var notifyEventCalled int64 - var updateRBCalled int64 - var changeNumberRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedRuleBased1 := dtos.RuleBasedSegmentDTO{Name: "rb1", Status: "ACTIVE"} @@ -390,18 +364,9 @@ func TestSplitTill(t *testing.T) { splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - ruleBasedSegmentMockStorage := 
mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - atomic.AddInt64(&changeNumberRBCalled, 1) - if changeNumberRBCalled == 1 { - return -1 - } - return changeNumberRBCalled - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Times(12).Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Times(12).Return(-1) splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) var till int64 = 1 @@ -413,24 +378,17 @@ func TestSplitTill(t *testing.T) { if err != nil { t.Error("It should not return err") } - if atomic.LoadInt64(&call) != 1 { + if atomic.LoadInt64(&call) != 2 { t.Error("It should be called once") } if atomic.LoadInt64(¬ifyEventCalled) != 2 { t.Error("It should be called twice") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should be called once") - } - if atomic.LoadInt64(&changeNumberRBCalled) != 3 { - t.Error("It should be called twice") - } } func TestByPassingCDN(t *testing.T) { var call int64 var notifyEventCalled int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} splitMockFetcher := fetcherMock.MockSplitFetcher{ @@ -477,14 +435,9 @@ func TestByPassingCDN(t *testing.T) { splitStorage := mutexmap.NewMMSplitStorage(flagsets.NewFlagSetFilter(nil)) splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - 
atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Times(13).Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Times(13).Return(-1) splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitUpdater.onDemandFetchBackoffBase = 1 @@ -501,16 +454,12 @@ func TestByPassingCDN(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Error("It should be called twice instead of", atomic.LoadInt64(¬ifyEventCalled)) } - if atomic.LoadInt64(&updateRBCalled) != 12 { - t.Error("It should be called twice instead of", atomic.LoadInt64(&updateRBCalled)) - } } func TestByPassingCDNLimit(t *testing.T) { var call int64 var notifyEventCalled int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} splitMockFetcher := fetcherMock.MockSplitFetcher{ @@ -557,14 +506,9 @@ func TestByPassingCDNLimit(t *testing.T) { splitStorage := mutexmap.NewMMSplitStorage(flagsets.NewFlagSetFilter(nil)) splitStorage.Update([]dtos.SplitDTO{{}}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Times(22).Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Times(22).Return(-1) splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, 
logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) splitUpdater.onDemandFetchBackoffBase = 1 @@ -581,9 +525,6 @@ func TestByPassingCDNLimit(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) != 1 { t.Error("It should be called twice instead of", atomic.LoadInt64(¬ifyEventCalled)) } - if atomic.LoadInt64(&updateRBCalled) != 21 { - t.Error("It should be called twenty one times instead of", atomic.LoadInt64(&updateRBCalled)) - } } func TestProcessFFChange(t *testing.T) { @@ -601,11 +542,8 @@ func TestProcessFFChange(t *testing.T) { }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} @@ -625,7 +563,6 @@ func TestProcessFFChange(t *testing.T) { func TestAddOrUpdateFeatureFlagNil(t *testing.T) { var fetchCallCalled int64 - var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) ffStorageMock := mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { @@ -648,14 +585,9 @@ func TestAddOrUpdateFeatureFlagNil(t *testing.T) { NotifyEventCall: func(counterType int) {}, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, 
appMonitorMock, flagsets.NewFlagSetFilter(nil)) @@ -665,9 +597,6 @@ func TestAddOrUpdateFeatureFlagNil(t *testing.T) { if atomic.LoadInt64(&fetchCallCalled) != 1 { t.Error("Fetch should be called once") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("Fetch should be called once") - } } func TestAddOrUpdateFeatureFlagPcnEquals(t *testing.T) { @@ -698,11 +627,8 @@ func TestAddOrUpdateFeatureFlagPcnEquals(t *testing.T) { }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} @@ -749,11 +675,8 @@ func TestAddOrUpdateFeatureFlagArchive(t *testing.T) { }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) telemetryStorage, _ := inmemory.NewTelemetryStorage() appMonitorMock := hcMock.MockApplicationMonitor{} @@ -774,7 +697,6 @@ func TestAddOrUpdateFeatureFlagArchive(t *testing.T) { func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { var fetchCallCalled int64 var updateCalled int64 - var updateRBCalled int64 logger := logging.NewLogger(&logging.LoggerOptions{}) ffStorageMock := mocks.MockSplitStorage{ ChangeNumberCall: func() (int64, error) { @@ -802,14 +724,10 @@ func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { NotifyEventCall: func(counterType int) {}, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := 
&mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) + fetcher := NewSplitUpdater(ffStorageMock, ruleBasedSegmentMockStorage, splitMockFetcher, logger, telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) fetcher.SynchronizeFeatureFlags(dtos.NewSplitChangeUpdate( @@ -821,9 +739,6 @@ func TestAddOrUpdateFFCNFromStorageError(t *testing.T) { if atomic.LoadInt64(&updateCalled) != 1 { t.Error("It should update the storage") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } func TestGetActiveFF(t *testing.T) { @@ -832,11 +747,8 @@ func TestGetActiveFF(t *testing.T) { featureFlags = append(featureFlags, dtos.SplitDTO{Status: Active}) featureFlagChanges := &dtos.SplitChangesDTO{FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) @@ -856,11 +768,8 @@ func TestGetInactiveFF(t *testing.T) { featureFlags = append(featureFlags, dtos.SplitDTO{Status: Archived}) featureFlagChanges := &dtos.SplitChangesDTO{FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) s := 
NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) @@ -881,11 +790,8 @@ func TestGetActiveAndInactiveFF(t *testing.T) { featureFlagChanges := &dtos.SplitChangesDTO{ FeatureFlags: dtos.FeatureFlagsDTO{Splits: featureFlags}} - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) s := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, nil, mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) actives, inactives := s.processFeatureFlagChanges(featureFlagChanges) @@ -901,7 +807,6 @@ func TestGetActiveAndInactiveFF(t *testing.T) { func TestSplitSyncWithSets(t *testing.T) { var call int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set1", "set2"}} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set4"}} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set5", "set1"}} @@ -934,14 +839,9 @@ func TestSplitSyncWithSets(t *testing.T) { splitStorage.Update([]dtos.SplitDTO{}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := 
&mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagsets.NewFlagSetFilter([]string{"set1", "set2", "set3"})) @@ -963,14 +863,10 @@ func TestSplitSyncWithSets(t *testing.T) { if splitStorage.Split("split3") == nil { t.Error("split3 should be present") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } func TestSplitSyncWithSetsInConfig(t *testing.T) { var call int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set1"}} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set4"}} mockedSplit3 := dtos.SplitDTO{Name: "split3", Killed: false, Status: "ACTIVE", TrafficTypeName: "one", Sets: []string{"set5", "set2"}} @@ -1005,14 +901,9 @@ func TestSplitSyncWithSetsInConfig(t *testing.T) { splitStorage := mutexmap.NewMMSplitStorage(flagSetFilter) splitStorage.Update([]dtos.SplitDTO{}, nil, -1) telemetryStorage, _ := inmemory.NewTelemetryStorage() - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) splitUpdater := NewSplitUpdater(splitStorage, ruleBasedSegmentMockStorage, splitMockFetcher, 
logging.NewLogger(&logging.LoggerOptions{}), telemetryStorage, appMonitorMock, flagSetFilter) @@ -1041,17 +932,11 @@ func TestSplitSyncWithSetsInConfig(t *testing.T) { if s4 == nil { t.Error("split4 should be present") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } func TestProcessMatchers(t *testing.T) { - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) splitUpdater := NewSplitUpdater(mocks.MockSplitStorage{}, ruleBasedSegmentMockStorage, fetcherMock.MockSplitFetcher{}, logging.NewLogger(nil), mocks.MockTelemetryStorage{}, hcMock.MockApplicationMonitor{}, flagsets.NewFlagSetFilter(nil)) splitChange := &dtos.SplitChangesDTO{ diff --git a/tasks/splitsync_test.go b/tasks/splitsync_test.go index 452f2c98..5723f8d3 100644 --- a/tasks/splitsync_test.go +++ b/tasks/splitsync_test.go @@ -15,12 +15,12 @@ import ( "github.com/splitio/go-split-commons/v6/synchronizer/worker/split" "github.com/splitio/go-split-commons/v6/telemetry" "github.com/splitio/go-toolkit/v5/logging" + "github.com/stretchr/testify/mock" ) func TestSplitSyncTask(t *testing.T) { var call int64 var notifyEventCalled int64 - var updateRBCalled int64 mockedSplit1 := dtos.SplitDTO{Name: "split1", Killed: false, Status: "ACTIVE", TrafficTypeName: "one"} mockedSplit2 := dtos.SplitDTO{Name: "split2", Killed: true, Status: "ACTIVE", TrafficTypeName: "two"} @@ -91,14 +91,9 @@ func TestSplitSyncTask(t *testing.T) { }, } - ruleBasedSegmentMockStorage := mocks.MockRuleBasedSegmentStorage{ - ChangeNumberCall: func() int64 { - return -1 - }, - UpdateCall: func(toAdd, toRemove []dtos.RuleBasedSegmentDTO, till int64) { - atomic.AddInt64(&updateRBCalled, 1) - }, - } + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + 
ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Once().Return(-1) splitUpdater := split.NewSplitUpdater(splitMockStorage, ruleBasedSegmentMockStorage, splitMockFetcher, logging.NewLogger(&logging.LoggerOptions{}), telemetryMockStorage, appMonitorMock, flagsets.NewFlagSetFilter(nil)) @@ -125,7 +120,4 @@ func TestSplitSyncTask(t *testing.T) { if atomic.LoadInt64(¬ifyEventCalled) < 1 { t.Error("It should be called at least once") } - if atomic.LoadInt64(&updateRBCalled) != 1 { - t.Error("It should update the storage") - } } From 3c40da3e12cdb61d1929ca9ffc1b319f071aefc8 Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Fri, 15 Aug 2025 14:48:09 -0300 Subject: [PATCH 7/9] Add test cases --- dtos/notification.go | 4 +- synchronizer/worker/split/split_test.go | 271 ++++++++++++++++++++++++ 2 files changed, 273 insertions(+), 2 deletions(-) diff --git a/dtos/notification.go b/dtos/notification.go index 0d98174e..c977bd21 100644 --- a/dtos/notification.go +++ b/dtos/notification.go @@ -221,10 +221,10 @@ func NewRuleBasedSegmentChangeUpdate(baseUpdate BaseUpdate, pcn *int64, ruleBase } } -// UpdateType always returns UpdateTypeSplitChange for SplitUpdate messages +// UpdateType returns the type of update func (u *SplitChangeUpdate) UpdateType() string { if u.ruleBasedSegment != nil { - return TypeRuleBased + return UpdateTypeRuleBasedChange } return UpdateTypeSplitChange } diff --git a/synchronizer/worker/split/split_test.go b/synchronizer/worker/split/split_test.go index 1e376e57..07f4b616 100644 --- a/synchronizer/worker/split/split_test.go +++ b/synchronizer/worker/split/split_test.go @@ -934,6 +934,277 @@ func TestSplitSyncWithSetsInConfig(t *testing.T) { } } +func TestSynchronizeSplitsWithLowerTill(t *testing.T) { + // Mock split storage with higher change number + currentSince := int64(100) + splitMockStorage := mocks.MockSplitStorage{ + ChangeNumberCall: func() 
(int64, error) { return currentSince, nil }, + UpdateCall: func(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) {}, + } + + // Mock rule based segment storage with higher change number + currentRBSince := int64(150) + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Return(int(currentRBSince)) + ruleBasedSegmentMockStorage.On("Update", mock.Anything, mock.Anything, mock.Anything).Return() + + // Mock fetcher + var fetchCalled bool + splitMockFetcher := fetcherMock.MockSplitFetcher{ + FetchCall: func(requestParams *service.FlagRequestParams) (*dtos.SplitChangesDTO, error) { + fetchCalled = true + return &dtos.SplitChangesDTO{ + FeatureFlags: dtos.FeatureFlagsDTO{ + Since: currentSince, + Till: currentSince, + Splits: []dtos.SplitDTO{}, + }, + RuleBasedSegments: dtos.RuleBasedSegmentsDTO{ + Since: currentRBSince, + Till: currentRBSince, + RuleBasedSegments: []dtos.RuleBasedSegmentDTO{}, + }, + }, nil + }, + } + + // Mock telemetry storage + telemetryMockStorage := mocks.MockTelemetryStorage{ + RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, + RecordSuccessfulSyncCall: func(resource int, timestamp time.Time) {}, + } + + // Mock app monitor + appMonitorMock := hcMock.MockApplicationMonitor{ + NotifyEventCall: func(counterType int) {}, + } + + // Create split updater + splitUpdater := NewSplitUpdater( + splitMockStorage, + ruleBasedSegmentMockStorage, + splitMockFetcher, + logging.NewLogger(&logging.LoggerOptions{}), + telemetryMockStorage, + appMonitorMock, + flagsets.NewFlagSetFilter(nil), + ) + + // Test case 1: till is less than both currentSince and currentRBSince + till := int64(50) + result, err := splitUpdater.SynchronizeSplits(&till) + + if err != nil { + t.Error("Expected no error, got:", err) + } + + if result == nil { + t.Error("Expected non-nil result") + } + + if fetchCalled { + t.Error("Fetcher should not have been called when till is less than 
both currentSince and currentRBSince") + } + + // Test case 2: till is equal to currentSince but less than currentRBSince + till = currentSince + result, err = splitUpdater.SynchronizeSplits(&till) + + if err != nil { + t.Error("Expected no error when till equals currentSince, got:", err) + } + + if !fetchCalled { + t.Error("Fetcher should have been called when till equals currentSince (since currentRBSince is higher)") + } + + // Test case 3: till is equal to currentRBSince but greater than currentSince + till = currentRBSince + result, err = splitUpdater.SynchronizeSplits(&till) + + if err != nil { + t.Error("Expected no error when till equals currentRBSince, got:", err) + } + + if !fetchCalled { + t.Error("Fetcher should have been called when till equals currentRBSince") + } +} + +func TestSynchronizeFeatureFlagsRuleBasedUpdate(t *testing.T) { + // Mock rule based segment storage with testify/mock + ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} + + // Mock split storage + splitMockStorage := mocks.MockSplitStorage{ + ChangeNumberCall: func() (int64, error) { return 200, nil }, + UpdateCall: func(toAdd []dtos.SplitDTO, toRemove []dtos.SplitDTO, changeNumber int64) {}, + AllCall: func() []dtos.SplitDTO { return []dtos.SplitDTO{} }, + } + + // Mock fetcher + var fetchCalled bool + var fetchCount int + splitMockFetcher := fetcherMock.MockSplitFetcher{ + FetchCall: func(requestParams *service.FlagRequestParams) (*dtos.SplitChangesDTO, error) { + fetchCalled = true + fetchCount++ + if fetchCount == 1 { + return &dtos.SplitChangesDTO{ + FeatureFlags: dtos.FeatureFlagsDTO{ + Since: 100, + Till: 200, + Splits: []dtos.SplitDTO{}, + }, + RuleBasedSegments: dtos.RuleBasedSegmentsDTO{ + Since: 100, + Till: 200, + RuleBasedSegments: []dtos.RuleBasedSegmentDTO{}, + }, + }, nil + } + return &dtos.SplitChangesDTO{ + FeatureFlags: dtos.FeatureFlagsDTO{ + Since: 200, + Till: 200, + Splits: []dtos.SplitDTO{}, + }, + RuleBasedSegments: 
dtos.RuleBasedSegmentsDTO{ + Since: 200, + Till: 200, + RuleBasedSegments: []dtos.RuleBasedSegmentDTO{}, + }, + }, nil + }, + } + + // Mock telemetry storage + telemetryMockStorage := mocks.MockTelemetryStorage{ + RecordSyncLatencyCall: func(resource int, latency time.Duration) {}, + RecordSuccessfulSyncCall: func(resource int, timestamp time.Time) {}, + RecordSyncErrorCall: func(resource, status int) {}, + } + + // Mock app monitor + appMonitorMock := hcMock.MockApplicationMonitor{ + NotifyEventCall: func(counterType int) {}, + } + + // Create split updater + splitUpdater := NewSplitUpdater( + splitMockStorage, + ruleBasedSegmentMockStorage, + splitMockFetcher, + logging.NewLogger(&logging.LoggerOptions{}), + telemetryMockStorage, + appMonitorMock, + flagsets.NewFlagSetFilter(nil), + ) + + // Test case 1: When rule-based segment change number is lower than current + lowerChangeNumber := int64(100) + ruleBasedSegment := &dtos.RuleBasedSegmentDTO{ + Name: "test-segment", + ChangeNumber: lowerChangeNumber, + Conditions: []dtos.RuleBasedConditionDTO{ + { + ConditionType: "WHITELIST", + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{}, + }, + }, + }, + } + baseMessage := dtos.NewBaseMessage(time.Now().Unix(), "test-channel") + baseUpdate := dtos.NewBaseUpdate(baseMessage, lowerChangeNumber) + ffChange := *dtos.NewRuleBasedSegmentChangeUpdate(baseUpdate, nil, ruleBasedSegment) + + // Reset fetchCalled + fetchCalled = false + + // Set up expectations for the first test case + ruleBasedSegmentMockStorage.On("ChangeNumber").Return(int(200)) + ruleBasedSegmentMockStorage.On("Update", + mock.MatchedBy(func(toAdd []dtos.RuleBasedSegmentDTO) bool { return len(toAdd) == 0 }), + mock.MatchedBy(func(toRemove []dtos.RuleBasedSegmentDTO) bool { return len(toRemove) == 0 }), + int64(200)).Return() + + result, err := splitUpdater.SynchronizeFeatureFlags(&ffChange) + + if err != nil { + t.Error("Expected no error, got:", err) + } + + if result.RequiresFetch { + 
t.Error("Expected RequiresFetch to be false when change number is lower than current") + } + + if fetchCalled { + t.Error("Fetcher should not have been called when change number is lower than current") + } + + // Test case 2: When rule-based segment change number is higher than current + higherChangeNumber := int64(300) + ruleBasedSegment = &dtos.RuleBasedSegmentDTO{ + Name: "test-segment", + ChangeNumber: higherChangeNumber, + Conditions: []dtos.RuleBasedConditionDTO{ + { + ConditionType: "WHITELIST", + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{}, + }, + }, + }, + } + baseMessage = dtos.NewBaseMessage(time.Now().Unix(), "test-channel") + baseUpdate = dtos.NewBaseUpdate(baseMessage, higherChangeNumber) + ffChange = *dtos.NewRuleBasedSegmentChangeUpdate(baseUpdate, nil, ruleBasedSegment) + + // Reset fetchCalled + fetchCalled = false + + // Set up expectations for the second test case + ruleBasedSegmentMockStorage = &mocks.MockRuleBasedSegmentStorage{} + ruleBasedSegmentMockStorage.On("ChangeNumber").Return(int(100)) + ruleBasedSegmentMockStorage.On("Update", + mock.MatchedBy(func(toAdd []dtos.RuleBasedSegmentDTO) bool { return len(toAdd) == 1 && toAdd[0].ChangeNumber == higherChangeNumber }), + mock.MatchedBy(func(toRemove []dtos.RuleBasedSegmentDTO) bool { return len(toRemove) == 0 }), + higherChangeNumber).Return() + + // Create a new split updater for the second test case + splitUpdater = NewSplitUpdater( + splitMockStorage, + ruleBasedSegmentMockStorage, + splitMockFetcher, + logging.NewLogger(&logging.LoggerOptions{}), + telemetryMockStorage, + appMonitorMock, + flagsets.NewFlagSetFilter(nil), + ) + + result, err = splitUpdater.SynchronizeFeatureFlags(&ffChange) + + if err != nil { + t.Error("Expected no error, got:", err) + } + + if result.RequiresFetch { + t.Error("Expected RequiresFetch to be false when change number is higher than current") + } + + if fetchCalled { + t.Error("Fetcher should not have been called when change number is 
higher than current") + } + + if result.NewChangeNumber != higherChangeNumber { + t.Errorf("Expected NewChangeNumber to be %d, got %d", higherChangeNumber, result.NewChangeNumber) + } + + // Verify that the rule-based segment storage was updated with the higher change number + ruleBasedSegmentMockStorage.AssertCalled(t, "Update", mock.Anything, mock.Anything, higherChangeNumber) +} + func TestProcessMatchers(t *testing.T) { ruleBasedSegmentMockStorage := &mocks.MockRuleBasedSegmentStorage{} ruleBasedSegmentMockStorage.On("ChangeNumber").Twice().Return(-1) From a4defe8052608d80f7d8965f9515feefbf141227 Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Fri, 15 Aug 2025 15:07:49 -0300 Subject: [PATCH 8/9] Add test cases for notification --- dtos/notification_test.go | 66 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/dtos/notification_test.go b/dtos/notification_test.go index d1e48591..f8604328 100644 --- a/dtos/notification_test.go +++ b/dtos/notification_test.go @@ -226,6 +226,72 @@ func TestControlUpdate(t *testing.T) { } } +func TestRuleBasedSegmentChangeUpdate(t *testing.T) { + // Test case 1: With rule-based segment data + ruleBasedSegment := &RuleBasedSegmentDTO{ + Name: "test-segment", + ChangeNumber: 123456, + Conditions: []RuleBasedConditionDTO{ + { + ConditionType: "WHITELIST", + MatcherGroup: MatcherGroupDTO{ + Matchers: []MatcherDTO{}, + }, + }, + }, + } + + rbUpdate := NewRuleBasedSegmentChangeUpdate(NewBaseUpdate(NewBaseMessage(123456789, "rb_channel"), 123456), nil, ruleBasedSegment) + if rbUpdate.EventType() != SSEEventTypeMessage { + t.Error("Unexpected EventType") + } + if rbUpdate.Timestamp() != 123456789 { + t.Error("Unexpected Timestamp") + } + if rbUpdate.Channel() != "rb_channel" { + t.Error("Unexpected Channel") + } + if rbUpdate.MessageType() != MessageTypeUpdate { + t.Error("Unexpected MessageType") + } + if rbUpdate.ChangeNumber() != 123456 { + t.Error("Unexpected ChangeNumber") + } + if 
rbUpdate.UpdateType() != UpdateTypeRuleBasedChange { + t.Error("Unexpected UpdateType, got:", rbUpdate.UpdateType()) + } + if rbUpdate.String() != "SplitChange(channel=rb_channel,changeNumber=123456,timestamp=123456789)" { + t.Error("Unexpected String", rbUpdate.String()) + } + if rbUpdate.RuleBasedSegment() == nil { + t.Error("RuleBasedSegment should not be nil") + } + if rbUpdate.RuleBasedSegment().Name != "test-segment" { + t.Error("Unexpected RuleBasedSegment name") + } + if rbUpdate.RuleBasedSegment().ChangeNumber != 123456 { + t.Error("Unexpected RuleBasedSegment change number") + } + if len(rbUpdate.RuleBasedSegment().Conditions) != 1 { + t.Error("RuleBasedSegment should have 1 condition") + } + if rbUpdate.RuleBasedSegment().Conditions[0].ConditionType != "WHITELIST" { + t.Error("Unexpected condition type") + } + + // Test case 2: Without rule-based segment data + rbUpdateNoSegment := NewRuleBasedSegmentChangeUpdate(NewBaseUpdate(NewBaseMessage(123456789, "rb_channel"), 123456), nil, nil) + if rbUpdateNoSegment.EventType() != SSEEventTypeMessage { + t.Error("Unexpected EventType for no segment case") + } + if rbUpdateNoSegment.UpdateType() != UpdateTypeSplitChange { + t.Error("Unexpected UpdateType for no segment case, got:", rbUpdateNoSegment.UpdateType()) + } + if rbUpdateNoSegment.RuleBasedSegment() != nil { + t.Error("RuleBasedSegment should be nil for no segment case") + } +} + func TestLargeSegmentChangeUpdate(t *testing.T) { ls := []LargeSegmentRFDResponseDTO{ { From 85b00183a7327e5f5014d357e9db339a378380a2 Mon Sep 17 00:00:00 2001 From: Nadia Mayor Date: Fri, 15 Aug 2025 15:40:20 -0300 Subject: [PATCH 9/9] Add test cases --- engine/validator/matchers_test.go | 54 ++++++++++++++ push/parser_test.go | 117 +++++++++++++++++++++++++++++- 2 files changed, 170 insertions(+), 1 deletion(-) diff --git a/engine/validator/matchers_test.go b/engine/validator/matchers_test.go index feffd474..08f6e2a0 100644 --- a/engine/validator/matchers_test.go +++ 
b/engine/validator/matchers_test.go @@ -9,6 +9,60 @@ import ( "github.com/splitio/go-toolkit/v5/logging" ) +func TestProcessRBMatchers(t *testing.T) { + // Test case 1: Rule-based segment with unsupported matcher + ruleBased := &dtos.RuleBasedSegmentDTO{ + Name: "test-segment", + ChangeNumber: 123, + Conditions: []dtos.RuleBasedConditionDTO{ + { + ConditionType: grammar.ConditionTypeRollout, + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + {MatcherType: "NEW_MATCHER", KeySelector: nil}, + }, + }, + }, + }, + } + ProcessRBMatchers(ruleBased, logging.NewLogger(nil)) + if len(ruleBased.Conditions) != 1 { + t.Error("Conditions should have been overridden") + } + if ruleBased.Conditions[0].ConditionType != grammar.ConditionTypeWhitelist { + t.Error("ConditionType should be WHITELIST") + } + if ruleBased.Conditions[0].MatcherGroup.Matchers[0].MatcherType != grammar.MatcherTypeAllKeys { + t.Error("MatcherType should be ALL_KEYS") + } + + // Test case 2: Rule-based segment with supported matcher + ruleBased = &dtos.RuleBasedSegmentDTO{ + Name: "test-segment", + ChangeNumber: 123, + Conditions: []dtos.RuleBasedConditionDTO{ + { + ConditionType: grammar.ConditionTypeRollout, + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{ + {MatcherType: grammar.MatcherTypeEndsWith, KeySelector: nil, String: common.StringRef("test")}, + }, + }, + }, + }, + } + ProcessRBMatchers(ruleBased, logging.NewLogger(nil)) + if len(ruleBased.Conditions) != 1 { + t.Error("Conditions should not have been overridden") + } + if ruleBased.Conditions[0].ConditionType != grammar.ConditionTypeRollout { + t.Error("ConditionType should be ROLLOUT") + } + if ruleBased.Conditions[0].MatcherGroup.Matchers[0].MatcherType != grammar.MatcherTypeEndsWith { + t.Error("MatcherType should be ENDS_WITH") + } +} + func TestProcessMatchers(t *testing.T) { split := &dtos.SplitDTO{ Conditions: []dtos.ConditionDTO{ diff --git a/push/parser_test.go b/push/parser_test.go index 
4591fc1d..94d7d422 100644 --- a/push/parser_test.go +++ b/push/parser_test.go @@ -6,6 +6,7 @@ import ( "github.com/splitio/go-split-commons/v6/dtos" "github.com/splitio/go-toolkit/v5/common" + "github.com/splitio/go-toolkit/v5/datautils" "github.com/splitio/go-toolkit/v5/logging" sseMocks "github.com/splitio/go-toolkit/v5/sse/mocks" ) @@ -42,7 +43,8 @@ func TestParseSplitUpdate(t *testing.T) { logger := logging.NewLogger(nil) parser := &NotificationParserImpl{ - logger: logger, + logger: logger, + dataUtils: NewDataUtilsImpl(), onSplitUpdate: func(u *dtos.SplitChangeUpdate) error { if u.ChangeNumber() != 123 { t.Error(CN_SHOULD_BE_123, u.ChangeNumber()) @@ -308,6 +310,119 @@ func TestParseInstantFFCompressTypeGreaterTwo(t *testing.T) { } } +func TestParseRuleBasedSegmentUpdate(t *testing.T) { + // Test case 1: With rule-based segment data + event := &sseMocks.RawEventMock{ + IDCall: func() string { return "abc" }, + EventCall: func() string { return dtos.SSEEventTypeMessage }, + DataCall: func() string { + ruleBasedSegment := dtos.RuleBasedSegmentDTO{ + Name: "test-segment", + ChangeNumber: 123, + Conditions: []dtos.RuleBasedConditionDTO{ + { + ConditionType: "WHITELIST", + MatcherGroup: dtos.MatcherGroupDTO{ + Matchers: []dtos.MatcherDTO{}, + }, + }, + }, + } + ruleBasedJSON, _ := json.Marshal(ruleBasedSegment) + base64JSON, _ := datautils.Encode(ruleBasedJSON, datautils.Base64) + updateJSON, _ := json.Marshal(genericMessageData{ + Type: dtos.UpdateTypeRuleBasedChange, + ChangeNumber: 123, + PreviousChangeNumber: 100, + CompressType: common.IntRef(0), + Definition: common.StringRef(base64JSON), + }) + mainJSON, _ := json.Marshal(genericData{ + Timestamp: 123, + Data: string(updateJSON), + Channel: "sarasa_splits", + }) + return string(mainJSON) + }, + IsErrorCall: func() bool { return false }, + IsEmptyCall: func() bool { return false }, + RetryCall: func() int64 { return 0 }, + } + + logger := logging.NewLogger(nil) + parser := &NotificationParserImpl{ + logger: 
logger, + dataUtils: NewDataUtilsImpl(), + onSplitUpdate: func(u *dtos.SplitChangeUpdate) error { + if u.ChangeNumber() != 123 { + t.Error(CN_SHOULD_BE_123, u.ChangeNumber()) + } + if u.Channel() != "sarasa_splits" { + t.Error(CHANNEL_SHOULD_BE, u.Channel()) + } + if u.UpdateType() != dtos.UpdateTypeRuleBasedChange { + t.Error("update type should be RB_SEGMENT_UPDATE. Is: ", u.UpdateType()) + } + if u.RuleBasedSegment() == nil { + t.Error("rule-based segment should not be nil") + } + if u.RuleBasedSegment().Name != "test-segment" { + t.Error("rule-based segment name should be test-segment. Is: ", u.RuleBasedSegment().Name) + } + return nil + }, + } + + if status, err := parser.ParseAndForward(event); status != nil || err != nil { + t.Error(ERROR_SHOULD_RETURNED, err) + } + + // Test case 2: Without rule-based segment data + eventNoSegment := &sseMocks.RawEventMock{ + IDCall: func() string { return "abc" }, + EventCall: func() string { return dtos.SSEEventTypeMessage }, + DataCall: func() string { + updateJSON, _ := json.Marshal(genericMessageData{ + Type: dtos.UpdateTypeRuleBasedChange, + ChangeNumber: 123, + }) + mainJSON, _ := json.Marshal(genericData{ + Timestamp: 123, + Data: string(updateJSON), + Channel: "sarasa_splits", + }) + return string(mainJSON) + }, + IsErrorCall: func() bool { return false }, + IsEmptyCall: func() bool { return false }, + RetryCall: func() int64 { return 0 }, + } + + parserNoSegment := &NotificationParserImpl{ + logger: logger, + dataUtils: NewDataUtilsImpl(), + onSplitUpdate: func(u *dtos.SplitChangeUpdate) error { + if u.ChangeNumber() != 123 { + t.Error(CN_SHOULD_BE_123, u.ChangeNumber()) + } + if u.Channel() != "sarasa_splits" { + t.Error(CHANNEL_SHOULD_BE, u.Channel()) + } + if u.UpdateType() != dtos.UpdateTypeSplitChange { + t.Error("update type should be SPLIT_UPDATE. 
Is: ", u.UpdateType()) + } + if u.RuleBasedSegment() != nil { + t.Error("rule-based segment should be nil") + } + return nil + }, + } + + if status, err := parserNoSegment.ParseAndForward(eventNoSegment); status != nil || err != nil { + t.Error(ERROR_SHOULD_RETURNED, err) + } +} + func TestParseSplitKill(t *testing.T) { event := &sseMocks.RawEventMock{ IDCall: func() string { return "abc" },