-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
refactor(cloudevents-server): add kafka topic to store event (#165)
Signed-off-by: wuhuizuo <[email protected]>
- Loading branch information
Showing
10 changed files
with
455 additions
and
49 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Large diffs are not rendered by default.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,173 @@ | ||
package handler | ||
|
||
import ( | ||
"context" | ||
"os" | ||
"os/signal" | ||
"sync" | ||
"syscall" | ||
|
||
cloudevents "github.com/cloudevents/sdk-go/v2" | ||
"github.com/rs/zerolog/log" | ||
kafka "github.com/segmentio/kafka-go" | ||
|
||
"github.com/PingCAP-QE/ee-apps/cloudevents-server/pkg/config" | ||
skakfa "github.com/PingCAP-QE/ee-apps/cloudevents-server/pkg/kafka" | ||
) | ||
|
||
func NewEventProducer(cfg config.Kafka) (*EventProducer, error) { | ||
writer, err := skakfa.NewWriter(cfg.Authentication, cfg.Brokers, "", cfg.ClientID) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
||
return &EventProducer{ | ||
writer: writer, | ||
unknowEventTopic: cfg.Producer.DefaultTopic, | ||
topicMapping: cfg.Producer.TopicMapping, | ||
}, nil | ||
} | ||
|
||
func NewEventConsumer(cfg config.Kafka, topic string, hander EventHandler) (*EventConsumer, error) { | ||
reader, err := skakfa.NewReader(cfg.Authentication, cfg.Brokers, topic, cfg.Consumer.GroupID, cfg.ClientID) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
||
return &EventConsumer{ | ||
reader: reader, | ||
handler: hander, | ||
}, nil | ||
} | ||
|
||
func NewEventConsumerGroup(cfg config.Kafka, hander EventHandler) (EventConsumerGroup, error) { | ||
consumerGroup := make(EventConsumerGroup) | ||
for _, topic := range cfg.Consumer.TopicMapping { | ||
if consumerGroup[topic] != nil { | ||
continue | ||
} | ||
consumer, err := NewEventConsumer(cfg, topic, hander) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
||
consumerGroup[topic] = consumer | ||
} | ||
|
||
return consumerGroup, nil | ||
} | ||
|
||
// EventProducer is the main structure for our event broker | ||
type EventProducer struct { | ||
writer *kafka.Writer | ||
unknowEventTopic string | ||
topicMapping map[string]string // Map event type to Kafka topic | ||
} | ||
|
||
func (eb *EventProducer) HandleCloudEvent(ctx context.Context, event cloudevents.Event) cloudevents.Result { | ||
eventType := event.Type() | ||
topic, ok := eb.topicMapping[eventType] | ||
|
||
// Use default topic if not found in mapping | ||
if !ok { | ||
log.Debug().Str("event-type", eventType).Msg("No topic found for event type, using default topic") | ||
topic = eb.unknowEventTopic | ||
} | ||
|
||
cloudEventBytes, err := event.MarshalJSON() | ||
if err != nil { | ||
log.Err(err).Msg("error marshalling Cloud Event") | ||
return cloudevents.ResultNACK | ||
} | ||
|
||
message := kafka.Message{ | ||
Topic: topic, | ||
Key: []byte(event.ID()), | ||
Value: cloudEventBytes, | ||
} | ||
|
||
err = eb.writer.WriteMessages(ctx, message) | ||
if err != nil { | ||
log.Err(err).Str("topic", topic).Str("ce-id", event.ID()).Msg("error writing message to Kafka") | ||
return err | ||
} | ||
|
||
log.Debug().Str("topic", topic).Str("ce-id", event.ID()).Msg("message written to Kafka") | ||
return cloudevents.ResultACK | ||
} | ||
|
||
// EventConsumerGroup maps a Kafka topic name to the consumer that reads it.
type EventConsumerGroup map[string]*EventConsumer
|
||
func (ecs EventConsumerGroup) Close() { | ||
for _, ec := range ecs { | ||
if ec != nil { | ||
ec.Close() | ||
} | ||
} | ||
} | ||
|
||
func (ecs EventConsumerGroup) Start(ctx context.Context) { | ||
wg := new(sync.WaitGroup) | ||
for _, ec := range ecs { | ||
if ec != nil { | ||
wg.Add(1) | ||
go func(c *EventConsumer) { | ||
c.Start(ctx) | ||
wg.Done() | ||
}(ec) | ||
} | ||
} | ||
wg.Wait() | ||
} | ||
|
||
type EventConsumer struct { | ||
reader *kafka.Reader | ||
writer *kafka.Writer // used for ack and put into dead letter queue. | ||
handler EventHandler | ||
faultTopic string // dead letter topic | ||
} | ||
|
||
// consumer workers | ||
func (ec *EventConsumer) Start(ctx context.Context) error { | ||
sigterm := make(chan os.Signal, 1) | ||
signal.Notify(sigterm, syscall.SIGINT, syscall.SIGTERM) | ||
|
||
defer ec.Close() | ||
|
||
for { | ||
select { | ||
case <-ctx.Done(): | ||
return ctx.Err() | ||
case <-sigterm: | ||
// When SIGTERM received, try to flush remaining messages | ||
// and exit gracefully | ||
return nil | ||
default: | ||
m, err := ec.reader.ReadMessage(ctx) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
var event cloudevents.Event | ||
err = event.UnmarshalJSON(m.Value) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
result := ec.handler.Handle(event) | ||
if !cloudevents.IsACK(result) { | ||
log.Error().Err(err).Msg("error handling event") | ||
// ec.writer.WriteMessages(ctx, kafka.Message{Topic: ec.faultTopic, Key: m.Key, Value: m.Value}) | ||
} | ||
} | ||
} | ||
} | ||
|
||
func (ec *EventConsumer) Close() { | ||
if ec.reader != nil { | ||
ec.reader.Close() | ||
} | ||
if ec.writer != nil { | ||
ec.writer.Close() | ||
} | ||
} |
Oops, something went wrong.