作者 唐旭辉 (Author: Tang Xuhui)

.

... ... @@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"time"
"gitlab.fjmaimaimai.com/mmm-go/partnermg/pkg/port/consumer/configs"
... ... @@ -91,94 +90,94 @@ func NewRuner() *Runer {
return r
}
// InitConsumer builds a sarama consumer group from the runner's configured
// broker list and group id and stores it on the receiver.
//
// Fixes: the validation-failure message said "producer" for a consumer
// config, and an invalid config caused a panic even though the function
// already returns an error — it now reports the failure to the caller.
func (r *Runer) InitConsumer() error {
	config := sarama.NewConfig()
	//config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
	// Start from the oldest retained offset the first time this group runs.
	config.Consumer.Offsets.Initial = sarama.OffsetOldest
	// Broker protocol version; must not exceed the actual cluster version.
	config.Version = sarama.V0_10_2_1
	if err := config.Validate(); err != nil {
		msg := fmt.Sprintf("Kafka consumer config invalidate. config: %v. err: %v", configs.Cfg, err)
		logs.Error(msg)
		return errors.New(msg)
	}
	consumerGroup, err := sarama.NewConsumerGroup(r.msgConsumer.kafkaHosts, r.msgConsumer.groupId, config)
	if err != nil {
		return err
	}
	r.consumerGroup = consumerGroup
	return nil
}
// func (r *Runer) InitConsumer() error {
// clusterCfg := cluster.NewConfig()
// clusterCfg.Consumer.Return.Errors = true
// clusterCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
// clusterCfg.Group.Return.Notifications = true
// clusterCfg.Version = sarama.V0_10_2_1
// khosts := []string{"192.168.0.252:9092", "192.168.0.251:9092", "192.168.0.250:9092"}
// groupid := "partnermg_dev"
// topic := []string{"topic_test"}
// // consumer, err := cluster.NewConsumer(r.msgConsumer.kafkaHosts, r.msgConsumer.groupId, r.msgConsumer.topics, clusterCfg)
// consumer, err := cluster.NewConsumer(khosts, groupid, topic, clusterCfg)
// if err != nil {
// msg := fmt.Sprintf("Create kafka consumer error: %v. config: %v", err, clusterCfg)
// config := sarama.NewConfig()
// //config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
// config.Consumer.Offsets.Initial = sarama.OffsetOldest
// config.Version = sarama.V0_10_2_1
// if err := config.Validate(); err != nil {
// msg := fmt.Sprintf("Kafka producer config invalidate. config: %v. err: %v", configs.Cfg, err)
// logs.Error(msg)
// panic(msg)
// }
// r.Consumer = consumer
// consumerGroup, err := sarama.NewConsumerGroup(r.msgConsumer.kafkaHosts, r.msgConsumer.groupId, config)
// if err != nil {
// return err
// }
// r.consumerGroup = consumerGroup
// return nil
// }
func (r *Runer) Start(ctx context.Context) {
defer func() {
if e := recover(); e != nil {
logs.Error(e)
}
}()
for {
select {
case <-ctx.Done():
logs.Warning("ctx cancel;consumerGroup.Close()")
r.consumerGroup.Close()
return
default:
if err := r.consumerGroup.Consume(ctx, r.msgConsumer.topics, r.msgConsumer); err != nil {
logs.Error("consumerGroup err:%s \n", err)
//等待重试
timer := time.NewTimer(5 * time.Second)
<-timer.C
}
r.msgConsumer.ready = make(chan struct{})
}
// InitConsumer creates a bsm/sarama-cluster consumer for the runner's
// configured brokers, group id and topics and stores it on the receiver.
//
// Fixes: a creation failure caused a panic even though the function
// declares an error return — it now logs and returns the error, matching
// the error-returning style of the consumer-group variant of this method.
// Dead commented-out hard-coded test hosts were removed.
func (r *Runer) InitConsumer() error {
	clusterCfg := cluster.NewConfig()
	// Surface consumer errors and rebalance notifications on their channels
	// so the Start loop can log them.
	clusterCfg.Consumer.Return.Errors = true
	clusterCfg.Consumer.Offsets.Initial = sarama.OffsetOldest
	clusterCfg.Group.Return.Notifications = true
	// Broker protocol version; must not exceed the actual cluster version.
	clusterCfg.Version = sarama.V0_10_2_1
	consumer, err := cluster.NewConsumer(r.msgConsumer.kafkaHosts, r.msgConsumer.groupId, r.msgConsumer.topics, clusterCfg)
	if err != nil {
		msg := fmt.Sprintf("Create kafka consumer error: %v. config: %v", err, clusterCfg)
		logs.Error(msg)
		return errors.New(msg)
	}
	r.Consumer = consumer
	return nil
}
// func (r *Runer) Start(ctx context.Context) {
// defer func() {
// if e := recover(); e != nil {
// logs.Error(e)
// }
// }()
// for {
// select {
// case msg, more := <-r.Consumer.Messages():
// if more {
// logs.Info("Partition:%d, Offset:%d, Key:%s, Value:%s Timestamp:%s\n", msg.Partition, msg.Offset, string(msg.Key), string(msg.Value), msg.Timestamp)
// r.Consumer.MarkOffset(msg, "") // mark message as processed
// }
// case err, more := <-r.Consumer.Errors():
// if more {
// logs.Info("Kafka consumer error: %v", err.Error())
// }
// case ntf, more := <-r.Consumer.Notifications():
// if more {
// logs.Info("Kafka consumer rebalance: %v", ntf)
// }
// case <-ctx.Done():
// logs.Info("Stop consumer server...")
// r.Consumer.Close()
// logs.Warning("ctx cancel;consumerGroup.Close()")
// r.consumerGroup.Close()
// return
// default:
// if err := r.consumerGroup.Consume(ctx, r.msgConsumer.topics, r.msgConsumer); err != nil {
// logs.Error("consumerGroup err:%s \n", err)
// //等待重试
// timer := time.NewTimer(5 * time.Second)
// <-timer.C
// }
// r.msgConsumer.ready = make(chan struct{})
// }
// }
// }
// Start pumps messages, consumer errors and rebalance notifications from
// the cluster consumer until ctx is cancelled, at which point the consumer
// is closed and the loop exits. Each delivered message is logged and its
// offset marked as processed.
func (r *Runer) Start(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			logs.Info("Stop consumer server...")
			r.Consumer.Close()
			return
		case msg, ok := <-r.Consumer.Messages():
			if !ok {
				continue
			}
			logs.Info("Partition:%d, Offset:%d, Key:%s, Value:%s Timestamp:%s\n", msg.Partition, msg.Offset, string(msg.Key), string(msg.Value), msg.Timestamp)
			r.Consumer.MarkOffset(msg, "") // mark message as processed
		case consumeErr, ok := <-r.Consumer.Errors():
			if !ok {
				continue
			}
			logs.Info("Kafka consumer error: %v", consumeErr.Error())
		case rebalance, ok := <-r.Consumer.Notifications():
			if !ok {
				continue
			}
			logs.Info("Kafka consumer rebalance: %v", rebalance)
		}
	}
}
// IsReady exposes the message consumer's readiness signal channel so
// callers can block until the consumer is ready.
// NOTE(review): presumably the handler closes this channel once a consume
// session is set up — confirm against the msgConsumer implementation.
func (r *Runer) IsReady() <-chan struct{} {
	return r.msgConsumer.ready
}
... ...