producer.go
5.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
package kafkax
import (
"encoding/json"
"fmt"
"github.com/Shopify/sarama"
"gitlab.fjmaimaimai.com/mmm-go/partner/pkg/infrastructure/message/models"
"log"
"strings"
"time"
)
// KafkaMessageProducer publishes messages to Kafka using the sarama
// synchronous producer. (Translated from: "sarama kafka message producer".)
type KafkaMessageProducer struct {
	KafkaHosts string // comma-separated broker list, e.g. "host1:9092,host2:9092"
	LogInfo models.LogInfo // logging configuration — not referenced by Publish in this file
}
// Publish synchronously sends each message to its Topic and reports which
// message IDs succeeded and which failed. (Translated from: "synchronous send".)
//
// A new SyncProducer is dialed per call from the comma-separated broker
// list in engine.KafkaHosts and closed before returning. The option map
// is currently unused. The returned error is non-nil only when the
// producer itself cannot be created; per-message failures are reported
// via ErrorMessageIds.
func (engine *KafkaMessageProducer) Publish(messages []*models.Message, option map[string]interface{}) (*models.MessagePublishResult, error) {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true
	config.Producer.Partitioner = sarama.NewRandomPartitioner
	config.Producer.Retry.Max = 10
	config.Producer.RequiredAcks = sarama.WaitForAll
	config.Version = sarama.V0_11_0_0
	brokerList := strings.Split(engine.KafkaHosts, ",")
	producer, err := sarama.NewSyncProducer(brokerList, config)
	if err != nil {
		return nil, err
	}
	defer func() {
		if closeErr := producer.Close(); closeErr != nil {
			log.Println(closeErr)
		}
	}()
	var successMessageIds []int64
	var errMessageIds []int64
	for _, message := range messages {
		value, err := json.Marshal(message)
		if err != nil {
			// Fix: marshal failures were previously skipped silently, leaving
			// those IDs in neither result list; report them as errors so the
			// caller can reset their status and retry.
			errMessageIds = append(errMessageIds, message.Id)
			log.Println(err)
			continue
		}
		msg := &sarama.ProducerMessage{
			Topic:     message.Topic,
			Value:     sarama.StringEncoder(value),
			Timestamp: time.Now(),
		}
		partition, offset, err := producer.SendMessage(msg)
		if err != nil {
			errMessageIds = append(errMessageIds, message.Id)
			log.Println(err)
			continue
		}
		successMessageIds = append(successMessageIds, message.Id)
		// Renamed from `append`, which shadowed the builtin of the same name.
		details := map[string]interface{}{
			"topic":     message.Topic,
			"partition": partition,
			"offset":    offset,
		}
		log.Println("kafka消息发送", details)
	}
	return &models.MessagePublishResult{SuccessMessageIds: successMessageIds, ErrorMessageIds: errMessageIds}, nil
}
// MessageDispatcher coordinates message publishing: it periodically scans
// the repository for unpublished messages and also serves a direct
// publish channel. (Translated from: "message dispatcher".)
type MessageDispatcher struct {
	notifications chan struct{} // signals Dispatch to scan and publish stored messages
	messageChan chan *models.Message // direct-publish queue that bypasses the repository
	dispatchTicker *time.Ticker // periodic trigger for the stored-message scan
	messageRepository models.MessageRepository // message persistence; may be nil (direct-publish only)
	producer models.MessageProducer // the actual publisher (Kafka in this file)
}
// MessagePublishedNotice tells the dispatch loop that newly stored
// messages are waiting. It blocks until Dispatch receives the signal.
// NOTE(review): the fixed 2-second sleep presumably lets the storing
// transaction commit before the dispatcher queries — confirm it is
// still required.
func (dispatcher *MessageDispatcher) MessagePublishedNotice() error {
	time.Sleep(2 * time.Second)
	dispatcher.notifications <- struct{}{}
	return nil
}
// MessagePublish queues each message onto the dispatcher's direct-publish
// channel; the Dispatch loop sends them to Kafka without persisting them.
// Blocks whenever the channel buffer is full. Always returns nil.
func (dispatcher *MessageDispatcher) MessagePublish(messages []*models.Message) error {
	for _, msg := range messages {
		dispatcher.messageChan <- msg
	}
	return nil
}
// Dispatch runs the dispatch loop; start it on its own goroutine with
// `go dispatcher.Dispatch()`. It never returns.
//
// Three event sources are served:
//   - the ticker, which is forwarded as a notification;
//   - notifications, which load stored-but-unpublished messages, mark
//     them InProcess, publish them, then record per-ID outcomes;
//   - messageChan, whose messages are published directly.
func (dispatcher *MessageDispatcher) Dispatch() {
	for {
		select {
		case <-dispatcher.dispatchTicker.C:
			// Forward the tick as a notification from a goroutine so a
			// busy loop never blocks the ticker case.
			go func(d *MessageDispatcher) {
				d.notifications <- struct{}{}
			}(dispatcher)
		case <-dispatcher.notifications:
			if dispatcher.messageRepository == nil {
				continue
			}
			messages, err := dispatcher.messageRepository.FindNoPublishedStoredMessages()
			if err != nil {
				// Previously ignored; surface it so lookup failures are visible.
				log.Println(err)
				continue
			}
			if len(messages) == 0 {
				continue
			}
			messagesInProcessIds := make([]int64, 0, len(messages))
			for i := range messages {
				messagesInProcessIds = append(messagesInProcessIds, messages[i].Id)
			}
			dispatcher.messageRepository.FinishMessagesStatus(messagesInProcessIds, int(models.InProcess))
			result, err := dispatcher.producer.Publish(messages, nil)
			if err != nil || result == nil {
				// Fix: the original dereferenced a nil result when Publish
				// failed (panic) and left the batch stuck InProcess forever.
				// Revert to UnFinished so the next cycle retries it.
				if err != nil {
					log.Println(err)
				}
				dispatcher.messageRepository.FinishMessagesStatus(messagesInProcessIds, int(models.UnFinished))
				continue
			}
			if len(result.SuccessMessageIds) > 0 {
				dispatcher.messageRepository.FinishMessagesStatus(result.SuccessMessageIds, int(models.Finished))
			}
			// Failed IDs: InProcess -> UnFinished so they can be retried.
			if len(result.ErrorMessageIds) > 0 {
				dispatcher.messageRepository.FinishMessagesStatus(result.ErrorMessageIds, int(models.UnFinished))
			}
		case msg := <-dispatcher.messageChan:
			// Direct publishes are fire-and-forget; log failures instead of
			// silently discarding the error as before.
			if _, err := dispatcher.producer.Publish([]*models.Message{msg}, nil); err != nil {
				log.Println(err)
			}
		}
	}
}
// MessageDirector is the entry point for publishing messages: it persists
// them when a repository is configured and notifies the dispatcher to
// deliver them.
type MessageDirector struct {
	messageRepository models.MessageRepository // optional persistence layer; nil means publish directly
	dispatcher *MessageDispatcher // background dispatch loop; nil means not started
}
// PublishMessages accepts a batch of messages for delivery.
//
// Without a repository the batch is pushed straight onto the dispatcher's
// channel. Otherwise every message is persisted first and the dispatcher
// is notified; a save failure aborts immediately, so earlier messages of
// the batch may already be stored.
func (d *MessageDirector) PublishMessages(messages []*models.Message) error {
	if d.dispatcher == nil {
		return fmt.Errorf("dispatcher还没有启动")
	}
	if d.messageRepository == nil {
		// No persistence configured: hand off directly.
		d.dispatcher.MessagePublish(messages)
		return nil
	}
	for _, msg := range messages {
		if err := d.messageRepository.SaveMessage(msg); err != nil {
			return err
		}
	}
	return d.dispatcher.MessagePublishedNotice()
}
// NewMessageDirector builds a MessageDirector backed by a Kafka producer
// and starts its dispatch loop on a background goroutine.
// (Translated from: "message publisher".)
//
// Recognized options (missing or wrongly-typed entries fall back to the
// defaults instead of panicking, which the original unchecked type
// assertions did):
//
//	options["kafkaHosts"]   string        // default "localhost:9092"
//	options["timeInterval"] time.Duration // default 5 minutes
func NewMessageDirector(messageRepository models.MessageRepository, options map[string]interface{}) *MessageDirector {
	dispatcher := &MessageDispatcher{
		notifications:     make(chan struct{}),
		messageRepository: messageRepository,
		messageChan:       make(chan *models.Message, 100),
	}
	hosts := "localhost:9092"
	if v, ok := options["kafkaHosts"]; ok {
		if s, ok := v.(string); ok {
			hosts = s
		}
	}
	dispatcher.producer = &KafkaMessageProducer{KafkaHosts: hosts, LogInfo: models.DefaultLog}
	interval := 5 * time.Minute
	if v, ok := options["timeInterval"]; ok {
		if d, ok := v.(time.Duration); ok {
			interval = d
		}
	}
	// NOTE: the ticker is never stopped; the dispatcher is expected to
	// live for the life of the process.
	dispatcher.dispatchTicker = time.NewTicker(interval)
	go dispatcher.Dispatch()
	return &MessageDirector{
		messageRepository: messageRepository,
		dispatcher:        dispatcher,
	}
}