forked from googlearchive/go-gcm
-
Notifications
You must be signed in to change notification settings - Fork 1
/
client.go
346 lines (305 loc) · 8.92 KB
/
client.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
package gcm
import (
"errors"
"sync"
"time"
log "github.com/sirupsen/logrus"
)
// httpC is an interface to stub the internal HTTP client.
type httpC interface {
	// Send delivers m via the GCM HTTP connection server, blocking until a
	// response or error is available.
	Send(m HTTPMessage) (*HTTPResponse, error)
}
// xmppC is an interface to stub the internal XMPP client.
type xmppC interface {
	// Listen blocks, dispatching incoming CCS messages to h until the
	// connection fails or is closed.
	Listen(h MessageHandler) error
	// Send delivers m via XMPP, returning the message ID and bytes sent.
	Send(m XMPPMessage) (string, int, error)
	// Ping checks connection liveness, failing after timeout.
	Ping(timeout time.Duration) error
	// Close shuts down the connection; graceful allows in-flight work to drain.
	Close(graceful bool) error
	// IsClosed reports whether the connection has been closed.
	IsClosed() bool
	// ID returns the client's unique identifier.
	ID() string
	// JID returns the client's XMPP JID.
	JID() string
}
// gcmClient is a container for http and xmpp GCM clients.
type gcmClient struct {
	sync.RWMutex
	// mh is the user-supplied handler for upstream (CCS) messages.
	mh MessageHandler
	// cerr receives connection errors from the active XMPP client; it is
	// replaced together with the client on reconnect.
	cerr      chan error
	sandbox   bool
	debug     bool
	omitRetry bool
	// Clients.
	xmppClient xmppC
	httpClient httpC
	// Synchronize sending xmpp with the replacement of xmppClient through xmppChan
	xmppChan chan xmppPacket
	// GCM auth.
	senderID string
	apiKey   string
	// XMPP config.
	pingInterval       time.Duration
	pingTimeout        time.Duration
	xmppConnectTimeout time.Duration
	// killMonitor stops the monitor loop goroutine.
	killMonitor chan bool
}
// NewClient creates a new GCM client for these credentials.
// When config.SenderID is empty the client is HTTP-only; otherwise an XMPP
// client is created as well. Returns an error on nil config, nil handler,
// empty API key, or XMPP connection failure.
func NewClient(config *Config, h MessageHandler) (Client, error) {
	// Validate required inputs up front.
	if config == nil {
		return nil, errors.New("config is nil")
	}
	if h == nil {
		return nil, errors.New("message handler is nil")
	}
	if config.APIKey == "" {
		return nil, errors.New("empty api key")
	}

	// Create GCM HTTP client.
	httpc := newHTTPClient(
		config.APIKey,
		config.Debug,
		config.OmitInternalRetry,
		config.HTTPTimeout,
		config.HTTPTransport,
	)

	// Without a sender ID there is no XMPP connection to establish.
	var xmppc xmppC
	if config.SenderID != "" {
		var err error
		xmppc, err = newXMPPClient(config.Sandbox, config.SenderID, config.APIKey, config.Debug, config.OmitInternalRetry, config.XMPPConnectTimeout)
		if err != nil {
			return nil, err
		}
	}

	// Construct GCM client.
	return newGCMClient(xmppc, httpc, config, h)
}
// ID returns client unique identification (the active XMPP client's ID).
// Returns "" for an HTTP-only client, which has no XMPP connection; the
// previous implementation dereferenced a nil xmppClient and panicked in
// that configuration.
func (c *gcmClient) ID() string {
	c.RLock()
	defer c.RUnlock()
	if c.xmppClient == nil {
		// HTTP-only client (no SenderID configured).
		return ""
	}
	return c.xmppClient.ID()
}
// JID returns client XMPP JID.
// Returns "" for an HTTP-only client, which has no XMPP connection; the
// previous implementation dereferenced a nil xmppClient and panicked in
// that configuration.
func (c *gcmClient) JID() string {
	c.RLock()
	defer c.RUnlock()
	if c.xmppClient == nil {
		// HTTP-only client (no SenderID configured).
		return ""
	}
	return c.xmppClient.JID()
}
// SendHTTP sends a message using the HTTP GCM connection server (blocking).
// The call is delegated directly to the underlying HTTP client.
func (c *gcmClient) SendHTTP(m HTTPMessage) (*HTTPResponse, error) {
	resp, err := c.httpClient.Send(m)
	return resp, err
}
// xmppResponse carries the result of one XMPP send back to the caller of
// SendXMPP.
type xmppResponse struct {
	// MessageID is the ID assigned to the sent message.
	MessageID string
	// Bytes is the number of bytes written.
	Bytes int
	// Err is the send error, if any.
	Err error
}
// xmppPacket pairs an outgoing XMPP message with the channel on which its
// result is delivered; sent through gcmClient.xmppChan to the monitor loop.
type xmppPacket struct {
	// m is the message to send.
	m XMPPMessage
	// rc receives the send result.
	rc chan xmppResponse
}
// SendXMPP sends a message using the XMPP GCM connection server (blocking).
// Returns an error for HTTP-only clients (no SenderID configured): the
// previous implementation sent on xmppChan with no loop goroutine running
// to service it, deadlocking the caller forever.
func (c *gcmClient) SendXMPP(m XMPPMessage) (string, int, error) {
	c.RLock()
	noXMPP := c.xmppClient == nil
	c.RUnlock()
	if noXMPP {
		return "", 0, errors.New("xmpp client is not available (http-only client)")
	}
	// Hand the message to the monitor loop, which serializes sends with
	// xmppClient replacement, and wait for the result.
	rc := make(chan xmppResponse)
	c.xmppChan <- xmppPacket{m, rc}
	resp := <-rc
	return resp.MessageID, resp.Bytes, resp.Err
}
// Close will stop and close the corresponding client, releasing all resources (blocking).
// It stops the monitor loop and, if present, gracefully closes the XMPP
// connection. HTTP-only clients have no connection to close.
func (c *gcmClient) Close() error {
	c.Lock()
	defer c.Unlock()
	// Signal the monitor loop to exit.
	close(c.killMonitor)
	var err error
	if c.xmppClient != nil {
		err = c.xmppClient.Close(true)
	}
	return err
}
// newGCMClient creates an instance of gcmClient from the given transports
// and config. When an XMPP client is supplied it starts the connection
// monitor and blocks until the connection is established, errors out, or
// times out (10s).
func newGCMClient(xmppc xmppC, httpc httpC, config *Config, h MessageHandler) (*gcmClient, error) {
	c := &gcmClient{
		httpClient: httpc,
		xmppClient: xmppc,
		xmppChan:   make(chan xmppPacket),
		cerr:       make(chan error, 1),
		senderID:   config.SenderID,
		apiKey:     config.APIKey,
		mh:         h,
		debug:      config.Debug,
		omitRetry:  config.OmitInternalRetry,
		sandbox:    config.Sandbox,
		// Ping settings are configured in whole seconds.
		pingInterval: time.Duration(config.PingInterval) * time.Second,
		pingTimeout:  time.Duration(config.PingTimeout) * time.Second,
		// Fix: this field was previously never populated, so reconnects
		// (replaceXMPPClient) ran with a zero connect timeout instead of
		// the configured value.
		xmppConnectTimeout: config.XMPPConnectTimeout,
		killMonitor:        make(chan bool, 1),
	}
	// Fall back to defaults for unset/invalid ping settings.
	if c.pingInterval <= 0 {
		c.pingInterval = DefaultPingInterval
	}
	if c.pingTimeout <= 0 {
		c.pingTimeout = DefaultPingTimeout
	}

	// HTTP-only client: nothing to connect or monitor.
	if xmppc == nil {
		return c, nil
	}

	// Create and monitor XMPP client.
	clientIsConnected := make(chan bool, 1)
	go c.monitorXMPP(config.MonitorConnection, clientIsConnected)
	select {
	case err := <-c.cerr:
		c.killMonitor <- true
		return nil, err
	case <-clientIsConnected:
		return c, nil
	case <-time.After(10 * time.Second):
		c.killMonitor <- true
		return nil, errors.New("timed out attempting to connect client")
	}
}
// loop is the client's service goroutine: it serializes XMPP sends with
// replacement of the underlying xmppClient, and reacts to connection
// errors by reconnecting. It exits when killMonitor fires or on a nil
// error (active close).
func (c *gcmClient) loop(activeMonitor bool) {
	for {
		select {
		case <-c.killMonitor:
			// Close() was called.
			return
		case packet := <-c.xmppChan:
			// Hold the read lock across the send so the client cannot be
			// replaced mid-send.
			c.RLock()
			r := xmppResponse{}
			r.MessageID, r.Bytes, r.Err = c.xmppClient.Send(packet.m)
			packet.rc <- r
			c.RUnlock()
		case err := <-c.cerr:
			// NOTE(review): c.cerr is read here without the lock while
			// replaceXMPPClient reassigns it under the lock — looks racy;
			// verify with -race.
			if err == nil {
				// No error, active close.
				return
			}
			log.WithField("xmpp client ref", c.xmppClient.ID()).WithField("error", err).Error("gcm xmpp connection")
			// Replace the failed connection under the write lock.
			c.Lock()
			c.replaceXMPPClient(activeMonitor)
			c.Unlock()
		}
	}
}
// replaceXMPPClient replaces the active XMPP client with a freshly
// connected one, retrying until a connection succeeds. Must be called with
// c's write lock held (see loop). Note that it sleeps between attempts
// while holding that lock, blocking other client operations until
// reconnected.
func (c *gcmClient) replaceXMPPClient(activeMonitor bool) {
	// Iterative retry: the previous recursive version grew the stack
	// without bound during a prolonged outage (its own TODO).
	for {
		prevc := c.xmppClient
		// Fresh error channel for the replacement connection.
		c.cerr = make(chan error)
		xmppc, err := connectXMPP(nil, c.sandbox, c.senderID, c.apiKey,
			c.onCCSMessage, c.cerr, c.debug, c.omitRetry, c.xmppConnectTimeout)
		if err == nil {
			c.xmppClient = xmppc
			// Close the previous client asynchronously.
			go prevc.Close(true)
			log.WithField("xmpp client ref", xmppc.ID()).WithField("previous xmpp client ref", prevc.ID()).
				Warn("gcm xmpp client replaced")
			if activeMonitor {
				c.spinUpActiveMonitor()
			}
			return
		}
		log.WithFields(log.Fields{"sender id": c.senderID, "error": err}).
			Error("connect gcm xmpp client")
		// Wait and try again.
		time.Sleep(c.pingTimeout)
	}
}
// spinUpActiveMonitor starts a goroutine that periodically pings the
// current XMPP connection, reporting a ping failure on the client's error
// channel unless the connection was already closed.
func (c *gcmClient) spinUpActiveMonitor() {
	// Capture the current client and error channel so the goroutine keeps
	// monitoring this connection even if c's fields are later replaced.
	xc := c.xmppClient
	ce := c.cerr
	go func() {
		// pingPeriodically is blocking.
		perr := pingPeriodically(xc, c.pingTimeout, c.pingInterval)
		if !xc.IsClosed() {
			ce <- perr
		}
	}()
	log.WithField("xmpp client ref", c.xmppClient.ID()).Debug("gcm xmpp connection monitoring started")
}
// monitorXMPP creates a new GCM XMPP client (if not provided), replaces the active client,
// closes the old client and starts monitoring the new connection.
// Signals clientIsConnected once the service loop is running and the
// listener has had a moment to start.
func (c *gcmClient) monitorXMPP(activeMonitor bool, clientIsConnected chan bool) {
	// Create XMPP client.
	log.WithField("sender id", c.senderID).Debug("creating gcm xmpp client")
	xmppc, err := connectXMPP(c.xmppClient, c.sandbox, c.senderID, c.apiKey,
		c.onCCSMessage, c.cerr, c.debug, c.omitRetry, c.xmppConnectTimeout)
	if err != nil {
		// On initial connection, error exits the monitor.
		// (connectXMPP already forwarded the error on c.cerr, which
		// newGCMClient is waiting on.)
		return
	}
	log.WithField("xmpp client ref", xmppc.ID()).Info("gcm xmpp client created")

	// If active monitoring is enabled, start pinging routine.
	if activeMonitor {
		c.spinUpActiveMonitor()
	}

	// Start the service loop that serializes sends and handles errors.
	go c.loop(activeMonitor)

	// Wait just a tick to ensure Listen got called - without this there's probably an edge-case where if the
	// threading happens exactly wrong you can create a client, return it, and push out a send before you start
	// listening for its response and therefore you miss the response. Given network latency that would probably
	// not ever happen but just to be paranoid... this also ensures that the tests (which assert that Listen got
	// called) reliably pass.
	time.Sleep(time.Millisecond)
	clientIsConnected <- true
}
// onCCSMessage is the callback for upstream CCS messages.
// Control messages are handled internally; everything else bubbles up to
// the user-supplied handler.
func (c *gcmClient) onCCSMessage(cm CCSMessage) error {
	if cm.MessageType != CCSControl {
		// Bubble up everything else.
		return c.mh(cm)
	}
	// Handle connection draining request.
	if cm.ControlType == CCSDraining {
		log.WithField("xmpp client ref", c.xmppClient.ID()).
			Warn("gcm xmpp connection draining requested")
		// Server should close the current connection; signal the service
		// loop so it replaces the client.
		c.Lock()
		cerr := c.cerr
		c.Unlock()
		cerr <- errors.New("connection draining")
	}
	// Don't bubble up control messages.
	return nil
}
// connectXMPP returns a listening XMPP client: it reuses c when non-nil,
// otherwise creates a new one, then starts a goroutine that listens on the
// connection and forwards listen errors on cerr.
func connectXMPP(c xmppC, isSandbox bool, senderID string, apiKey string,
	h MessageHandler, cerr chan<- error, debug bool, omitRetry bool, xmppConnectTimeout time.Duration) (xmppC, error) {
	xmppc := c
	if xmppc == nil {
		// No client supplied; create a fresh one.
		nc, err := newXMPPClient(isSandbox, senderID, apiKey, debug, omitRetry, xmppConnectTimeout)
		if err != nil {
			cerr <- err
			return nil, err
		}
		xmppc = nc
	}

	l := log.WithField("xmpp client ref", xmppc.ID())

	// Start listening on this connection in the background; errors are
	// reported on cerr.
	go func() {
		l.Debug("gcm xmpp listen started")
		if err := xmppc.Listen(h); err != nil {
			l.WithField("error", err).Error("gcm xmpp listen")
			cerr <- err
		}
		l.Debug("gcm xmpp listen finished")
	}()

	return xmppc, nil
}
// pingPeriodically sends periodic pings. If pong is received, the timer is
// reset, so the interval is measured from the completion of each ping.
// Returns nil once the connection is closed, or the first ping error.
func pingPeriodically(xm xmppC, timeout, interval time.Duration) error {
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		// A plain receive replaces the previous single-case select, which
		// was a no-op wrapper (staticcheck S1000).
		<-t.C
		if xm.IsClosed() {
			// Connection gone: stop monitoring without error.
			return nil
		}
		if err := xm.Ping(timeout); err != nil {
			return err
		}
		t.Reset(interval)
	}
}