Allow storing several messages in the publish queue; this will allow us to withstand log bursts better

Also, send a warning message when we start dropping logs.
This commit is contained in:
Pablo Carranza Velez 2016-07-19 22:27:42 +00:00
parent a8dd29cd82
commit 78b4570b93
2 changed files with 28 additions and 10 deletions

View File

@ -77,7 +77,7 @@ logTypes =
humanName: 'Restarting application' humanName: 'Restarting application'
logSystemMessage = (message, obj, eventName) -> logSystemMessage = (message, obj, eventName) ->
logger.log({ m: message, s: true }) logger.log({ m: message, s: 1 })
utils.mixpanelTrack(eventName ? message, obj) utils.mixpanelTrack(eventName ? message, obj)
logSystemEvent = (logType, app, error) -> logSystemEvent = (logType, app, error) ->

View File

@ -11,6 +11,7 @@ LOG_PUBLISH_INTERVAL = 110
# but we'll be conservative). So we limit a log message to 2 bytes less to account # but we'll be conservative). So we limit a log message to 2 bytes less to account
# for the [ and ] in the array. # for the [ and ] in the array.
MAX_LOG_BYTE_SIZE = 31998 MAX_LOG_BYTE_SIZE = 31998
MAX_MESSAGE_INDEX = 9
disableLogs = false disableLogs = false
@ -27,8 +28,10 @@ dockerPromise = initialised.then (config) ->
# Queue up any calls to publish logs whilst we wait to be initialised. # Queue up any calls to publish logs whilst we wait to be initialised.
publish = do -> publish = do ->
publishQueue = [] publishQueue = [[]]
messageIndex = 0
publishQueueRemainingBytes = MAX_LOG_BYTE_SIZE publishQueueRemainingBytes = MAX_LOG_BYTE_SIZE
logsOverflow = false
initialised.then (config) -> initialised.then (config) ->
if config.offlineMode if config.offlineMode
@ -38,15 +41,19 @@ publish = do ->
pubnub = PUBNUB.init(config.pubnub) pubnub = PUBNUB.init(config.pubnub)
channel = config.channel channel = config.channel
doPublish = -> doPublish = ->
return if publishQueue.length is 0 return if publishQueue[0].length is 0
pubnub.publish({ channel, message: publishQueue }) message = publishQueue.shift()
publishQueue = [] pubnub.publish({ channel, message })
publishQueueRemainingBytes = MAX_LOG_BYTE_SIZE if publishQueue.length is 0
publishQueue = [[]]
publishQueueRemainingBytes = MAX_LOG_BYTE_SIZE
messageIndex = Math.max(messageIndex - 1, 0)
logsOverflow = false if messageIndex < MAX_MESSAGE_INDEX
setInterval(doPublish, LOG_PUBLISH_INTERVAL) setInterval(doPublish, LOG_PUBLISH_INTERVAL)
return (message) -> return (message) ->
# Disable sending logs for bandwidth control # Disable sending logs for bandwidth control
return if disableLogs or publishQueueRemainingBytes <= 0 return if disableLogs or (messageIndex >= MAX_MESSAGE_INDEX and publishQueueRemainingBytes <= 0)
if _.isString(message) if _.isString(message)
message = { m: message } message = { m: message }
@ -54,9 +61,20 @@ publish = do ->
t: Date.now() t: Date.now()
m: '' m: ''
msgLength = Buffer.byteLength(JSON.stringify(message), 'utf8') msgLength = Buffer.byteLength(JSON.stringify(message), 'utf8')
publishQueueRemainingBytes -= msgLength return if msgLength > MAX_LOG_BYTE_SIZE # Unlikely, but we can't allow this
publishQueue.push(message) if publishQueueRemainingBytes >= 0 remaining = publishQueueRemainingBytes - msgLength
if remaining >= 0
publishQueue[messageIndex].push(message)
publishQueueRemainingBytes = remaining
else if messageIndex < MAX_MESSAGE_INDEX
messageIndex += 1
publishQueue[messageIndex] = [ message ]
publishQueueRemainingBytes = MAX_LOG_BYTE_SIZE - msgLength
else if !logsOverflow
logsOverflow = true
messageIndex += 1
publishQueue[messageIndex] = [ { m: 'Warning! Some logs dropped due to high load', t: Date.now(), s: 1 } ]
publishQueueRemainingBytes = 0
# disable: A Boolean to pause the Log Publishing - Logs are lost when paused. # disable: A Boolean to pause the Log Publishing - Logs are lost when paused.
exports.disableLogPublishing = (disable) -> exports.disableLogPublishing = (disable) ->