Add the ability to restore volumes from a backup.tgz in the data partition

Change-type: minor
Signed-off-by: Pablo Carranza Velez <pablo@balena.io>

parent 289b2dad4b
commit 501272266b
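In short: on startup the supervisor checks the data partition for backup.tgz, fetches the target state, and restores each top-level directory of the archive as a named volume of the app in that target state. Below is a minimal sketch of how such an archive could be produced, assuming one directory per volume; the helper name, volume names, and mount path are illustrative and not part of this commit.

# Illustrative only: pack one directory per volume into backup.tgz at the root
# of the data partition, where loadBackupFromMigration will look for it.
Promise = require 'bluebird'
execAsync = Promise.promisify(require('child_process').exec)

createMigrationBackup = (volumeNames, dataMount = '/mnt/data') ->
	# e.g. /mnt/data/resin-data/... becomes the 'resin-data' volume on restore
	execAsync("tar -czf backup.tgz #{volumeNames.join(' ')}", cwd: dataMount)

# createMigrationBackup([ 'resin-data' ])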
@@ -7,8 +7,11 @@ deviceRegister = require 'resin-register-device'
express = require 'express'
bodyParser = require 'body-parser'
Lock = require 'rwlock'
path = require 'path'
{ request, requestOpts } = require './lib/request'
{ checkTruthy, checkInt } = require './lib/validation'
{ pathExistsOnHost } = require './lib/fs-utils'
constants = require './lib/constants'

DuplicateUuidError = (err) ->
	_.startsWith(err.message, '"uuid" must be unique')
@@ -83,6 +86,21 @@ module.exports = class APIBinder
			passthrough: passthrough
		@cachedBalenaApi = @balenaApi.clone({}, cache: {})

	loadBackupFromMigration: (retryDelay) =>
		pathExistsOnHost(path.join('mnt/data', constants.migrationBackupFile))
		.then (exists) =>
			if !exists
				return
			console.log('Migration backup detected')
			@getTargetState()
			.then (targetState) =>
				@deviceState.restoreBackup(targetState)
		.catch (err) =>
			console.log('Error restoring migration backup, retrying: ', err)
			Promise.delay(retryDelay)
			.then =>
				@loadBackupFromMigration(retryDelay)

	start: =>
		@config.getMany([ 'apiEndpoint', 'offlineMode', 'bootstrapRetryDelay' ])
		.then ({ apiEndpoint, offlineMode, bootstrapRetryDelay }) =>
@@ -109,6 +127,8 @@ module.exports = class APIBinder
		.then =>
			console.log('Starting current state report')
			@startCurrentStateReport()
		.then =>
			@loadBackupFromMigration(bootstrapRetryDelay)
		.then =>
			@readyForUpdates = true
			console.log('Starting target state poll')
@@ -13,6 +13,7 @@ Docker = require './lib/docker-utils'
updateLock = require './lib/update-lock'
{ checkTruthy, checkInt, checkString } = require './lib/validation'
{ NotFoundError } = require './lib/errors'
{ pathExistsOnHost } = require './lib/fs-utils'

ServiceManager = require './compose/service-manager'
{ Service } = require './compose/service'
@@ -46,11 +47,6 @@ fetchAction = (service) ->
		serviceId: service.serviceId
	}

pathExistsOnHost = (p) ->
	fs.statAsync(path.join(constants.rootMountPoint, p))
	.return(true)
	.catchReturn(false)

# TODO: implement additional v2 endpoints
# Some v1 endpoints only work for single-container apps as they assume the app has a single service.
createApplicationManagerRouter = (applications) ->
@@ -76,15 +76,19 @@ module.exports = class Volumes

	createFromLegacy: (appId) =>
		name = defaultLegacyVolume()
		@create({ name, appId })
		legacyPath = path.join(constants.rootMountPoint, 'mnt/data/resin-data', appId.toString())
		@createFromPath({ name, appId }, legacyPath)
		.catch (err) =>
			@logger.logSystemMessage("Warning: could not migrate legacy /data volume: #{err.message}", { error: err }, 'Volume migration error')

	# oldPath must be a path inside /mnt/data
	createFromPath: ({ name, config = {}, appId }, oldPath) =>
		@create({ name, config, appId })
		.get('handle')
		.then (v) ->
			# Convert the path to be of the same mountpoint so that rename can work
			volumePath = path.join(constants.rootMountPoint, 'mnt/data', v.Mountpoint.split(path.sep).slice(3)...)
			legacyPath = path.join(constants.rootMountPoint, 'mnt/data/resin-data', appId.toString())
			safeRename(legacyPath, volumePath)
		.catch (err) =>
			@logger.logSystemMessage("Warning: could not migrate legacy /data volume: #{err.message}", { error: err }, 'Volume migration error')
			safeRename(oldPath, volumePath)

	remove: ({ name, appId }) ->
		@logger.logSystemEvent(logTypes.removeVolume, { volume: { name } })
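A hedged usage sketch of the new createFromPath (the Volumes instance name, appId, volume name, and extracted path below are hypothetical): it creates the named volume via the engine and then renames the existing directory onto the volume's mountpoint, so the data is moved rather than copied.

# Illustrative call only: turn an extracted backup directory into the
# 'resin-data' volume of app 1234. oldPath must live under the host's
# /mnt/data (here addressed through constants.rootMountPoint).
volumes.createFromPath(
	{ appId: 1234, name: 'resin-data', config: {} },
	path.join(constants.rootMountPoint, 'mnt/data/backup/resin-data')
)
.catch (err) ->
	console.log('Could not create volume from path:', err)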
@@ -7,6 +7,9 @@ express = require 'express'
bodyParser = require 'body-parser'
hostConfig = require './host-config'
network = require './network'
execAsync = Promise.promisify(require('child_process').exec)
mkdirp = Promise.promisify(require('mkdirp'))
path = require 'path'

constants = require './lib/constants'
validation = require './lib/validation'
@@ -41,6 +44,9 @@ validateState = Promise.method (state) ->
	if state.dependent?
		validateDependentState(state.dependent)

rimraf = (p) ->
	execAsync("rm -rf \"#{p.replace(/"/g, '\\"')}\"")

# TODO (refactor): This shouldn't be here, and instead should be part of the other
# device api stuff in ./device-api
createDeviceStateRouter = (deviceState) ->
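The rimraf helper above shells out to rm -rf, wrapping the path in double quotes (with embedded quotes escaped) so unusual paths survive the shell. A small illustrative example of the command it produces, assuming a rootMountPoint of /mnt/root:

# Illustrative: clearing the staging directory used by restoreBackup runs
#   rm -rf "/mnt/root/mnt/data/backup"
rimraf('/mnt/root/mnt/data/backup')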
@@ -406,6 +412,39 @@ module.exports = class DeviceState extends EventEmitter
		apps = _.keyBy(_.map(appsArray, singleToMulticontainerApp), 'appId')
		return { apps, config: deviceConf }

	restoreBackup: (targetState) =>
		@setTarget(targetState)
		.then =>
			appId = _.keys(targetState.local.apps)[0]
			if !appId?
				throw new Error('No appId in target state')
			volumes = targetState.local.apps[appId].volumes
			backupPath = path.join(constants.rootMountPoint, 'mnt/data/backup')
			rimraf(backupPath) # We clear this path in case it exists from an incomplete run of this function
			.then ->
				mkdirp(backupPath)
			.then ->
				execAsync("tar -xzf backup.tgz -C #{backupPath} .", cwd: path.join(constants.rootMountPoint, 'mnt/data'))
			.then ->
				fs.readdirAsync(backupPath)
			.then (dirContents) =>
				Promise.mapSeries dirContents, (volumeName) =>
					if volumes[volumeName]?
						console.log("Creating volume #{volumeName} from backup")
						# If the volume exists (from a previous incomplete run of this restoreBackup), we delete it first
						@applications.volumes.get({ appId, name: volumeName })
						.then =>
							@applications.volumes.remove({ appId, name: volumeName })
						.catch(NotFoundError, _.noop)
						.then =>
							@applications.volumes.createFromPath({ appId, name: volumeName, config: volumes[volumeName] }, path.join(backupPath, volumeName))
					else
						console.log("WARNING: #{volumeName} is present in backup but not in target state, ignoring")
			.then ->
				rimraf(backupPath)
			.then ->
				rimraf(path.join(constants.rootMountPoint, 'mnt/data', constants.migrationBackupFile))

	loadTargetFromFile: (appsPath) ->
		console.log('Attempting to load preloaded apps...')
		appsPath ?= constants.appsJsonPath
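restoreBackup only restores directories whose names match volumes declared for the (single) app in the target state; anything else in the archive is logged and skipped. A hypothetical fragment of the relevant target state shape, with made-up appId and volume names:

# Hypothetical shape only: restoreBackup reads the first app under local.apps
# and matches backup directory names against the keys of its volumes object.
targetState =
	local:
		apps:
			'1234':
				volumes:
					'resin-data': {}
					'db-data': {}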
@@ -49,6 +49,7 @@ const constants = {
	},
	bootBlockDevice: '/dev/mmcblk0p1',
	hostConfigVarPrefix: 'HOST_',
	migrationBackupFile: 'backup.tgz',
};

if (process.env.DOCKER_HOST == null) {
@@ -1,6 +1,8 @@
import * as Bluebird from 'bluebird';
import { fs } from 'mz';
import * as path from 'path';
import * as constants from './constants';
import { ENOENT } from './errors';

export function writeAndSyncFile(path: string, data: string): Bluebird<void> {
	return Bluebird.resolve(fs.open(path, 'w')).then(fd => {
@@ -22,3 +24,9 @@ export function safeRename(src: string, dest: string): Bluebird<void> {
		.tap(fs.fsync)
		.then(fs.close);
}

export function pathExistsOnHost(p: string): Bluebird<boolean> {
	return Bluebird.resolve(fs.stat(path.join(constants.rootMountPoint, p)))
		.return(true)
		.catchReturn(ENOENT, false);
}
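Note that catchReturn(ENOENT, false) only maps a missing file to false; any other stat error still rejects, so a caller like loadBackupFromMigration falls into its retry path rather than silently skipping the backup. A small usage sketch in CoffeeScript, using the same path the supervisor checks:

# Only ENOENT resolves to false; e.g. a permissions error would reject instead.
pathExistsOnHost(path.join('mnt/data', constants.migrationBackupFile))
.then (exists) ->
	console.log("migration backup present: #{exists}")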