Merge pull request #1981 from balena-os/sv-base-image

Refactor supervisor Dockerfile to remove custom dependencies
bulldozer-balena[bot] 2022-07-18 18:33:18 +00:00 committed by GitHub
commit 6945f61a24
7 changed files with 233 additions and 170 deletions


@@ -1,70 +1,125 @@
ARG ARCH=%%BALENA_ARCH%%
ARG NODE_VERSION=12.16.2
FROM balenalib/$ARCH-alpine-supervisor-base:3.11 as BUILD
# Used by livepush to support multi arch images in older
# balenaOS with buggy platform support
# see https://github.com/balena-os/balena-engine/issues/269
ARG PREFIX=library
ARG ARCH
ARG NODE_VERSION
ARG NODE_ARCHIVE="node-no-intl-v${NODE_VERSION}-linux-alpine-${ARCH}.tar.gz"
ARG S3_BASE="https://resin-packages.s3.amazonaws.com"
ARG NODE_LOCATION="${S3_BASE}/node/v${NODE_VERSION}/${NODE_ARCHIVE}"
###################################################
# Build the supervisor dependencies
###################################################
FROM balenalib/${ARCH}-alpine-node:12-run as build-base
# DO NOT REMOVE THE cross-build-* COMMANDS
# The following commands are absolutely needed. When we
# build for ARM architectures, we run this Dockerfile
# through sed, which uncomments these lines. There were
# other options for achieving the same setup, but this seems
# to be the least intrusive. The commands start commented
# out because the default build for balenaCI is amd64 (and
# we can't run any sed preprocessing on it there)
# RUN ["cross-build-start"]
ARG PREFIX
# Sanity check to prevent a prefix for a non-official docker image being
# inserted. Only 'library' and 'arm32v6' are allowed right now
RUN for allowed in "library" "arm32v6"; do [ "${PREFIX}" = "${allowed}" ] && break; done
WORKDIR /usr/src/app
RUN apk add --no-cache \
g++ \
git \
make \
python \
curl \
binutils \
python3 \
libgcc \
libstdc++ \
libuv \
sqlite-libs \
sqlite-dev \
dmidecode \
dbus-dev \
procmail
# procmail is installed for the lockfile binary
COPY build-utils/node-sums.txt .
# Install node from balena's prebuilt cache
RUN curl -SLO "${NODE_LOCATION}" \
&& grep "${NODE_ARCHIVE}" node-sums.txt | sha256sum -c - \
&& tar -xzf "${NODE_ARCHIVE}" -C /usr/local --strip-components=1 \
&& rm -f "${NODE_ARCHIVE}" \
&& strip /usr/local/bin/node
dbus-dev
COPY package*.json ./
RUN strip /usr/local/bin/node
# Just install dev dependencies first
RUN npm ci --build-from-source --sqlite=/usr/lib
# We only run these commands when executing through
# livepush, so they are presented as livepush directives
#dev-run=apk add --no-cache ip6tables iptables
###################################################
# Extra dependencies. This uses alpine 3.11 as the
# procmail package was removed in 3.12
###################################################
FROM ${PREFIX}/alpine:3.11 as extra
RUN apk add --update --no-cache procmail
###################################################
# Image with the final production dependencies.
# This image will also be used for testing
###################################################
FROM ${PREFIX}/alpine:3.16 as runtime-base
WORKDIR /usr/src/app
# We just need the node binary in the final image
COPY --from=build-base /usr/local/bin/node /usr/local/bin/node
# Similarly, from the procmail package we just need the lockfile binary
COPY --from=extra /usr/bin/lockfile /usr/bin/lockfile
# Runtime dependencies
RUN apk add --no-cache \
ca-certificates \
iptables \
ip6tables \
rsync \
dbus \
libstdc++ \
dmidecode \
sqlite-libs
ARG ARCH
ARG VERSION=master
ARG DEFAULT_MIXPANEL_TOKEN=bananasbananas
ENV CONFIG_MOUNT_POINT=/boot/config.json \
LED_FILE=/dev/null \
SUPERVISOR_IMAGE=balena/$ARCH-supervisor \
VERSION=$VERSION \
DEFAULT_MIXPANEL_TOKEN=$DEFAULT_MIXPANEL_TOKEN
###################################################
# Use the base image to run the tests as livepush
###################################################
FROM runtime-base as test
WORKDIR /usr/src/app
# Copy node install from the build folder
COPY --from=build-base /usr/local/bin /usr/local/bin
COPY --from=build-base /usr/local/lib/node_modules /usr/local/lib/node_modules
# Copy build dependencies
COPY --from=build-base /usr/src/app/package.json ./
COPY --from=build-base /usr/src/app/node_modules ./node_modules
# Run livepush here
#dev-copy=entry.sh .
#dev-cmd-live=LIVEPUSH=1 ./entry.sh
# Copy build files
COPY build-utils ./build-utils
COPY webpack.config.js tsconfig.json tsconfig.release.json ./
COPY src ./src
COPY test ./test
COPY typings ./typings
RUN npm run test-nolint \
&& npm run build
# Run the tests
RUN npm run test-nolint
###################################################
# Build the production package
###################################################
FROM build-base as build-prod
WORKDIR /usr/src/app
# Copy build files
COPY build-utils ./build-utils
COPY webpack.config.js tsconfig.json tsconfig.release.json ./
COPY src ./src
COPY typings ./typings
# Compile the sources using the dev
# dependencies
RUN npm run build
# Run the production install here, to avoid the npm dependency on
# the later stage
@@ -86,52 +141,21 @@ RUN npm ci --production --no-optional --unsafe-perm --build-from-source --sqlite
&& find . -type f -path '*/node_modules/knex/build*' -delete \
&& rm -rf node_modules/sqlite3/node.dtps
# RUN ["cross-build-end"]
FROM balenalib/$ARCH-alpine-supervisor-base:3.11
# RUN ["cross-build-start"]
RUN apk add --no-cache \
ca-certificates \
kmod \
iptables \
ip6tables \
rsync \
avahi \
dbus \
libstdc++ \
dmidecode \
sqlite-libs
###################################################
# Build the production image
###################################################
FROM runtime-base
WORKDIR /usr/src/app
COPY --from=BUILD /usr/local/bin/node /usr/local/bin/node
COPY --from=BUILD /usr/bin/lockfile /usr/bin/lockfile
COPY --from=BUILD /usr/src/app/dist ./dist
COPY --from=BUILD /usr/src/app/package.json ./
COPY --from=BUILD /usr/src/app/node_modules ./node_modules
COPY --from=build-prod /usr/src/app/dist ./dist
COPY --from=build-prod /usr/src/app/package.json ./
COPY --from=build-prod /usr/src/app/node_modules ./node_modules
COPY entry.sh .
RUN mkdir -p rootfs-overlay && \
(([ ! -d rootfs-overlay/lib64 ] && ln -s /lib rootfs-overlay/lib64) || true)
ARG ARCH
ARG VERSION=master
ARG DEFAULT_MIXPANEL_TOKEN=bananasbananas
ENV CONFIG_MOUNT_POINT=/boot/config.json \
LED_FILE=/dev/null \
SUPERVISOR_IMAGE=balena/$ARCH-supervisor \
VERSION=$VERSION \
DEFAULT_MIXPANEL_TOKEN=$DEFAULT_MIXPANEL_TOKEN
COPY avahi-daemon.conf /etc/avahi/avahi-daemon.conf
VOLUME /data
HEALTHCHECK --interval=5m --start-period=1m --timeout=30s --retries=3 \
CMD wget http://127.0.0.1:${LISTEN_PORT:-48484}/v1/healthy -O - -q
# RUN ["cross-build-end"]
CMD ["/usr/src/app/entry.sh"]


@@ -1,67 +0,0 @@
# This file is part of avahi.
#
# avahi is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# avahi is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with avahi; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA.
# See avahi-daemon.conf(5) for more information on this configuration
# file!
[server]
#host-name=foo
#domain-name=local
#browse-domains=0pointer.de, zeroconf.org
use-ipv4=yes
use-ipv6=yes
#allow-interfaces=eth0
#deny-interfaces=eth1
#check-response-ttl=no
#use-iff-running=no
#enable-dbus=yes
#disallow-other-stacks=no
#allow-point-to-point=no
#cache-entries-max=4096
#clients-max=4096
#objects-per-client-max=1024
#entries-per-entry-group-max=32
ratelimit-interval-usec=1000000
ratelimit-burst=1000
[wide-area]
enable-wide-area=yes
[publish]
#disable-publishing=no
#disable-user-service-publishing=no
#add-service-cookie=no
#publish-addresses=yes
publish-hinfo=no
publish-workstation=no
#publish-domain=yes
#publish-dns-servers=192.168.50.1, 192.168.50.2
#publish-resolv-conf-dns-servers=yes
#publish-aaaa-on-ipv4=yes
#publish-a-on-ipv6=no
[reflector]
#enable-reflector=no
#reflect-ipv=no
[rlimits]
#rlimit-as=
rlimit-core=0
rlimit-data=4194304
rlimit-fsize=0
rlimit-nofile=768
rlimit-stack=4194304


@@ -1,5 +0,0 @@
6d4ab189ece76bed2f40cffe1f1b4dd2d2a805e6fe10c577ae5ce89fce2ad53b node-no-intl-v12.16.2-linux-alpine-amd64.tar.gz
6d81db43dc1285656f6d95807e237e51d33774bed5daafad7f706a4e68b6b546 node-no-intl-v12.16.2-linux-alpine-i386.tar.gz
d964c84be94d0cf3f30f4c4d61d3f2e0d5439b43137c938d0a0df1b6860961fb node-no-intl-v12.16.2-linux-alpine-aarch64.tar.gz
00ac31e6e43319cc3786c0eb97784dad91b5152c5f8be31889215801dcab7712 node-no-intl-v12.16.2-linux-alpine-armv7hf.tar.gz
8c1cfa75a1523a5b21060201178f4a8cd51f492c93693ecfbd67e8b0a49b23f1 node-no-intl-v12.16.2-linux-alpine-rpi.tar.gz


@@ -5,7 +5,7 @@ import { Builder } from 'resin-docker-build';
import { promises as fs } from 'fs';
import * as Path from 'path';
import { Duplex, Readable } from 'stream';
import { Duplex, Readable, PassThrough, Stream } from 'stream';
import * as tar from 'tar-stream';
import { exec } from '../src/lib/fs-utils';
@@ -68,26 +68,30 @@ export async function getCacheFrom(docker: Docker): Promise<string[]> {
}
}
// perform the build and return the image id
export async function performBuild(
docker: Docker,
dockerfile: Dockerfile,
dockerOpts: { [key: string]: any },
): Promise<void> {
): Promise<string> {
const builder = Builder.fromDockerode(docker);
// tar the directory, but replace the dockerfile with the
// livepush generated one
const tarStream = await tarDirectory(Path.join(__dirname, '..'), dockerfile);
const bufStream = new PassThrough();
return new Promise((resolve, reject) => {
const chunks = [] as Buffer[];
bufStream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
builder.createBuildStream(dockerOpts, {
buildSuccess: () => {
resolve();
// Return the build logs
resolve(Buffer.concat(chunks).toString('utf8'));
},
buildFailure: reject,
buildStream: (stream: Duplex) => {
stream.pipe(process.stdout);
stream.pipe(bufStream);
tarStream.pipe(stream);
},
});
@@ -190,3 +194,20 @@ LED_FILE=/dev/null
`echo '${fileStr}' > /tmp/update-supervisor.conf`,
);
}
export async function readBuildCache(address: string): Promise<string[]> {
const cache = await runSshCommand(
address,
`cat /tmp/livepush-cache.json || true`,
);
return JSON.parse(cache || '[]');
}
export async function writeBuildCache(address: string, stageImages: string[]) {
// Convert the list to JSON with escaped quotes
const contents = JSON.stringify(stageImages).replace(/["]/g, '\\"');
return runSshCommand(
address,
`echo '${contents}' > /tmp/livepush-cache.json`,
);
}
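The change above makes performBuild return the build output instead of void by teeing the builder stream into a PassThrough and buffering the chunks. A standalone sketch of that capture pattern (not part of the diff), using only Node's stream module; the function name is illustrative:

import { PassThrough, Readable } from 'stream';

// Tee a build stream to stdout while buffering a copy, then resolve with the
// full text once the buffered side ends.
function captureStream(source: Readable): Promise<string> {
	const bufStream = new PassThrough();
	const chunks: Buffer[] = [];
	bufStream.on('data', (chunk) => chunks.push(Buffer.from(chunk)));
	source.pipe(process.stdout); // keep live output visible
	source.pipe(bufStream);
	return new Promise((resolve, reject) => {
		bufStream.on('end', () => resolve(Buffer.concat(chunks).toString('utf8')));
		source.on('error', reject);
	});
}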


@@ -14,24 +14,92 @@ interface Opts {
arch?: string;
}
// Source: https://github.com/balena-io/balena-cli/blob/f6d668684a6f5ea8102a964ca1942b242eaa7ae2/lib/utils/device/live.ts#L539-L547
function extractDockerArrowMessage(outputLine: string): string | undefined {
const arrowTest = /^.*\s*-+>\s*(.+)/i;
const match = arrowTest.exec(outputLine);
if (match != null) {
return match[1];
}
}
// Source: https://github.com/balena-io/balena-cli/blob/f6d668684a6f5ea8102a964ca1942b242eaa7ae2/lib/utils/device/live.ts#L300-L325
function getMultiStateImageIDs(buildLog: string): string[] {
const ids = [] as string[];
const lines = buildLog.split(/\r?\n/);
let lastArrowMessage: string | undefined;
for (const line of lines) {
// If this was a from line, take the last found
// image id and save it
if (
/step \d+(?:\/\d+)?\s*:\s*FROM/i.test(line) &&
lastArrowMessage != null
) {
ids.push(lastArrowMessage);
} else {
const msg = extractDockerArrowMessage(line);
if (msg != null) {
lastArrowMessage = msg;
}
}
}
return ids;
}
function getPathPrefix(arch: string) {
switch (arch) {
/**
* Proper paths are
* - armv6 - arm32v6
* - armv7hf - arm32v7
* - aarch64 - arm64v8
* - amd64 - amd64
* - i386 - i386
*
* We only set the prefix for v6 images since rpi devices are
* the only ones that seem to have the issue
* https://github.com/balena-os/balena-engine/issues/269
*/
case 'rpi':
return 'arm32v6';
default:
return 'library';
}
}
export async function initDevice(opts: Opts) {
const arch = opts.arch ?? (await device.getDeviceArch(opts.docker));
const image = `${opts.imageName}-${opts.imageTag}`;
const image = `${opts.imageName}:${opts.imageTag}`;
await device.performBuild(opts.docker, opts.dockerfile, {
buildargs: { ARCH: arch },
const buildCache = await device.readBuildCache(opts.address);
const buildLog = await device.performBuild(opts.docker, opts.dockerfile, {
buildargs: { ARCH: arch, PREFIX: getPathPrefix(arch) },
t: image,
labels: { 'io.balena.livepush-image': '1', 'io.balena.architecture': arch },
cachefrom: (await device.getCacheFrom(opts.docker)).concat(image),
cachefrom: (await device.getCacheFrom(opts.docker))
.concat(image)
.concat(buildCache),
nocache: opts.nocache,
});
const stageImages = getMultiStateImageIDs(buildLog);
// Store the list of stage images for the next time the sync
// command is called. This will only live until the device is rebooted
await device.writeBuildCache(opts.address, stageImages);
// Now that we have our new image on the device, we need
// to stop the supervisor, update
// /tmp/update-supervisor.conf with our version, and
// restart the supervisor
await device.stopSupervisor(opts.address);
await device.replaceSupervisorImage(opts.address, image, 'latest');
await device.replaceSupervisorImage(
opts.address,
opts.imageName,
opts.imageTag,
);
await device.startSupervisor(opts.address);
let supervisorContainer: undefined | Docker.ContainerInfo;
@@ -45,5 +113,5 @@ export async function initDevice(opts: Opts) {
await Bluebird.delay(500);
}
}
return supervisorContainer.Id;
return { containerId: supervisorContainer.Id, stageImages };
}
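For reference, a fabricated build log (not from a real run) showing what getMultiStateImageIDs in the diff above would extract, assuming the function is in scope: it records the image id seen immediately before each subsequent FROM step, i.e. the intermediate stage images.

const sampleLog = [
	'Step 1/5 : FROM alpine:3.16 as build',
	' ---> 111111111111',
	'Step 2/5 : RUN apk add --no-cache make',
	' ---> 222222222222',
	'Step 3/5 : FROM alpine:3.16 as runtime',
	' ---> 333333333333',
].join('\n');

// The FROM on step 3 flushes the last arrow message, so this logs
// ['222222222222'] — the id of the intermediate build stage. The final
// stage's id ('333333333333') is never pushed because no FROM follows it.
console.log(getMultiStateImageIDs(sampleLog));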


@@ -11,11 +11,12 @@ export async function startLivepush(opts: {
containerId: string;
docker: Docker;
noinit: boolean;
stageImages?: string[];
}) {
const livepush = await Livepush.init({
stageImages: [],
...opts,
context: Path.join(__dirname, '..'),
stageImages: [],
});
livepush.addListener('commandExecute', ({ command }) => {
@@ -34,8 +35,7 @@ export async function startLivepush(opts: {
});
const livepushExecutor = getExecutor(livepush);
chokidar
const watcher = chokidar
.watch('.', {
ignored: /((^|[\/\\])\..|(node_modules|sync|test)\/.*)/,
ignoreInitial: opts.noinit,
@@ -43,6 +43,11 @@ export async function startLivepush(opts: {
.on('add', (path) => livepushExecutor(path))
.on('change', (path) => livepushExecutor(path))
.on('unlink', (path) => livepushExecutor(undefined, path));
return async () => {
await watcher.close();
await livepush.cleanupIntermediateContainers();
};
}
const getExecutor = (livepush: Livepush) => {
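startLivepush now returns an async disposer that closes the chokidar watcher and removes livepush's intermediate containers. A self-contained sketch of the same watcher-plus-disposer shape (not part of the diff), reduced to chokidar only; the livepush wiring is omitted and the callback name is illustrative:

import * as chokidar from 'chokidar';

// Start watching the project and hand back an async disposer.
async function startWatching(
	onChange: (path: string) => void,
): Promise<() => Promise<void>> {
	const watcher = chokidar
		.watch('.', {
			ignored: /((^|[\/\\])\..|(node_modules|sync|test)\/.*)/,
			ignoreInitial: true,
		})
		.on('add', onChange)
		.on('change', onChange);
	// Callers await this in their finally block, as the CLI change below does.
	return async () => {
		await watcher.close();
	};
}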


@@ -35,13 +35,15 @@ const argv = yargs
alias: 'i',
type: 'string',
description: 'Specify the name to use for the supervisor image on device',
default: 'livepush-supervisor',
default: `livepush-supervisor-${packageJson.version}`,
})
.options('image-tag', {
alias: 't',
type: 'string',
description: 'Specify the tag to use for the supervisor image on device',
default: packageJson.version,
description:
'Specify the tag to use for the supervisor image on device. It will not have any effect on balenaOS >= v2.89.0',
default: 'latest',
deprecated: true,
})
.options('nocache', {
description: 'Run the initial build without cache',
@@ -59,9 +61,14 @@ const argv = yargs
await fs.readFile('Dockerfile.template'),
);
let cleanup = () => Promise.resolve();
let sigint = () => {
/** ignore empty */
};
try {
const docker = device.getDocker(address);
const containerId = await init.initDevice({
const { containerId, stageImages } = await init.initDevice({
address,
docker,
dockerfile,
@@ -74,14 +81,24 @@ const argv = yargs
console.log(`Supervisor container: ${containerId}\n`);
await setupLogs(docker, containerId);
await startLivepush({
cleanup = await startLivepush({
dockerfile,
containerId,
docker,
noinit: true,
stageImages,
});
await new Promise((_, reject) => {
sigint = () => reject(new Error('User interrupt (Ctrl+C) received'));
process.on('SIGINT', sigint);
});
} catch (e) {
console.error('Error:');
console.error(e.message);
console.error('Error:', e.message);
} finally {
console.info('Cleaning up. Please wait ...');
await cleanup();
process.removeListener('SIGINT', sigint);
process.exit(1);
}
})();
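The CLI change above routes Ctrl+C through a rejected promise so that cleanup always runs in the finally block before the process exits. A reduced, standalone sketch of that pattern (not part of the diff); the function name and the usage line are illustrative:

// Keep the process alive until Ctrl+C, then always run the provided cleanup.
async function runUntilInterrupted(cleanup: () => Promise<void>): Promise<void> {
	let sigint = () => {
		/* ignore empty */
	};
	try {
		await new Promise<never>((_, reject) => {
			sigint = () => reject(new Error('User interrupt (Ctrl+C) received'));
			process.on('SIGINT', sigint);
		});
	} finally {
		await cleanup();
		process.removeListener('SIGINT', sigint);
	}
}

// Usage: the rejection from SIGINT surfaces here after cleanup has run.
runUntilInterrupted(async () => console.log('cleaning up')).catch(() => process.exit(1));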