Merge pull request #1241 from balena-io/alpine-base

Move to an alpine base image and drop i386-nlp support
CameronDiver 2020-04-06 13:32:20 +01:00 committed by GitHub
commit fc5fa50657
21 changed files with 1704 additions and 1098 deletions


@ -1,151 +1,128 @@
ARG ARCH=amd64
ARG NPM_VERSION=6.9.0
ARG NODE_VERSION=10.19.0
# The node version here should match the version of the runtime image which is
# specified in the base-image subdirectory in the project
FROM balenalib/raspberry-pi-node:10-run as rpi-node-base
FROM balenalib/armv7hf-node:10-run as armv7hf-node-base
FROM balenalib/aarch64-node:10-run as aarch64-node-base
RUN [ "cross-build-start" ]
RUN sed -i '/security.debian.org jessie/d' /etc/apt/sources.list
RUN [ "cross-build-end" ]
FROM balenalib/$ARCH-alpine-supervisor-base:3.11 as BUILD
FROM balenalib/amd64-node:10-run as amd64-node-base
RUN echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-start && chmod +x /usr/bin/cross-build-start \
&& echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-end && chmod +x /usr/bin/cross-build-end
FROM balenalib/i386-node:10-run as i386-node-base
RUN echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-start && chmod +x /usr/bin/cross-build-start \
&& echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-end && chmod +x /usr/bin/cross-build-end
FROM balenalib/i386-nlp-node:6-jessie as i386-nlp-node-base
RUN echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-start && chmod +x /usr/bin/cross-build-start \
&& echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-end && chmod +x /usr/bin/cross-build-end
# Setup webpack building base images
# We always do the webpack build on amd64, because it's way faster
FROM amd64-node-base as rpi-node-build
FROM amd64-node-base as amd64-node-build
FROM amd64-node-base as armv7hf-node-build
FROM amd64-node-base as aarch64-node-build
FROM amd64-node-base as i386-node-build
FROM balenalib/amd64-node:6-build as i386-nlp-node-build
##############################################################################
FROM $ARCH-node-build as node-build
ARG ARCH
ARG NODE_VERSION
ARG NODE_ARCHIVE="node-no-intl-v${NODE_VERSION}-linux-alpine-${ARCH}.tar.gz"
ARG S3_BASE="https://resin-packages.s3.amazonaws.com"
ARG NODE_LOCATION="${S3_BASE}/node/v${NODE_VERSION}/${NODE_ARCHIVE}"
# DO NOT REMOVE THE cross-build-* COMMANDS
# The following commands are absolutely needed. When we
# build for ARM architectures, we run this Dockerfile
# through sed, which uncomments these lines. There were
# other options for achieving the same setup, but this seems
# to be the least intrusive. The commands start commented
# out because the default build for balenaCI is amd64 (and
# we can't run any sed preprocessing on it there)
# RUN ["cross-build-start"]
WORKDIR /usr/src/app
RUN apt-get update \
&& apt-get install -y \
g++ \
git \
libsqlite3-dev \
make \
python \
rsync \
curl \
&& rm -rf /var/lib/apt/lists/
RUN apk add --no-cache \
g++ \
git \
make \
python \
curl \
binutils \
libgcc \
libstdc++ \
libuv
COPY package.json package-lock.json /usr/src/app/
COPY build-conf/node-sums.txt .
ARG NPM_VERSION
# We first ensure that every architecture has an npm version
# which can do an npm ci, then we perform the ci using this
# temporary version
RUN curl -LOJ https://www.npmjs.com/install.sh && \
# This is required to avoid a bug in uid-number
# https://github.com/npm/uid-number/issues/7
npm config set unsafe-perm true && \
npm_install="#{NPM_VERSION}" npm_config_prefix=/tmp sh ./install.sh && \
JOBS=MAX /tmp/bin/npm ci --no-optional --unsafe-perm
# Install node from balena's prebuilt cache
RUN curl -SLO "${NODE_LOCATION}" \
&& grep "${NODE_ARCHIVE}" node-sums.txt | sha256sum -c - \
&& tar -xzf "${NODE_ARCHIVE}" -C /usr/local --strip-components=1 \
&& rm -f "${NODE_ARCHIVE}" \
&& strip /usr/local/bin/node
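# (The grep | sha256sum -c pattern above picks the expected checksum line
# for this architecture's archive out of build-conf/node-sums.txt and
# verifies the download against it, failing the build on a mismatch.)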
COPY webpack.config.js fix-jsonstream.js hardcode-migrations.js tsconfig.json tsconfig.release.json /usr/src/app/
COPY src /usr/src/app/src
COPY test /usr/src/app/test
COPY typings /usr/src/app/typings
COPY package*.json ./
RUN npm ci
# TODO: Once we support live copies and live runs, convert
# these
# issue: https://github.com/balena-io-modules/livepush/issues/73
RUN apk add --no-cache ip6tables iptables
COPY entry.sh .
#dev-cmd-live=LIVEPUSH=1 ./entry.sh
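# (The line above is a livepush directive, not a plain comment: when syncing
# with npm run sync, the container is started with LIVEPUSH=1 so entry.sh
# runs the source under nodemon instead of the prebuilt dist bundle; see
# entry.sh later in this diff.)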
COPY webpack.config.js fix-jsonstream.js hardcode-migrations.js tsconfig.json tsconfig.release.json ./
COPY src ./src
COPY test ./test
COPY typings ./typings
RUN npm run test-nolint \
&& npm run build
##############################################################################
# Run the production install here, to avoid the npm dependency on
# the later stage
RUN npm ci --production --no-optional --unsafe-perm \
&& npm cache clean --force \
# For some reason this doesn't get cleared with the other
# cache
&& rm -rf node_modules/.cache \
# Remove various unneeded filetypes in order to reduce space
# We also remove the spurious node.dtps, see https://github.com/mapbox/node-sqlite3/issues/861
&& find . -path '*/coverage/*' -o -path '*/test/*' -o -path '*/.nyc_output/*' \
-o -name '*.tar.*' -o -name '*.in' -o -name '*.cc' \
-o -name '*.c' -o -name '*.coffee' -o -name '*.eslintrc' \
-o -name '*.h' -o -name '*.html' -o -name '*.markdown' \
-o -name '*.md' -o -name '*.patch' -o -name '*.png' \
-o -name '*.yml' -o -name "*.ts" \
-delete \
&& find . -type f -path '*/node_modules/sqlite3/deps*' -delete \
&& find . -type f -path '*/node_modules/knex/build*' -delete \
&& rm -rf node_modules/sqlite3/node.dtps
# Build nodejs dependencies
FROM $ARCH-node-base as node-deps
RUN [ "cross-build-start" ]
# RUN ["cross-build-end"]
FROM balenalib/$ARCH-alpine-supervisor-base:3.11
# RUN ["cross-build-start"]
RUN apk add --no-cache \
ca-certificates \
kmod \
iptables \
ip6tables \
rsync \
avahi \
dbus \
libstdc++
WORKDIR /usr/src/app
RUN apt-get update \
&& apt-get install -y \
g++ \
git \
libsqlite3-dev \
make \
python \
rsync \
curl \
&& rm -rf /var/lib/apt/lists/
COPY --from=BUILD /usr/local/bin/node /usr/local/bin/node
COPY --from=BUILD /usr/src/app/dist ./dist
COPY --from=BUILD /usr/src/app/package.json ./
COPY --from=BUILD /usr/src/app/node_modules ./node_modules
COPY entry.sh .
RUN mkdir -p rootfs-overlay && \
ln -s /lib rootfs-overlay/lib64
COPY package.json package-lock.json /usr/src/app/
ARG NPM_VERSION
# Install only the production modules that have C extensions
RUN curl -LOJ https://www.npmjs.com/install.sh && \
npm config set unsafe-perm true && \
npm_install="${NPM_VERSION}" npm_config_prefix=/tmp sh ./install.sh && \
JOBS=MAX /tmp/bin/npm ci --no-optional --unsafe-perm --production
# Remove various unneeded filetypes in order to reduce space
# We also remove the spurious node.dtps, see https://github.com/mapbox/node-sqlite3/issues/861
RUN find . -path '*/coverage/*' -o -path '*/test/*' -o -path '*/.nyc_output/*' \
-o -name '*.tar.*' -o -name '*.in' -o -name '*.cc' \
-o -name '*.c' -o -name '*.coffee' -o -name '*.eslintrc' \
-o -name '*.h' -o -name '*.html' -o -name '*.markdown' \
-o -name '*.md' -o -name '*.patch' -o -name '*.png' \
-o -name '*.yml' -o -name "*.ts" \
-delete \
&& find . -type f -path '*/node_modules/sqlite3/deps*' -delete \
&& find . -type f -path '*/node_modules/knex/build*' -delete \
&& rm -rf node_modules/sqlite3/node.dtps
COPY entry.sh package.json rootfs-overlay/usr/src/app/
RUN rsync -a --delete node_modules rootfs-overlay /build
RUN [ "cross-build-end" ]
##############################################################################
# Minimal runtime image
FROM balena/$ARCH-supervisor-base:v1.4.7
WORKDIR /usr/src/app
COPY --from=node-build /usr/src/app/dist ./dist
COPY --from=node-deps /build/node_modules ./node_modules
COPY --from=node-deps /build/rootfs-overlay/ /
# Remove default nproc limit for Avahi for it to work in-container
COPY avahi-daemon.conf /etc/avahi/avahi-daemon.conf
VOLUME /data
(([ ! -d rootfs-overlay/lib64 ] && ln -s /lib rootfs-overlay/lib64) || true)
ARG ARCH
ARG VERSION=master
ARG DEFAULT_MIXPANEL_TOKEN=bananasbananas
ENV CONFIG_MOUNT_POINT=/boot/config.json \
LED_FILE=/dev/null \
SUPERVISOR_IMAGE=resin/$ARCH-supervisor \
SUPERVISOR_IMAGE=balena/$ARCH-supervisor \
VERSION=$VERSION \
DEFAULT_MIXPANEL_TOKEN=$DEFAULT_MIXPANEL_TOKEN
COPY avahi-daemon.conf /etc/avahi/avahi-daemon.conf
VOLUME /data
HEALTHCHECK --interval=5m --start-period=1m --timeout=30s --retries=3 \
CMD wget -qO- http://127.0.0.1:${LISTEN_PORT:-48484}/v1/healthy || exit 1
CMD wget http://127.0.0.1:${LISTEN_PORT:-48484}/v1/healthy -O - -q
CMD [ "./entry.sh" ]
# RUN ["cross-build-end"]
CMD ["/usr/src/app/entry.sh"]


@ -1,87 +0,0 @@
ARG ARCH=amd64
ARG NPM_VERSION=6.9.0
# The node version here should match the version of the runtime image which is
# specified in the base-image subdirectory in the project
FROM balenalib/raspberry-pi-node:10-stretch-run as rpi-node-base
FROM balenalib/armv7hf-node:10-stretch-run as armv7hf-node-base
FROM balenalib/aarch64-node:10-stretch-run as aarch64-node-base
RUN [ "cross-build-start" ]
RUN sed -i '/security.debian.org jessie/d' /etc/apt/sources.list
RUN [ "cross-build-end" ]
FROM balenalib/amd64-node:10-stretch-run as amd64-node-base
RUN echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-start && chmod +x /usr/bin/cross-build-start \
&& echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-end && chmod +x /usr/bin/cross-build-end
FROM balenalib/i386-node:10-stretch-run as i386-node-base
RUN echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-start && chmod +x /usr/bin/cross-build-start \
&& echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-end && chmod +x /usr/bin/cross-build-end
FROM balenalib/i386-nlp-node:6-jessie as i386-nlp-node-base
RUN echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-start && chmod +x /usr/bin/cross-build-start \
&& echo '#!/bin/sh\nexit 0' > /usr/bin/cross-build-end && chmod +x /usr/bin/cross-build-end
FROM $ARCH-node-base AS node-deps
RUN [ "cross-build-start" ]
WORKDIR /usr/src/app
RUN apt-get update \
&& apt-get install -y \
ca-certificates \
g++ \
git \
kmod \
iptables \
libnss-mdns \
make \
nodejs \
python \
rsync \
curl \
vim
ARG NPM_VERSION
# We first ensure that every architecture has an npm version
# which can do an npm ci, then we perform the ci using this
# temporary version
RUN curl -LOJ https://www.npmjs.com/install.sh && \
# This is required to avoid a bug in uid-number
# https://github.com/npm/uid-number/issues/7
npm config set unsafe-perm true && \
npm_install="${NPM_VERSION}" npm_config_prefix=/tmp sh ./install.sh
COPY package*.json ./
RUN JOBS=MAX /tmp/bin/npm ci --no-optional --unsafe-perm
COPY src src/
COPY typings typings/
COPY tsconfig.json tsconfig.release.json hardcode-migrations.js fix-jsonstream.js ./
RUN npm run build:debug
RUN mkdir -p dist && echo "require('../build/app.js')" > dist/app.js
COPY entry.sh .
RUN mkdir -p rootfs-overlay && \
(([ ! -d rootfs-overlay/lib64 ] && ln -s /lib rootfs-overlay/lib64) || true)
ARG ARCH
ARG VERSION=master
ARG DEFAULT_MIXPANEL_TOKEN=bananasbananas
ENV CONFIG_MOUNT_POINT=/boot/config.json \
LED_FILE=/dev/null \
SUPERVISOR_IMAGE=resin/$ARCH-supervisor \
VERSION=$VERSION \
DEFAULT_MIXPANEL_TOKEN=$DEFAULT_MIXPANEL_TOKEN
COPY avahi-daemon.conf /etc/avahi/avahi-daemon.conf
VOLUME /data
HEALTHCHECK --interval=5m --start-period=1m --timeout=30s --retries=3 \
CMD curl --fail http://127.0.0.1:${LISTEN_PORT:-48484}/v1/healthy
RUN [ "cross-build-end" ]
CMD DEBUG=1 ./entry.sh || while true; do echo 'Supervisor runtime exited - waiting for changes'; sleep 100; done;

Makefile

@ -1,172 +0,0 @@
# balena-supervisor Makefile
#
# If you're looking for an easy way to develop on the supervisor, check ./tools/dev/dindctl, which provides a simplified interface
# to this makefile.
#
# Build targets (require Docker 17.05 or greater):
# * supervisor (default) - builds a balena-supervisor image
# * deploy - pushes a balena-supervisor image to the registry, retrying up to 3 times
# * nodedeps, nodebuild - builds the node component, with the node_modules and src at /usr/src/app and /build (also includes a rootfs-overlay there)
# * supervisor-dind: build the development docker-in-docker supervisor that run-supervisor uses (requires a SUPERVISOR_IMAGE to be available locally)
#
# Variables for build targets:
# * ARCH: amd64/rpi/i386/armv7hf/armel/aarch64 architecture for which to build the supervisor - default: amd64
# * IMAGE: image to build or deploy - default: balena/$(ARCH)-supervisor:latest
# * MIXPANEL_TOKEN: (optional) default mixpanel key to embed in the supervisor image
# * DISABLE_CACHE: if set to true, run build with no cache - default: false
# * DOCKER_BUILD_OPTIONS: Additional options for docker build, like --cache-from parameters
#
# Test/development targets:
# * run-supervisor, stop-supervisor - build and start or stop a docker-in-docker balena-supervisor (requires aufs, ability to run privileged containers, and a SUPERVISOR_IMAGE to be available locally)
#
# Variables for test/dev targets:
# * IMAGE: image to build and run (either for run-supervisor or test-gosuper/integration)
# * SUPERVISOR_IMAGE: In run-supervisor and supervisor-dind, the supervisor image to run inside the docker-in-docker image
# * PRELOADED_IMAGE: If true, will preload user app image from tools/dev/apps.json and bind mount apps.json into the docker-in-docker supervisor
# * MOUNT_DIST: If true, mount the dist folder into the docker-in-docker supervisor
# * MOUNT_NODE_MODULES: If true, mount the node_modules folder into the docker-in-docker supervisor
# * CONTAINER_NAME: For run-supervisor, specify the container name for the docker-in-docker container (default: supervisor which produces container balena-container-supervisor)
# * CONFIG_FILENAME: For run-supervisor, specify the filename to mount as config.json, relative to tools/dind/ (default: config.json)
# * DIND_IMAGE: For run-supervisor, specify the balenaOS image to use (default: resin/resinos:2.12.5_rev1-intel-nuc)
#
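# Example invocations (a sketch; image names are illustrative):
#   make ARCH=armv7hf IMAGE=balena/armv7hf-supervisor:test supervisor
#   make deploy IMAGE=balena/armv7hf-supervisor:test
#   make run-supervisor SUPERVISOR_IMAGE=balena/amd64-supervisor:master
#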
# Based on https://stackoverflow.com/a/8540718/2549019
# Retrieves a repo part of the given docker image string
# Param:
# 1. String to parse in form 'repo[:tag]'.
repo = $(firstword $(subst :, ,$1))
# Returns a tag (if any) on a docker image string.
# If there is no tag part in the string, returns latest
# Param:
# 1. String to parse in form 'repo[:tag]'.
tag = $(or $(word 2,$(subst :, ,$1)),latest)
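# e.g. $(call repo,balena/amd64-supervisor:master) yields balena/amd64-supervisor,
# $(call tag,balena/amd64-supervisor:master) yields master, and with no tag
# part $(call tag,balena/amd64-supervisor) yields latest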
THIS_FILE := $(lastword $(MAKEFILE_LIST))
help:
@cat $(THIS_FILE) | awk '{if(/^#/)print;else exit}' | sed 's/\#//'
OS := $(shell uname)
# If we're behind a proxy, use it during build
ifdef http_proxy
DOCKER_HTTP_PROXY=--build-arg http_proxy=$(http_proxy)
endif
ifdef https_proxy
DOCKER_HTTPS_PROXY=--build-arg https_proxy=$(https_proxy)
endif
ifdef no_proxy
DOCKER_NO_PROXY=--build-arg no_proxy=$(no_proxy)
endif
DISABLE_CACHE ?= 'false'
DOCKER_VERSION:=$(shell docker version --format '{{.Server.Version}}')
DOCKER_MAJOR_VERSION:=$(word 1, $(subst ., ,$(DOCKER_VERSION)))
DOCKER_MINOR_VERSION:=$(word 2, $(subst ., ,$(DOCKER_VERSION)))
DOCKER_GE_17_05 := $(shell [ $(DOCKER_MAJOR_VERSION) -gt 17 -o \( $(DOCKER_MAJOR_VERSION) -eq 17 -a $(DOCKER_MINOR_VERSION) -ge 5 \) ] && echo true)
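# e.g. a reported server version of 19.03.8 gives major 19 and minor 03;
# since 19 > 17, DOCKER_GE_17_05 evaluates to true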
# Default values for Mixpanel key
MIXPANEL_TOKEN ?= bananasbananas
# Default architecture and output image
ARCH ?= amd64
IMAGE ?= balena/$(ARCH)-supervisor:master
# Default values for run-supervisor
SUPERVISOR_IMAGE ?= balena/$(ARCH)-supervisor:master
CONTAINER_NAME ?= supervisor
CONFIG_FILENAME ?= config.json
DIND_IMAGE ?= resin/resinos:2.12.5_rev1-intel-nuc
# Bind mounts and variables for the run-supervisor target
SUPERVISOR_DIND_MOUNTS := -v $$(pwd)/config/supervisor-image.tar:/usr/src/supervisor-image.tar:ro -v $$(pwd)/start-resin-supervisor:/usr/bin/start-resin-supervisor:ro -v $$(pwd)/config/supervisor.conf:/etc/resin-supervisor/supervisor.conf
ifeq ($(PRELOADED_IMAGE),true)
SUPERVISOR_DIND_MOUNTS := ${SUPERVISOR_DIND_MOUNTS} -v $$(pwd)/apps.json:/mnt/data/apps.json
else
PRELOADED_IMAGE=
endif
ifeq ($(MOUNT_DIST), true)
SUPERVISOR_DIND_MOUNTS := ${SUPERVISOR_DIND_MOUNTS} -v $$(pwd)/../../dist:/resin-supervisor/dist
endif
ifeq ($(MOUNT_NODE_MODULES), true)
SUPERVISOR_DIND_MOUNTS := ${SUPERVISOR_DIND_MOUNTS} -v $$(pwd)/../../node_modules:/resin-supervisor/node_modules
endif
ifeq ($(MOUNT_BACKUP), true)
SUPERVISOR_DIND_MOUNTS := ${SUPERVISOR_DIND_MOUNTS} -v $$(pwd)/backup.tgz:/mnt/data/backup.tgz.mounted
endif
ifdef TARGET_COMPONENT
DOCKER_TARGET_COMPONENT := "--target=${TARGET_COMPONENT}"
else
DOCKER_TARGET_COMPONENT :=
endif
# Default target is to build the supervisor image
all: supervisor
supervisor-tar:
cd tools/dind \
&& mkdir -p config \
&& docker save --output config/supervisor-image.tar $(SUPERVISOR_IMAGE)
supervisor-conf:
cd tools/dind \
&& mkdir -p config \
&& echo "SUPERVISOR_IMAGE=$(call repo,$(SUPERVISOR_IMAGE))" > config/supervisor.conf \
&& echo "SUPERVISOR_TAG=$(call tag,$(SUPERVISOR_IMAGE))" >> config/supervisor.conf \
&& echo "LED_FILE=/dev/null" >> config/supervisor.conf
supervisor-dind: supervisor-tar supervisor-conf
run-supervisor: supervisor-dind
cd tools/dind \
&& ./balenaos-in-container/balenaos-in-container.sh \
--detach \
--config "$(CONFIG_FILENAME)" \
--image $(DIND_IMAGE) \
--id $(CONTAINER_NAME) \
--extra-args "${SUPERVISOR_DIND_MOUNTS}"
stop-supervisor:
-docker stop balena-container-$(CONTAINER_NAME) > /dev/null || true
-docker rm -f --volumes balena-container-$(CONTAINER_NAME) > /dev/null || true
supervisor-image:
ifneq ($(DOCKER_GE_17_05),true)
@echo "Docker >= 17.05 is needed to build the supervisor"
@exit 1
endif
docker build \
$(DOCKER_HTTP_PROXY) \
$(DOCKER_HTTPS_PROXY) \
$(DOCKER_NO_PROXY) \
$(DOCKER_TARGET_COMPONENT) \
$(DOCKER_BUILD_OPTIONS) \
--no-cache=$(DISABLE_CACHE) \
--build-arg ARCH=$(ARCH) \
--build-arg VERSION=$(shell jq -r .version package.json) \
--build-arg DEFAULT_MIXPANEL_TOKEN=$(MIXPANEL_TOKEN) \
`if [ -n "$$DEBUG" ]; then echo '-f Dockerfile.debug'; fi` \
-t $(IMAGE) .
supervisor:
@$(MAKE) -f $(THIS_FILE) IMAGE=$(IMAGE) ARCH=$(ARCH) supervisor-image
deploy:
@bash retry_docker_push.sh $(IMAGE)
nodedeps:
$(MAKE) -f $(THIS_FILE) TARGET_COMPONENT=node-deps IMAGE=$(IMAGE) ARCH=$(ARCH) supervisor-image
nodebuild:
$(MAKE) -f $(THIS_FILE) TARGET_COMPONENT=node-build IMAGE=$(IMAGE) ARCH=$(ARCH) supervisor-image
.PHONY: supervisor deploy nodedeps nodebuild supervisor-dind run-supervisor

README.md

@ -1,24 +1,105 @@
# balena-supervisor
# balenaSupervisor
This is [balena](https://balena.io)'s supervisor, a program that runs on IoT devices and has the task of running user Apps (which are Docker containers), and updating them as the balena API informs it to.
balenaSupervisor is [balena](https://balena.io)'s on-device
agent, responsible for monitoring and applying changes to an
IoT device. It communicates with balenaCloud and handles the
lifecycle of an IoT application.
The supervisor is a Node.js program.
Using the remote API of [balenaEngine](https://balena.io/engine),
our IoT-centric container engine, balenaSupervisor
will install, start, stop and monitor IoT applications,
delivered and run as [OCI](https://www.opencontainers.org/)
compliant containers.
## Running a supervisor locally
balenaSupervisor is developed using Node.js.
## Contributing
If you're interested in contributing, that's awesome!
> Contributions are not only pull requests! Bug reports and
> feature requests are also extremely valuable additions.
### Issues
Feature requests and bug reports should be submitted via
issues. One of the balenaSupervisor team will reply and work
with the community to plan a route forward. Although we may
never implement the feature, taking the time to let us know
what you as a user would like to see really helps our
decision-making process!
### Pull requests
Here are a few guidelines to make the process easier for everyone involved.
- Every PR _should_ have an associated issue, and the PR's opening comment should say "Fixes #issue" or "Closes #issue".
- We use [Versionist](https://github.com/resin-io/versionist) to manage versioning (and in particular, [semantic versioning](https://semver.org)) and generate the changelog for this project.
- At least one commit in a PR should have a `Change-Type: type` footer, where `type` can be `patch`, `minor` or `major`. The subject of this commit will be added to the changelog.
- Commits should be squashed as much as makes sense.
- Commits should be signed-off (`git commit -s`)
## Developing the supervisor
By far the most convenient way to develop the supervisor is
to download a development image of balenaOS from the
dashboard, and run it on a device you have to hand. You can
then use the local network to sync changes using
[livepush](http://github.com/balena-io-modules/livepush) and
`npm run sync`.
If you do not have a device available, it's possible to run
a supervisor locally, using
[balenaOS-in-container](https://github.com/balena-os/balenaos-in-container).
These steps are detailed below.
### Sync
Example:
```bash
$ npm run sync -- d19baeb.local
> balena-supervisor@10.11.3 sync /home/cameron/Balena/modules/balena-supervisor
> ts-node --project tsconfig.json sync/sync.ts "d19baeb.local"
Step 1/23 : ARG ARCH=amd64
Step 2/23 : ARG NODE_VERSION=10.19.0
Step 3/23 : FROM balenalib/$ARCH-alpine-supervisor-base:3.11 as BUILD
...
```
> Note: For `.local` resolution to work you must have
> installed and enabled mDNS. Alternatively you can use the
> device's local IP.
Sync will first run a new build on the target device (or
balenaOS container) once livepush has processed the
livepush-specific commands, and will then start the new
supervisor image on the device.
The supervisor logs are then streamed back from the device,
and livepush will watch for changes on the local FS,
syncing any relevant file changes to the running supervisor
container. It will then decide whether the container should
be restarted, or let nodemon handle the changes.
### Using balenaOS-in-container
This process will allow you to run a development instance of the supervisor on your local computer. It is not recommended for production scenarios, but allows someone developing on the supervisor to test changes quickly.
The supervisor is run inside a balenaOS instance running in a container, so effectively it's a Docker-in-Docker instance (or more precisely, [balenaEngine](https://github.com/resin-os/balena-engine)-in-Docker).
### Set up `config.json`
#### Set up `config.json`
To configure the supervisor, you'll need a `tools/dind/config.json` file. There are two options for getting this file:
- Log in to the [balenaCloud dashboard](https://dashboard.balena-cloud.com), create or select an application, click "Add device" and on the Advanced section select "Download configuration file only". Make sure you use an x86 or amd64 device type for your application, for example Intel NUC.
- Install the balena CLI with `npm install -g balena-cli`, then login with `balena login` and finally run `balena config generate --app <appName> -o config.json` (choose the default settings whenever prompted). The full CLI flow is sketched below.
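For the CLI route, the end-to-end flow looks something like this (a sketch; `my-app` is a placeholder application name):

```bash
npm install -g balena-cli
balena login
# choose the default settings whenever prompted
balena config generate --app my-app -o config.json
mv config.json tools/dind/config.json
```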
The `config.json` file should look something like this:
(Please note we've added comments to the JSON for better explanation - the actual file should be valid JSON _without_ such comments)
```
{
@ -37,7 +118,7 @@ The `config.json` file should look something like this:
Additionally, the `uuid`, `registered_at` and `deviceId` fields will be added by the supervisor upon registration with the balena API. Other fields may be present (the format has evolved over time and will likely continue to do so) but they are not used by the supervisor.
### Start the supervisor instance
#### Start the supervisor instance
Ensure your kernel supports aufs (in Ubuntu, install `linux-image-extra-$(uname -r)`) and the `aufs` module is loaded (if necessary, run `sudo modprobe aufs`).
@ -63,7 +144,7 @@ This will mount the ./dist folder into the supervisor container and build the co
./dindctl refresh
```
### Testing with preloaded apps
#### Testing with preloaded apps
To test preloaded apps, run `balena preload` (see the [balena CLI docs](https://docs.balena.io/tools/cli/#preload-60-image-62-)) on an OS image for the app you are testing with. Then copy the `apps.json` file from the `resin-data` partition into `tools/dind/apps.json`.
@ -79,7 +160,7 @@ Then run the supervisor like this:
This will make the Docker-in-Docker instance pull the image specified in `apps.json` before running the supervisor, simulating a preloaded balenaOS image.
### View the supervisor's logs
#### View the supervisor's logs
```bash
./dindctl logs
@ -92,43 +173,55 @@ additional options, for instance, to see the logs from the supervisor service:
./dindctl logs -fn 100 -u resin-supervisor
```
### Stop the supervisor
#### Stop the supervisor
```bash
./dindctl stop
```
This will stop the container and remove it, also removing
its volumes.
## Developing with a balenaOS device
## Developing using a production image or device
If you want to test local changes (only changes to the Node.js code are supported) on a real balenaOS device, provision
a [development OS image](https://docs.balena.io/understanding/understanding-devices/2.0.0/#dev-vs-prod-images) and power up the device. On the balenaCloud dashboard, take note of the device's IP address. Then run:
A production balena image does not have an open docker
socket, which is required for livepush to work. In this
situation, the `tools/sync.js` script can be used. Note that
this process is no longer actively developed, so your
mileage may vary.
```
./sync.js <device IP>
```
Bug reports and pull requests are still accepted for changes
to `sync.js`, but the balenaSupervisor team will focus on
`npm run sync` in the future.
This will build the supervisor code and sync it onto the running supervisor container inside the device, and then restart it.
## Building
## Build a local supervisor image
### Docker images
This should rarely be needed as `--mount-dist` allows you to test any changes to the Node.js code without a full rebuild. However, if you've changed code in the base image or the Dockerfile you will need to build the proper
supervisor Docker image.
Build the supervisor with a specific tag, and for a specific architecture, like this:
To build a docker image for amd64 targets, it's as simple
as:
```bash
./dindctl build --tag master --arch amd64
docker build . -t my-supervisor
```
This will build the supervisor Docker image locally. If you then run `docker images` you should see the repo/tag you
set there. Keep in mind several images will be pulled for caching purposes.
For other architectures, one must use the script
`automation/build.sh`. This is because of emulation-specific
changes we have made to our base images to allow
cross-compilation.
## Base image
For example, to build for the raspberry pi 3:
The supervisor uses the [resin-supervisor-base](https://github.com/resin-io/resin-supervisor-base) as a base image.
This is a minimal Linux image containing busybox, rsync and Node.js, and it's built with the [Yocto project](https://www.yoctoproject.org/).
```sh
ARCH=armv7hf automation/build.sh
```
This will produce an image `balena/armv7hf-supervisor:<git branch name>`.
To avoid tagging by branch name, you can set a `TAG` variable
in your shell before using the build script, as in the sketch
below.
> Available architectures: `amd64`, `i386`, `aarch64`,
> `armv7hf` and `rpi`
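Putting the two together, a tagged cross-architecture build would look like this (a sketch; the tag is illustrative):

```bash
# produces balena/armv7hf-supervisor:test instead of tagging by branch name
TAG=test ARCH=armv7hf automation/build.sh
```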
## Testing
@ -138,23 +231,16 @@ You can run some unit tests with:
npm test
```
You'll need Node.js 6 installed, and having run `npm install` first. The supervisor runs on Node 6.13.1, so using that specific version will ensure tests run in the same environment as production.
The supervisor runs on Node v10.19.0, so using that specific
version will ensure tests run in the same environment as
production.
## Contributing
If you're interested in contributing, that's awesome!
Here's a few guidelines to make the process easier for everyone involved.
* Every PR *should* have an associated issue, and the PR's opening comment should say "Fixes #issue" or "Closes #issue".
* We use [Versionist](https://github.com/resin-io/versionist) to manage versioning (and in particular, [semantic versioning](semver.org)) and generate the changelog for this project.
* At least one commit in a PR should have a `Change-Type: type` footer, where `type` can be `patch`, `minor` or `major`. The subject of this commit will be added to the changelog.
* Commits should be squashed as much as makes sense.
* Commits should be signed-off (`git commit -s`)
Alternatively, tests will be run when building the image,
which ensures that the environment is exactly the same.
## License
Copyright 2015 Rulemotion Ltd.
Copyright 2020 Balena Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -4,12 +4,11 @@
#
# Required variables:
# * ARCH
# * TAG
#
# Optional variables:
# * TAG: The default will be the current branch name
# * PUSH_IMAGES
# * CLEANUP
# * ENABLE_TESTS
# * MIXPANEL_TOKEN: default key to inject in the supervisor image
# * EXTRA_TAG: when PUSH_IMAGES is true, additional tag to push to the registries
#
@ -25,99 +24,104 @@
# to the docker registry.
# If CLEANUP is "true", all images will be removed after pushing - including any relevant images
# that may have been on the host from before the build, so be careful!
# If ENABLE_TESTS is "true", tests will be run.
#
# Requires docker >= 17.05, and make.
# Requires docker >= 17.05
#
set -e
THIS_FILE=$0
if [ -z "$ARCH" ] || [ -z "$TAG" ]; then
cat $THIS_FILE | awk '{if(/^#/)print;else exit}' | tail -n +2 | sed 's/\#//'
if [ -z "$ARCH" ] ; then
awk '{if(/^#/)print;else exit}' "${THIS_FILE}" | tail -n +2 | sed 's/\#//'
exit 1
fi
if [ -z "$TAG" ]; then
TAG=$(git rev-parse --abbrev-ref HEAD)
fi
if ! [ -x "$(command -v npx)" ]; then
echo 'NPM/npx is required to execute this script' >&2
exit 1
fi
# This is the supervisor image we will produce
TARGET_IMAGE=balena/$ARCH-supervisor:$TAG$DEBUG
TARGET_IMAGE=balena/$ARCH-supervisor:$TAG
TARGET_BUILD_IMAGE=balena/$ARCH-supervisor:$TAG-build
# Intermediate images and cache
NODE_IMAGE=balena/$ARCH-supervisor-node:$TAG$DEBUG
NODE_BUILD_IMAGE=balena/$ARCH-supervisor-node:$TAG-build$DEBUG
TARGET_CACHE_MASTER=balena/$ARCH-supervisor:master$DEBUG
NODE_CACHE_MASTER=balena/$ARCH-supervisor-node:master$DEBUG
NODE_BUILD_CACHE_MASTER=balena/$ARCH-supervisor-node:master-build$DEBUG
MASTER_IMAGE=balena/$ARCH-supervisor:master
MASTER_BUILD_IMAGE=balena/$ARCH-supervisor:master-build
CACHE_FROM=""
function useCache() {
image=$1
# Always add the cache because we can't do it from
# a subshell and specifying a missing image is fine
CACHE_FROM="$CACHE_FROM --cache-from $image"
CACHE_FROM="${CACHE_FROM} --cache-from $image"
# Pull in parallel for speed
docker pull $image &
docker pull "$image" &
}
useCache $TARGET_IMAGE
useCache $TARGET_CACHE_MASTER
useCache $NODE_IMAGE
useCache $NODE_CACHE_MASTER
# Debug images don't include nodebuild or use the supervisor-base image
if [ -z "$DEBUG" ]; then
useCache $NODE_BUILD_IMAGE
useCache $NODE_BUILD_CACHE_MASTER
function retryImagePush() {
local image=$1
local -i retries
local success=1
docker pull balenalib/amd64-node:6-build &
docker pull balena/$ARCH-supervisor-base:v1.4.7 &
fi
# Pull images we depend on in parallel to avoid needing
# to do it in serial during the build
docker pull balenalib/raspberry-pi-node:10-run &
docker pull balenalib/armv7hf-node:10-run &
docker pull balenalib/aarch64-node:10-run &
docker pull balenalib/amd64-node:10-run &
docker pull balenalib/i386-node:10-run &
docker pull balenalib/i386-nlp-node:6-jessie &
while (( retries < 3 )); do
retries+=1
if docker push "${image}"; then
success=0
break
fi
done
return $success
}
# If we're building for an ARM architecture, we uncomment
# the cross-build commands, to enable emulation
function processDockerfile() {
if [ "${ARCH}" == "aarch64" ] || [ "${ARCH}" == "armv7hf" ] || [ "${ARCH}" == "rpi" ]; then
sed -E 's/#(.*"cross-build-(start|end)".*)/\1/g' Dockerfile
else
cat Dockerfile
fi
}
export ARCH
useCache "${TARGET_IMAGE}"
useCache "${TARGET_BUILD_IMAGE}"
useCache "${MASTER_IMAGE}"
useCache "${MASTER_BUILD_IMAGE}"
# Wait for our cache to be downloaded
wait
export DOCKER_BUILD_OPTIONS=${CACHE_FROM}
export ARCH
export MIXPANEL_TOKEN
BUILD_ARGS="$CACHE_FROM --build-arg ARCH=${ARCH}"
# Try to build the first stage
processDockerfile | docker build -f - -t "${TARGET_BUILD_IMAGE}" --target BUILD ${BUILD_ARGS} .
# Debug images don't include nodebuild
if [ -z "$DEBUG" ]; then
make IMAGE=$NODE_BUILD_IMAGE nodebuild
if [ "$PUSH_IMAGES" = "true" ]; then
make IMAGE=$NODE_BUILD_IMAGE deploy &
fi
export DOCKER_BUILD_OPTIONS="${DOCKER_BUILD_OPTIONS} --cache-from ${NODE_BUILD_IMAGE}"
fi
# Now try to build the final stage
processDockerfile | docker build -f - -t "${TARGET_IMAGE}" ${BUILD_ARGS} .
make IMAGE=$NODE_IMAGE nodedeps
if [ "$PUSH_IMAGES" = "true" ]; then
make IMAGE=$NODE_IMAGE deploy &
fi
export DOCKER_BUILD_OPTIONS="${DOCKER_BUILD_OPTIONS} --cache-from ${NODE_IMAGE}"
if [ "${PUSH_IMAGES}" == "true" ]; then
retryImagePush "${TARGET_BUILD_IMAGE}" &
retryImagePush "${TARGET_IMAGE}" &
# This is the step that actually builds the supervisor
make IMAGE=$TARGET_IMAGE supervisor
if [ "$PUSH_IMAGES" = "true" ]; then
make IMAGE=$TARGET_IMAGE deploy &
if [ -n "$EXTRA_TAG" ]; then
docker tag $TARGET_IMAGE balena/$ARCH-supervisor:$EXTRA_TAG
make IMAGE=balena/$ARCH-supervisor:$EXTRA_TAG deploy &
if [ -n "${EXTRA_TAG}" ]; then
docker tag "${TARGET_IMAGE}" "balena/${ARCH}-supervisor:${EXTRA_TAG}"
retryImagePush "balena/${ARCH}-supervisor:${EXTRA_TAG}" &
fi
fi
# Wait for any ongoing deploys
wait
if [ "$CLEANUP" = "true" ]; then
docker rmi \
$TARGET_IMAGE \
$NODE_IMAGE \
$NODE_BUILD_IMAGE \
$TARGET_CACHE
"${TARGET_IMAGE}" \
"${TARGET_BUILD_IMAGE}" \
"${MASTER_IMAGE}" \
"${MASTER_BUILD_IMAGE}"
fi


@ -1,97 +0,0 @@
// Deploy a supervisor image as a supervisor_release in the balena API
//
// Environment variables:
// This program deploys for all device types, or only device types where the architecture matches $ARCH, if specified.
// It deploys to the API specified by $API_ENDPOINT and using a provided $API_TOKEN or $API_KEY
// (if both are set, API_TOKEN is preferred).
// The tag to deploy must be passed as $TAG.
//
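// Example invocation (a sketch; the endpoint, token and tag are placeholders):
//   API_ENDPOINT=https://api.balena-cloud.com API_TOKEN=xxxx TAG=v11.0.0 \
//     node deploy-to-balena-cloud.js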
const { PinejsClientRequest } = require('pinejs-client-request');
const Promise = require('bluebird');
const _ = require('lodash');
const url = require('url');
const apiEndpoint = process.env.API_ENDPOINT;
const apikey = process.env.API_KEY;
const arch = process.env.ARCH;
const tag = process.env.TAG;
const apiToken = process.env.API_TOKEN;
if (_.isEmpty(apikey) && _.isEmpty(apiToken)) {
console.error('Skipping deploy due to empty API_KEY and API_TOKEN');
process.exit(0);
}
if (_.isEmpty(apiEndpoint)) {
console.error('Please set a valid $API_ENDPOINT');
process.exit(1);
}
if (_.isEmpty(tag)) {
console.error('Please set a $TAG to deploy');
process.exit(1);
}
const supportedArchitectures = [ 'amd64', 'rpi', 'aarch64', 'i386', 'armv7hf', 'i386-nlp' ];
if (!_.isEmpty(arch) && !_.includes(supportedArchitectures, arch)) {
console.error('Invalid architecture ' + arch);
process.exit(1);
}
const archs = _.isEmpty(arch) ? supportedArchitectures : [ arch ];
const quarkSlugs = [ 'iot2000', 'cybertan-ze250' ];
const requestOpts = {
gzip: true,
timeout: 30000
};
if (!_.isEmpty(apiToken)) {
requestOpts.headers = {
Authorization: 'Bearer ' + apiToken
};
}
const apiEndpointWithPrefix = url.resolve(apiEndpoint, '/v2/')
const balenaApi = new PinejsClientRequest({
apiPrefix: apiEndpointWithPrefix,
passthrough: requestOpts
});
balenaApi._request(_.extend({
url: apiEndpoint + '/config/device-types',
method: 'GET'
}, balenaApi.passthrough))
.then( (deviceTypes) => {
// This is a critical step so we better do it serially
return Promise.mapSeries(deviceTypes, (deviceType) => {
if (archs.indexOf(deviceType.arch) >= 0) {
const options = {};
let arch = deviceType.arch;
if (_.isEmpty(apiToken)) {
options.apikey = apikey;
}
if (quarkSlugs.indexOf(deviceType.slug) >= 0) {
arch = 'i386-nlp';
}
console.log(`Deploying ${tag} for ${deviceType.slug}`);
return balenaApi.post({
resource: 'supervisor_release',
body: {
image_name: `balena/${arch}-supervisor`,
supervisor_version: tag,
device_type: deviceType.slug,
is_public: true
},
options
});
}
});
})
.then( () => {
process.exit(0);
})
.catch( (err) => {
console.error(`Error when deploying the supervisor to ${apiEndpoint}`, err, err.stack);
process.exit(1);
});


@ -1,16 +0,0 @@
{
"name": "balena-supervisor-automation",
"version": "1.0.0",
"description": "Tools to build/deploy balena-supervisor",
"main": "deploy-to-balena-cloud.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"author": "Balena Ltd.",
"license": "Apache-2.0",
"dependencies": {
"bluebird": "^3.5.0",
"lodash": "^4.17.4",
"pinejs-client": "^4.0.0"
}
}


@ -1,77 +0,0 @@
#!/bin/bash
# PR a supervisor release to meta-resin
#
# It clones meta-resin and opens a PR changing the supervisor version to $TAG.
# If a meta-resin folder exists, it assumes it has a meta-resin git repo.
#
# If $PR_1X is "true", an additional PR for 1.X will be created.
#
# Requires ssh keys set up to push and create the pull-request.
# Requires $TAG to be set to the supervisor version to use.
# Requires hub to be installed (see https://github.com/github/hub)
#
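# Example invocation (a sketch; the tag is a placeholder and the script
# filename is assumed):
#   TAG=v11.0.0 PR_1X=true ./create-pr-to-meta-resin.sh
#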
set -e
THIS_FILE=$0
if [ -z "$TAG" ]; then
cat $THIS_FILE | awk '{if(/^#/)print;else exit}' | tail -n +2 | sed 's/\#//'
exit 1
fi
REPO_URL="git@github.com:resin-os/meta-resin.git"
USER=${USER:-$(whoami)}
function prepareBranches() {
BASE=$1
HEAD=$2
git checkout $BASE
git reset HEAD
git checkout .
git fetch
git merge origin/${BASE}
git checkout -b ${HEAD}
}
function setSupervisorTag() {
sed -i "s/SUPERVISOR_TAG ?= \".*\"/SUPERVISOR_TAG ?= \"${TAG}\"/" meta-resin-common/recipes-containers/docker-disk/docker-resin-supervisor-disk.bb
}
function commitAndPR() {
BASE=$1
HEAD=$2
git commit -as -m "
docker-resin-supervisor-disk: Update to ${TAG}
Changelog-Entry: Update supervisor to ${TAG}
Change-Type: patch
"
git push origin $HEAD
hub pull-request -b ${BASE} -m "${BASE}: Update supervisor to ${TAG}
Change-Type: patch
"
}
if [ ! -d "./meta-resin" ]; then
echo "Cloning meta-resin..."
git clone $REPO_URL
else
echo "Using available meta-resin repo"
fi
cd meta-resin
echo "Creating pull request to add supervisor ${TAG} on master"
prepareBranches master supervisor-${TAG}
setSupervisorTag
commitAndPR master supervisor-${TAG}
if [ "$PR_1X" = "true" ]; then
echo "Creating pull request to add supervisor ${TAG} on 1.X"
prepareBranches 1.X 1.X-supervisor-${TAG}
setSupervisorTag
commitAndPR 1.X 1.X-supervisor-${TAG}
fi

build-conf/node-sums.txt (new file)

@ -0,0 +1,5 @@
c1e6e0ee0f3c903151d4118e7068d4ae5ef5b408c969d792b0a4af6dfe04275a node-no-intl-v10.19.0-linux-alpine-amd64.tar.gz
ba33990227362ecfe238839ee5e7611b55f3faf860fc44ce694a0aeebb265c2f node-no-intl-v10.19.0-linux-alpine-i386.tar.gz
e015cba0a644f9e08a641affec25afc047af8d0103c9353550c12734bba3b423 node-no-intl-v10.19.0-linux-alpine-armv7hf.tar.gz
9890e656869591cd3f0efeb10046103d324cd48147d40d6d7f6efd1eb680694c node-no-intl-v10.19.0-linux-alpine-aarch64.tar.gz
1227cda220cf86d72f1c57790660a3e3ec007ee6946cf4d92f73692eaae3874b node-no-intl-v10.19.0-linux-alpine-rpi.tar.gz


@ -20,11 +20,6 @@ defaults: &defaults
nodejs-npm \
openssh-client
- checkout
- run:
name: Install npm dependencies
working_directory: /tmp/build/automation
command: |
JOBS=max npm install
- run:
name: Initialize the submodules (yocto layers)
command: |
@ -95,15 +90,6 @@ jobs:
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: ''
i386-nlp:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: i386-nlp
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: ''
armv7hf:
<<: *defaults
environment:
@ -131,60 +117,6 @@ jobs:
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: ''
amd64-debug:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: amd64
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: '-debug'
i386-debug:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: i386
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: '-debug'
i386-nlp-debug:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: i386-nlp
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: '-debug'
armv7hf-debug:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: armv7hf
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: '-debug'
aarch64-debug:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: aarch64
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: '-debug'
rpi-debug:
<<: *defaults
environment:
DOCKER_USERNAME: travisciresin
ARCH: rpi
PUSH_IMAGES: 'true'
STAGING_API_ENDPOINT: https://api.balena-staging.com
PRODUCTION_API_ENDPOINT: https://api.balena-cloud.com
DEBUG: '-debug'
workflows:
version: 2
@ -206,24 +138,3 @@ workflows:
- aarch64:
requires:
- generic
- i386-nlp:
requires:
- generic
- amd64-debug:
requires:
- generic
- i386-debug:
requires:
- generic
- rpi-debug:
requires:
- generic
- armv7hf-debug:
requires:
- generic
- aarch64-debug:
requires:
- generic
- i386-nlp-debug:
requires:
- generic


@ -2,21 +2,6 @@
set -o errexit
# Start Avahi to allow MDNS lookups and remove
# any pre-defined services
rm -f /etc/avahi/services/*
mkdir -p /var/run/dbus
rm -f /var/run/avahi-daemon/pid
rm -f /var/run/dbus/pid
if [ -x /etc/init.d/dbus-1 ]; then
/etc/init.d/dbus-1 start;
elif [ -x /etc/init.d/dbus ]; then
/etc/init.d/dbus start;
else
echo "Could not start container local dbus daemon. Avahi services may fail!";
fi;
/etc/init.d/avahi-daemon start
# If the legacy /tmp/resin-supervisor exists on the host, a container might
# already be using it to take an update lock, so we symlink it to the new
# location so that the supervisor can see it
@ -68,4 +53,9 @@ fi
# not a problem.
modprobe ip6_tables || true
exec node /usr/src/app/dist/app.js
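# When livepush starts the container with LIVEPUSH=1 (via the dev-cmd-live
# directive in the Dockerfile), run straight from source under nodemon so
# synced changes are picked up without rebuilding the image.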
if [ "${LIVEPUSH}" -eq "1" ]; then
exec npx nodemon --watch src --watch typings --ignore tests \
--exec node -r ts-node/register/transpile-only -r coffeescript/register src/app.ts
else
exec node /usr/src/app/dist/app.js
fi

package-lock.json (generated)

File diff suppressed because it is too large


@ -26,7 +26,8 @@
"packagejson:copy": "cp package.json build/",
"testitems:copy": "cp -r test/data build/test/",
"lint:coffee": "balena-lint src/ test/",
"lint:typescript": "balena-lint -e ts -e js --typescript src/ test/ typings/ && tsc --noEmit && tsc --noEmit --project tsconfig.js.json"
"lint:typescript": "balena-lint -e ts -e js --typescript src/ test/ typings/ && tsc --noEmit && tsc --noEmit --project tsconfig.js.json",
"sync": "ts-node sync/sync.ts"
},
"private": true,
"dependencies": {
@ -62,6 +63,7 @@
"@types/sinon": "^7.5.2",
"@types/sinon-chai": "^3.2.3",
"@types/tmp": "^0.1.0",
"@types/yargs": "^15.0.4",
"blinking": "~0.0.3",
"bluebird": "^3.7.2",
"body-parser": "^1.19.0",
@ -90,7 +92,7 @@
"json-mask": "^0.3.9",
"knex": "^0.15.2",
"lint-staged": "^10.0.8",
"livepush": "^3.0.3",
"livepush": "^3.2.2",
"lockfile": "^1.0.4",
"lodash": "^4.17.15",
"log-timestamp": "^0.1.2",
@ -101,10 +103,12 @@
"morgan": "^1.10.0",
"mz": "^2.7.0",
"network-checker": "^0.1.1",
"nodemon": "^2.0.2",
"pinejs-client-request": "^5.2.0",
"pretty-ms": "^5.1.0",
"request": "^2.88.2",
"resin-cli-visuals": "^1.5.2",
"resin-docker-build": "^1.1.4",
"resin-register-device": "^3.0.0",
"resumable-request": "^2.0.0",
"rimraf": "^2.7.1",
@ -113,6 +117,7 @@
"sinon": "^7.5.0",
"sinon-chai": "^3.5.0",
"strict-event-emitter-types": "^2.0.0",
"tar-stream": "^2.1.2",
"terser": "^3.17.0",
"tmp": "^0.1.0",
"ts-loader": "^5.4.5",


@ -1,15 +0,0 @@
#!/bin/bash
IMAGE=$1
retries=0
while [ "$retries" -lt 3 ]; do
let retries=retries+1
docker push $IMAGE
ret=$?
if [ "$ret" -eq 0 ]; then
break
fi
done
exit $ret


@ -1,144 +0,0 @@
#!/usr/bin/env node
const helpText = `Sync changes in the javascript code to a running local mode supervisor on a device on the local network
Usage:
./sync-debug.js <device IP>
Note that the device should be running a debug image.`;
const argv = require('yargs')
.command(
'$0 <device IP>',
'Sync changes in code to a running debug mode supervisor on a local device',
yargs =>
yargs.positional('device IP', {
type: 'string',
describe: 'The address of a local device',
}),
)
.usage(helpText)
.version(false)
.option('noinit', {
boolean: true,
describe: "Don't do an initial sync of files",
default: false,
})
.alias('h', 'help').argv;
const ip = argv.deviceIP;
const { Livepush } = require('livepush');
const { fs } = require('mz');
const dockerode = require('dockerode');
const chokidar = require('chokidar');
const _ = require('lodash');
let lastReadTimestamp = null;
const setupLogs = async (containerId, docker) => {
const container = docker.getContainer(containerId);
const stream = await container.logs({
stdout: true,
stderr: true,
follow: true,
timestamps: true,
// We start from 0, as we risk not getting any logs to
// properly seed the value if the host and remote times differ
since: lastReadTimestamp != null ? lastReadTimestamp : 0,
});
stream.on('data', chunk => {
const { message, timestamp } = extractMessage(chunk);
lastReadTimestamp = Math.floor(timestamp.getTime() / 1000);
process.stdout.write(message);
});
stream.on('end', () => {
setupLogs(containerId, docker);
});
};
function extractMessage(msgBuf) {
// Non-tty message format from:
// https://docs.docker.com/engine/api/v1.30/#operation/ContainerAttach
if (_.includes([0, 1, 2], msgBuf[0])) {
// Take the header from this message, and parse it as normal
msgBuf = msgBuf.slice(8);
}
const str = msgBuf.toString();
const space = str.indexOf(' ');
return {
timestamp: new Date(str.slice(0, space)),
message: str.slice(space + 1),
};
}
const docker = new dockerode({
host: ip,
port: 2375,
});
let changedFiles = [];
let deletedFiles = [];
const performLivepush = _.debounce(async livepush => {
await livepush.performLivepush(changedFiles, deletedFiles);
changedFiles = [];
deletedFiles = [];
}, 1000);
(async () => {
console.log('Starting up...');
// Get the supervisor container id
const container = await docker.getContainer('resin_supervisor').inspect();
console.log('Supervisor container id: ', container.Id);
const containerId = container.Id;
const image = container.Image;
setupLogs(containerId, docker);
const livepush = await Livepush.init({
dockerfileContent: await fs.readFile('Dockerfile.debug'),
context: '.',
containerId,
// a bit of a hack, as the multistage images aren't
// present, but it shouldn't make a difference as these
// will never change
stageImages: _.times(6, () => image),
docker,
});
chokidar
.watch('.', {
ignored: /((^|[\/\\])\..|node_modules.*)/,
ignoreInitial: argv.noinit,
})
.on('add', path => {
changedFiles.push(path);
performLivepush(livepush, containerId, docker);
})
.on('change', path => {
changedFiles.push(path);
performLivepush(livepush, containerId, docker);
})
.on('unlink', path => {
deletedFiles.push(path);
performLivepush(livepush, containerId, docker);
});
livepush.on('commandExecute', ({ command }) => {
console.log('SYNC: executing:', command);
});
livepush.on('commandOutput', ({ output }) => {
const message = output.data.toString();
if (message.trim().length !== 0) {
process.stdout.write(`\t${message}`);
}
});
livepush.on('commandReturn', ({ returnCode }) => {
if (returnCode !== 0) {
console.log(`\tSYNC: Command return non zero exit status: ${returnCode}`);
}
});
livepush.on('containerRestart', () => {
console.log('SYNC: Restarting container...');
});
})();

sync/device.ts (new file)

@ -0,0 +1,178 @@
import * as Docker from 'dockerode';
import { Dockerfile } from 'livepush';
import * as _ from 'lodash';
import { Builder } from 'resin-docker-build';
import { promises as fs } from 'fs';
import { child_process } from 'mz';
import * as Path from 'path';
import { Duplex, Readable } from 'stream';
import * as tar from 'tar-stream';
export function getDocker(deviceAddress: string): Docker {
return new Docker({
host: deviceAddress,
// TODO: Make this configurable
port: 2375,
});
}
export async function getSupervisorContainer(
docker: Docker,
requireRunning: boolean = false,
): Promise<Docker.ContainerInfo> {
// First get the supervisor container id
const containers = await docker.listContainers({
filters: { name: ['resin_supervisor'] },
all: !requireRunning,
});
if (containers.length !== 1) {
throw new Error('supervisor container not found');
}
return containers[0];
}
export async function getDeviceArch(docker: Docker): Promise<string> {
try {
const supervisorContainer = await getSupervisorContainer(docker);
const arch = supervisorContainer.Labels?.['io.balena.architecture'];
if (arch == null) {
// We can try to inspect the image for the
// architecture if this fails
const match = /(amd64|i386|aarch64|armv7hf|rpi)/.exec(
supervisorContainer.Image,
);
if (match != null) {
return match[1];
}
throw new Error('supervisor container does not have architecture label');
}
return arch.trim();
} catch (e) {
throw new Error(
`Unable to get device architecture: ${e.message}.\nTry specifying the architecture with -a.`,
);
}
}
export async function getCacheFrom(docker: Docker): Promise<string[]> {
const container = await getSupervisorContainer(docker);
return [container.Image];
}
// perform the build and return the image id
export async function performBuild(
docker: Docker,
dockerfile: Dockerfile,
dockerOpts: { [key: string]: any },
): Promise<void> {
const builder = Builder.fromDockerode(docker);
// tar the directory, but replace the dockerfile with the
// livepush generated one
const tarStream = await tarDirectory(Path.join(__dirname, '..'), dockerfile);
return new Promise((resolve, reject) => {
builder.createBuildStream(dockerOpts, {
buildSuccess: () => {
resolve();
},
buildFailure: reject,
buildStream: (stream: Duplex) => {
stream.pipe(process.stdout);
tarStream.pipe(stream);
},
});
});
}
async function tarDirectory(
dir: string,
dockerfile: Dockerfile,
): Promise<Readable> {
const pack = tar.pack();
const add = async (path: string) => {
const entries = await fs.readdir(path);
for (const entry of entries) {
const newPath = Path.resolve(path, entry);
const stat = await fs.stat(newPath);
if (stat.isDirectory()) {
await add(newPath);
} else {
// Here we filter the things we don't want
if (
newPath.includes('node_modules/') ||
newPath.includes('.git/') ||
newPath.includes('build/') ||
newPath.includes('coverage/')
) {
continue;
}
if (newPath.endsWith('Dockerfile')) {
pack.entry(
{ name: 'Dockerfile', mode: stat.mode, size: stat.size },
dockerfile.generateLiveDockerfile(),
);
continue;
}
pack.entry(
{
name: Path.relative(dir, newPath),
mode: stat.mode,
size: stat.size,
},
await fs.readFile(newPath),
);
}
}
};
await add(dir);
pack.finalize();
return pack;
}
// Absolutely no escaping in this function, just be careful
async function runSshCommand(address: string, command: string) {
// TODO: Make the port configurable
const [stdout] = await child_process.exec(
'ssh -p 22222 -o LogLevel=ERROR ' +
'-o StrictHostKeyChecking=no ' +
'-o UserKnownHostsFile=/dev/null ' +
`root@${address} ` +
`"${command}"`,
);
return stdout;
}
export function stopSupervisor(address: string) {
return runSshCommand(address, 'systemctl stop resin-supervisor');
}
export function startSupervisor(address: string) {
return runSshCommand(address, 'systemctl start resin-supervisor');
}
export async function replaceSupervisorImage(
address: string,
imageName: string,
imageTag: string,
) {
// TODO: Maybe don't overwrite the LED file?
const fileStr = `#This file was edited by livepush
SUPERVISOR_IMAGE=${imageName}
SUPERVISOR_TAG=${imageTag}
LED_FILE=/dev/null
`;
return runSshCommand(
address,
`echo '${fileStr}' > /tmp/update-supervisor.conf`,
);
}

sync/init.ts (new file)

@ -0,0 +1,53 @@
import * as Bluebird from 'bluebird';
import * as Docker from 'dockerode';
import { Dockerfile } from 'livepush';
import * as device from './device';
interface Opts {
address: string;
imageName: string;
imageTag: string;
docker: Docker;
dockerfile: Dockerfile;
nocache: boolean;
arch?: string;
}
export async function initDevice(opts: Opts) {
const arch = opts.arch ?? (await device.getDeviceArch(opts.docker));
const image = `${opts.imageName}:${opts.imageTag}`;
await device.performBuild(opts.docker, opts.dockerfile, {
buildargs: { ARCH: arch },
t: image,
labels: { 'io.balena.livepush-image': '1', 'io.balena.architecture': arch },
cachefrom: (await device.getCacheFrom(opts.docker)).concat(image),
nocache: opts.nocache,
});
// Now that we have our new image on the device, we need
// to stop the supervisor, update
// /tmp/update-supervisor.conf with our version, and
// restart the supervisor
await device.stopSupervisor(opts.address);
await device.replaceSupervisorImage(
opts.address,
opts.imageName,
opts.imageTag,
);
await device.startSupervisor(opts.address);
let supervisorContainer: undefined | Docker.ContainerInfo;
while (supervisorContainer == null) {
try {
supervisorContainer = await device.getSupervisorContainer(
opts.docker,
true,
);
} catch (e) {
await Bluebird.delay(500);
}
}
return supervisorContainer.Id;
}

sync/livepush.ts (new file)

@ -0,0 +1,63 @@
import * as chokidar from 'chokidar';
import * as Docker from 'dockerode';
import * as _ from 'lodash';
import * as Path from 'path';
import { Dockerfile, Livepush } from 'livepush';
// TODO: Pass build args to the livepush process
export async function startLivepush(opts: {
dockerfile: Dockerfile;
containerId: string;
docker: Docker;
noinit: boolean;
}) {
const livepush = await Livepush.init({
...opts,
context: Path.join(__dirname, '..'),
stageImages: [],
});
livepush.addListener('commandExecute', ({ command }) => {
console.log(`Executing command: ${command} `);
});
livepush.addListener('commandReturn', ({ returnCode }) => {
if (returnCode !== 0) {
console.log(` Command executed with code ${returnCode}`);
}
});
livepush.addListener('commandOutput', ({ output }) => {
console.log(output.data.toString());
});
livepush.addListener('containerRestart', () => {
console.log('Restarting container');
});
const livepushExecutor = getExecutor(livepush);
chokidar
.watch('.', {
ignored: /((^|[\/\\])\..|node_modules.*|sync\/.*)/,
ignoreInitial: opts.noinit,
})
.on('add', path => livepushExecutor(path))
.on('change', path => livepushExecutor(path))
.on('unlink', path => livepushExecutor(undefined, path));
}
const getExecutor = (livepush: Livepush) => {
const changedFiles: string[] = [];
const deletedFiles: string[] = [];
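// Debounce so that a burst of chokidar events is batched into a single
// performLivepush call over the accumulated file lists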
const actualExecutor = _.debounce(async () => {
await livepush.performLivepush(changedFiles, deletedFiles);
});
return (changed?: string, deleted?: string) => {
if (changed) {
changedFiles.push(changed);
}
if (deleted) {
deletedFiles.push(deleted);
}
actualExecutor();
};
};

sync/logs.ts (new file)

@ -0,0 +1,46 @@
import * as Docker from 'dockerode';
import * as _ from 'lodash';
export async function setupLogs(
docker: Docker,
containerId: string,
lastReadTimestamp = 0,
) {
const container = docker.getContainer(containerId);
const stream = await container.logs({
stdout: true,
stderr: true,
follow: true,
timestamps: true,
since: lastReadTimestamp,
});
stream.on('data', chunk => {
const { message, timestamp } = extractMessage(chunk);
// Add one here, otherwise we can end up constantly reading
// the same log line
lastReadTimestamp = Math.floor(timestamp.getTime() / 1000) + 1;
process.stdout.write(message);
});
// This happens when a container is restarted
stream.on('end', () => {
setupLogs(docker, containerId, lastReadTimestamp);
});
}
function extractMessage(msgBuf: Buffer) {
// Non-tty message format from:
// https://docs.docker.com/engine/api/v1.30/#operation/ContainerAttach
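// Each frame begins with an 8-byte header: byte 0 is the stream type
// (0=stdin, 1=stdout, 2=stderr) and bytes 4-7 hold the payload length,
// so we drop the header and keep only the payload text.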
if ([0, 1, 2].includes(msgBuf[0])) {
// Take the header from this message, and parse it as normal
msgBuf = msgBuf.slice(8);
}
const str = msgBuf.toString();
const space = str.indexOf(' ');
return {
timestamp: new Date(str.slice(0, space)),
message: str.slice(space + 1),
};
}

sync/sync.ts (new file)

@ -0,0 +1,88 @@
import * as packageJson from '../package.json';
import * as livepush from 'livepush';
import { fs } from 'mz';
import * as yargs from 'yargs';
import * as device from './device';
import * as init from './init';
import { startLivepush } from './livepush';
import { setupLogs } from './logs';
const helpText = `Sync changes in code to a running supervisor on a device on the local network
Usage:
npm run sync <device IP>
`;
const argv = yargs
.command(
'$0 <device-address>',
'Sync changes in code to a running debug mode supervisor on a local device',
y =>
y.positional('device-address', {
type: 'string',
describe: 'The address of a local device',
}),
)
.option('device-arch', {
alias: 'a',
type: 'string',
description:
'Specify the device architecture (use this when the automatic detection fails)',
choices: ['amd64', 'i386', 'aarch64', 'armv7hf', 'rpi'],
})
.options('image-name', {
alias: 'i',
type: 'string',
description: 'Specify the name to use for the supervisor image on device',
default: 'livepush-supervisor',
})
.options('image-tag', {
alias: 't',
type: 'string',
description: 'Specify the tag to use for the supervisor image on device',
default: packageJson.version,
})
.options('nocache', {
description: 'Run the initial build without cache',
type: 'boolean',
default: false,
})
.usage(helpText)
.version(false)
.scriptName('npm run sync --')
.alias('h', 'help').argv;
(async () => {
const address = argv['device-address']!;
const dockerfile = new livepush.Dockerfile(await fs.readFile('Dockerfile'));
try {
const docker = device.getDocker(address);
const containerId = await init.initDevice({
address,
docker,
dockerfile,
imageName: argv['image-name'],
imageTag: argv['image-tag'],
arch: argv['device-arch'],
nocache: argv['nocache'],
});
console.log('==================================================');
console.log('Supervisor container id: ', containerId);
console.log('==================================================');
await setupLogs(docker, containerId);
await startLivepush({
dockerfile,
containerId,
docker,
noinit: true,
});
} catch (e) {
console.error('Error:');
console.error(e.message);
}
})();


@ -10,8 +10,8 @@ Usage:
The script will first build a non-optimized version of the js code and sync the resulting app.js
onto the supervisor container at the specified IP. It will also restart the supervisor container.
The device must be a development variant of balenaOS and the supervisor must be running.
`)
process.exit(1)
`);
process.exit(1);
}
const childProcess = require('child_process');
@ -32,17 +32,20 @@ const syncOpts = {
childProcess.execSync('npm install', { stdio: 'inherit' });
compiler.watch({
ignored: /node_modules/,
}, (err, stats) => {
if (err) {
console.error(err);
return;
}
console.log(stats.toString({ colors: true }));
if (stats.hasErrors()) {
console.error('Skipping sync due to errors');
return;
}
doSync(syncOpts);
});
compiler.watch(
{
ignored: /node_modules/,
},
(err, stats) => {
if (err) {
console.error(err);
return;
}
console.log(stats.toString({ colors: true }));
if (stats.hasErrors()) {
console.error('Skipping sync due to errors');
return;
}
doSync(syncOpts);
},
);