diff --git a/sgx-jvm/containers/core/Dockerfile b/sgx-jvm/containers/core/Dockerfile
new file mode 100644
index 0000000000..593b277742
--- /dev/null
+++ b/sgx-jvm/containers/core/Dockerfile
@@ -0,0 +1,104 @@
+FROM ubuntu:xenial-20171114
+
+# General information
+
+LABEL version="1.0"
+LABEL description="SGX build and test container"
+LABEL maintainer="tommy.lillehagen@r3.com"
+
+# Configure package management software
+
+ENV LANG C.UTF-8
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update -qqy
+RUN apt-get install -qqy software-properties-common apt-utils
+RUN add-apt-repository \
+    "deb http://archive.ubuntu.com/ubuntu/ trusty main restricted"
+RUN add-apt-repository \
+    "deb http://archive.ubuntu.com/ubuntu/ trusty universe"
+RUN apt-get update -qqy
+
+# Install dependencies (lock versions)
+
+RUN apt-get install -qqy \
+    autoconf=2.69-9 \
+    ccache=3.2.4-1 \
+    cmake=3.5.1-1ubuntu3 \
+    cpio=2.11+dfsg-5ubuntu1 \
+    exuberant-ctags=1:5.9~svn20110310-11 \
+    g++=4:5.3.1-1ubuntu1 \
+    gcc=4:5.3.1-1ubuntu1 \
+    gdb=7.11.1-0ubuntu1~16.5 \
+    gdbserver=7.11.1-0ubuntu1~16.5 \
+    git=1:2.7.4-0ubuntu1.3 \
+    libcurl3=7.47.0-1ubuntu2.5 \
+    libcurl4-openssl-dev=7.47.0-1ubuntu2.5 \
+    libprotobuf8=2.5.0-9ubuntu1 \
+    libssl-dev=1.0.2g-1ubuntu4.9 \
+    libtool=2.4.6-0.1 \
+    libunwind8=1.1-4.1 \
+    make=4.1-6 \
+    ocaml=4.02.3-5ubuntu2 \
+    openjdk-8-jdk=8u151-b12-0ubuntu0.16.04.2 \
+    openssl=1.0.2g-1ubuntu4.9 \
+    patch=2.7.5-1 \
+    proguard=5.2.1-3 \
+    python2.7=2.7.12-1ubuntu0~16.04.2 \
+    unzip=6.0-20ubuntu1 \
+    wget=1.17.1-1ubuntu1.3 \
+    zip=3.0-11 \
+    zlib1g-dev=1:1.2.8.dfsg-2ubuntu4.1
+
+RUN apt-get install -qqy -t trusty \
+    protobuf-compiler=2.6.1-1.3 \
+    libprotobuf-dev=2.6.1-1.3
+
+# Environment
+
+ENV SHELL /bin/bash
+ENV HOME /root
+ENV CODE /code
+ENV SGX_SDK /sgx
+
+# Volumes and work directory
+
+VOLUME ${HOME}
+VOLUME ${CODE}
+VOLUME ${SGX_SDK}
+RUN mkdir -p ${HOME}
+RUN mkdir -p ${CODE}
+RUN mkdir -p ${SGX_SDK}
+WORKDIR ${CODE}
+
+# Expose ports for remote GDB and Java debugging, and test servers
+
+EXPOSE 2000 5005 8080 9080
+
+# Set up Java and SGX environment
+
+ENV JAVA_HOME /usr/lib/jvm/java-8-openjdk-amd64
+
+ENV SGX_BIN ${SGX_SDK}/sgxsdk/bin:${SGX_SDK}/sgxsdk/bin/x64
+ENV PATH ${PATH}:${JAVA_HOME}/jre/bin:${JAVA_HOME}/bin:${SGX_BIN}
+
+ENV LD_LIBRARY_PATH ${LD_LIBRARY_PATH}:${CODE}/sgx-jvm/linux-sgx/build/linux
+
+# Set Python 2.7 as the default version
+
+RUN ln -fs \
+    /usr/bin/python2.7 \
+    /usr/bin/python
+
+# Link libcrypto properly
+
+RUN ln -fs \
+    /lib/x86_64-linux-gnu/libcrypto.so.1.0.0 \
+    /usr/lib/x86_64-linux-gnu/libcrypto.so
+
+# Location for the UNIX socket belonging to the Intel AESM service
+
+RUN mkdir -p /var/run/aesmd/
+
+# Update ProGuard to version 6 beta
+
+ADD dependencies/proguard6.0beta1.tar.gz /usr/share/
diff --git a/sgx-jvm/containers/core/Makefile b/sgx-jvm/containers/core/Makefile
new file mode 100644
index 0000000000..9cf58ddf45
--- /dev/null
+++ b/sgx-jvm/containers/core/Makefile
@@ -0,0 +1,21 @@
+.PHONY: container sgxsdk
+
+SHELL = /bin/bash
+MAKEFILE_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+
+container: Dockerfile
+	docker build -t sgx-ra-core .
+
+sgxsdk:
+	@mkdir -p ~/.container/sgx
+	@if [ ! -e ~/.container/sgx/sgxsdk ]; then \
+		docker run --rm \
+			-v ${HOME}/.container/sgx:/sgx \
+			-v ${MAKEFILE_DIR}/../../../../:/code sgx-ra-core bash \
+			/code/sgx-jvm/linux-sgx/linux/installer/bin/build-installpkg.sh sdk; \
+		docker run --rm \
+			-v ${HOME}/.container/sgx:/sgx \
+			-v ${MAKEFILE_DIR}/../../../../:/code -it sgx-ra-core bash \
+			/code/sgx-jvm/linux-sgx/linux/installer/bin/sgx_linux_x64_sdk_1.9.100.39124.bin \
+			-prefix=/sgx; \
+	fi
diff --git a/sgx-jvm/containers/core/dependencies/proguard6.0beta1.tar.gz b/sgx-jvm/containers/core/dependencies/proguard6.0beta1.tar.gz
new file mode 100644
index 0000000000..7334e2c610
Binary files /dev/null and b/sgx-jvm/containers/core/dependencies/proguard6.0beta1.tar.gz differ
diff --git a/sgx-jvm/environment b/sgx-jvm/environment
new file mode 100755
index 0000000000..76ab50060a
--- /dev/null
+++ b/sgx-jvm/environment
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+# make the sx tool available for the user
+alias sx="bash ${DIR}/tools/sx/sx"
+
+# load sx configuration
+if [ -e "/tmp" ]; then
+    TMPDIR=${TMPDIR:-/tmp}
+fi
+TMP_CONFIG="${TMPDIR}/sx.config"
+if [ -z "${TMPDIR}" ]; then
+    TMP_CONFIG=$(tempfile)
+fi
+
+sx shell auto-completion > "${TMP_CONFIG}"
+source "${TMP_CONFIG}"
diff --git a/sgx-jvm/tools/sx/README.md b/sgx-jvm/tools/sx/README.md
new file mode 100644
index 0000000000..126eb58c99
--- /dev/null
+++ b/sgx-jvm/tools/sx/README.md
@@ -0,0 +1,238 @@
+# SGX Build Container and Utilities
+
+## Project Organisation
+
+ * **Containers**
+
+   To pin down dependencies and simplify development and testing, we have a
+   Docker image with all necessary compile- and run-time dependencies
+   pre-installed. This image supports mounting of volumes for the user's home
+   directory, code repository, and SGX SDK directory. It also exposes various
+   ports for debuggable targets (JVM and native). To run SGX-enabled
+   applications in hardware mode, the user must pass in a reference to the SGX
+   kernel driver (which is done automagically if the `sx` command is used; see
+   the sketch after this list).
+
+ * **Tools**
+
+   `sx` is a utility that simplifies running builds and tests inside the SGX
+   container, and also provides some additional helper functions for things
+   like generating tags databases, starting debug servers, etc.
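+
+For reference, running a shell in the container by hand looks roughly like the
+sketch below; the exact flags and mounts are assembled for you by the `sx`
+tool, and the repository path here is illustrative:
+
+```bash
+# roughly what `sx exec` does under the hood
+> docker run --rm -it \
+      --privileged \
+      --device /dev/isgx \
+      -v ${HOME}/.container:/root \
+      -v ${HOME}/.container/sgx:/sgx \
+      -v /path/to/repo:/code \
+      -p 2000:2000 -p 5005:5005 \
+      sgx-ra-core bash
+```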
+
+## Getting Started
+
+To get started, run the following commands in `sgx-jvm`:
+
+```bash
+> source environment
+> sx help
+```
+
+This yields the following output:
+
+```
+usage: sx <command> <options>
+
+<commands>
+
+  build                build project in container (<component> <target>)
+  containers           actions related to containers
+  debug                actions related to debugging
+  exec                 shorthand for `containers exec core`
+  get-started          build containers and key components
+  help                 show help information
+  hsm                  actions related to the hsm simulator
+  logs                 tail application logs
+  reports              actions related to reports
+  shell                show information about shell commands
+  tags                 actions related to tag databases
+
+<options>
+
+  -c                   colours = on | off (-C)
+  -d                   debug = on | off (-D)
+  -f                   force operation
+  -h                   hardware = on | off (-s)
+  -r                   target = release | pre-release (-p)
+  -s                   hsm profile = simulator | development hsm (-S) | production (-P)
+  -t                   tty = on | off (-T)
+  -v                   verbose output
+
+<environment variables>
+
+  LINES                number of lines to return from the end of the log files (default 50)
+  PORT                 port number used for connecting to the ISV (default 9080)
+```
+
+The first command simply sets up an alias pointing to `sgx-jvm/tools/sx/sx`,
+and enables Bash auto-completion for the various command options. For example:
+
+```bash
+> sx b<tab>    # will expand to "sx build"
+```
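+
+Under the hood, `source environment` captures the output of `sx shell
+auto-completion` in a temporary file and sources it. The generated
+configuration is conceptually of the following shape (an illustrative sketch
+only, not the actual generated code):
+
+```bash
+# illustrative only -- the real configuration is produced by the sx tool
+_sx_complete() {
+    local cur="${COMP_WORDS[COMP_CWORD]}"
+    COMPREPLY=($(compgen -W "$(sx shell commands)" -- "${cur}"))
+}
+complete -F _sx_complete sx
+```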
+
+The second command shows all the available sub-commands and options.
+
+If this is your first time using `sx`, you will most likely have to build the
+Docker container used for building and running the various components of the
+SGX projects. To do that, run the command:
+
+```bash
+> sx get-started
+```
+
+This command will also set up default configuration for SGX-GDB, both inside
+and outside of the container, and Visual Studio Code configuration if you fancy
+running remote debugging sessions from an IDE.
+
+## Building Components
+
+As an example, this section will go through the process of building the various
+components of the remote attestation project.
+
+### Enclave
+
+To build the enclave and sign it with a self-signed OpenSSL certificate (for
+testing), run the following command:
+
+```bash
+> sx build remote-attestation/enclave clean all
+```
+
+This command runs `make -C sgx-jvm/remote-attestation/enclave clean all` inside
+the SGX container.
+
+To build the enclave in hardware and pre-release mode, use the `-h` and `-p`
+switches like this:
+
+```bash
+> sx build -hp remote-attestation/enclave clean all
+```
+
+### Host
+
+Similarly, to build the host (the JVM layer), you can run the following command:
+
+```bash
+> sx build remote-attestation/host
+```
+
+This will run `gradlew` in the `host/` directory, with the necessary paths and
+environment variables set.
+
+### JNI Library
+
+This is a native library, so you can compile it either for use with software
+simulation or hardware.
+
+```bash
+> sx build remote-attestation/host/native        # simulation, debug mode
+# or:
+> sx build -hp remote-attestation/host/native    # hardware, pre-release mode
+```
+
+As part of the build, as seen in `host/native/Makefile`, we run `javah` on the
+`NativeWrapper` class to extract its JNI mapping. This mapping is written to
+`wrapper.hpp`, which means that the JVM layer must be built _prior_ to this
+step.
+
+## Running and Debugging Components
+
+### Unit Tests
+
+The unit tests are run through Gradle inside the SGX container, with the
+various paths set to the necessary dependencies. For instance, we need to set
+the `java.library.path` and `corda.sgx.enclave.path` variables to point to the
+JNI library and the enclave shared object, respectively. This is all done for
+you by the Gradle build script, the container, and the `sx` tool.
+
+Provided that you have built the aforementioned components, you can now run the
+unit tests with the following command:
+
+```bash
+> sx build remote-attestation/host unit-tests
+```
+
+You can open the output report by issuing the following command:
+
+```bash
+> sx reports unit-tests
+```
+
+### Integration Tests
+
+Similarly, you can run the integration tests with the following command:
+
+```bash
+> sx build remote-attestation/host integration-tests
+```
+
+This requires that the service provider (in the future, the challenger and the
+IAS proxy) is running. If, say, the service is running on port 12345, you can
+run the tests like this:
+
+```bash
+> PORT=12345 sx build remote-attestation/host integration-tests
+```
+
+You can open the output report by issuing the following command:
+
+```bash
+> sx reports integration-tests
+```
+
+If you want to explore the logs, you can use the `logs` command:
+
+```bash
+> LINES=100 sx logs
+```
+
+### Test Flow
+
+There is also a simple attestation flow which, like the integration tests,
+requires the service provider to be running on a specific port. This flow can
+be run with the `sx build` command.
+
+To run the simple flow without attaching a debugger, run:
+
+```bash
+> PORT=8080 sx build remote-attestation/host run
+```
+
+There are a few different debug targets depending on how you want to run your
+debugger:
+
+ * **Local**
+
+   Runs `gdb` inside the Docker container (if you don't have `gdb`
+   installed on your computer): `run-local`.
+
+ * **Remote**
+
+   Runs `gdbserver` inside the Docker container so that you can attach to it
+   from the host computer or another machine: `run-remote`.
+
+ * **SGX**
+
+   Runs `sgx-gdb` inside the Docker container (if you don't have `sgx-gdb`
+   installed on your computer): `run-sgx`. This lets you step through
+   enclave code, inspect stack traces in the trusted environment, etc.
+   Obviously, this is only possible if the program has been compiled for
+   debug and simulation mode.
+
+For all of the above, and for the unit and integration tests, you can attach a
+Java debugger remotely as well, using JDWP.
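+
+As a concrete example, a remote native debugging session might look like this
+(port 8080 is illustrative; `gdb` connects to the container's exposed debug
+port 2000):
+
+```bash
+# terminal 1: run the flow under gdbserver inside the container
+> PORT=8080 sx build remote-attestation/host run-remote
+
+# terminal 2: attach gdb to the debug server
+> sx debug attach
+```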
+
+## Other Tools
+
+### CTags
+
+For the C/C++ part of the project, you might wish to construct a tags file to
+easily jump back and forth between symbols. You can construct this either with
+or without the symbols from the Linux SGX SDK:
+
+```bash
+> sx tags lean remote-attestation    # Remote Attestation project only
+> sx tags full remote-attestation    # Include symbols from the SGX SDK
+```
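+
+Putting it all together, a typical first session in simulation mode might look
+something like this (components as in the examples above):
+
+```bash
+> source environment
+> sx get-started                                   # build container, install SDK
+> sx build remote-attestation/enclave clean all    # build and sign the enclave
+> sx build remote-attestation/host                 # build the JVM layer
+> sx build remote-attestation/host/native          # build the JNI library
+> sx build remote-attestation/host unit-tests      # run the unit tests
+> sx reports unit-tests                            # open the test report
+```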
&& pwd)" +repo_base="sgx-jvm" +progname="$(basename "$file")" +sx=${file} + +# Configuration variables +VERBOSE=off +FORCE=off +TTY_MODE=on +COLOUR_MODE=on +TABSTOP=23 + +# Build variables +DEBUG_MODE=on +TARGET_CONFIG=debug +HARDWARE_MODE=off +USE_NATIVE_LOGGING=off +HSM_PROFILE=dev_sim + +# Runtime variables +ISV_PORT=${PORT:-9080} +LINES=${LINES:-50} + +# Debug variables +gdb_port=2000 +jdwp_port=5005 + +# Docker environment +docker_ip=$(ifconfig docker0 2> /dev/null | sed -n 's/^.*inet \([^ ]*\).*/\1/p') +if [ -z "$docker_ip" ]; then + docker_ip="192.168.65.1" +fi + +# }}} Environment + +# {{{ Logging + +# {{{ Colours and Formatting + +update_colours() { + if test -t 1; then + local ncolours=$(tput colors) + if test -n "$ncolours" && test $ncolours -ge 8; then + TABSTOP=34 + bold="$(tput bold)" + underline="$(tput smul)" + standout="$(tput smso)" + normal="$(tput sgr0)" + black="$(tput setaf 0)" + red="$(tput setaf 1)" + green="$(tput setaf 2)" + yellow="$(tput setaf 3)" + blue="$(tput setaf 4)" + magenta="$(tput setaf 5)" + cyan="$(tput setaf 6)" + white="$(tput setaf 7)" + fi + fi + if [ "$COLOUR_MODE" == "off" ]; then + TABSTOP=23 + bold="" + underline="" + standout="" + normal="" + black="" + red="" + green="" + yellow="" + blue="" + magenta="" + cyan="" + white="" + fi +} +update_colours + +p_value() { + local key="$1" + shift 1 + echo -e " ${yellow}${key}${normal}\t$@" | expand -t${TABSTOP} +} + +p_value_verbose() { + if [ "${VERBOSE}" == "on" ]; then + local key="$1" + shift 1 + p_value "$key" "$@" + fi +} + +nl_verbose() { + if [ "${VERBOSE}" == "on" ]; then + echo + fi +} + +# }}} Colours and Formatting + +verbose() { + if [ "$VERBOSE" == "on" ]; then + info "$@" + fi +} + +info() { + echo -e "${blue}$(date +'%H:%M:%S') ${yellow}info:${normal} $@" +} + +warn() { + echo -e "${blue}$(date +'%H:%M:%S') ${red}warn:${normal} $@" > /dev/stderr +} + +error() { + echo -e "${blue}$(date +'%H:%M:%S') ${red}error:${normal} $@" > /dev/stderr + exit 1 +} + +# }}} Logging + +# {{{ Utilities +trim() { + sed 's/^[ ]*//' | sed 's/[ ]*$//' | sed 's/[ ][ ]*/ /g' +} + +browse() { + local url="$1" + for b in "${BROWSER}" firefox chromium chrome opera open ; do + if [ ! 
-z "$(which $b)" ]; then + $b "${url}" 2>&1 > /dev/null & + exit + fi + done + warn "unable to find preferred browser" + echo "report location: $url" +} +# }}} Utilities + +# {{{ Auto-Completion +print_autocompletion() { +cat<${normal}" + local category=$1 + local options=$2 + sed -n "s/^[ ]*\([a-z-]*\)) # ${category}: \(.*\)$/ ${blue}\1${normal} \2/p" \ + "${file}" | sed "/ hidden/d" | expand -t ${TABSTOP} | sort + echo + + if [ "$options" != "NONE" ]; then + echo -e "${magenta}${normal}" + sed -n "s/^[ ]*\([A-Za-z-]*\)) # (option): \(.*\)$/ ${blue}-\1${normal} \2/p" \ + "${file}" | expand -t ${TABSTOP} | sort | grep $options + echo + fi + + echo -e "${green}${normal}" + echo -e " ${blue}LINES${normal}\tnumber of lines to return from the end of the log files (default 50)" \ + | expand -t ${TABSTOP} + echo -e " ${blue}PORT${normal}\tport number used for connecting to the ISV (default 9080)" \ + | expand -t ${TABSTOP} + echo + + exit 1 +} + +print_usage() { + local category=$1 + local options=$2 + echo -n -e "${bold}usage:${normal} ${green}${normal} ${progname} " + if [ "$category" != "root" ]; then + echo -n -e "${bold}${category}${normal} " + fi + echo -e "${magenta} ${normal}" + echo + print_commands $category $options +} +# }}} Print Commands and Usage + +# {{{ Container Utilities +in_container() { + local container="${1:-core}" + local CODE=$(pwd | sed 's/enterprise\/sgx-jvm.*$/enterprise/') + local tty="" + shift 1 + mkdir -p ${HOME}/.container + + if [ "${TTY_MODE}" == "on" ]; then + tty="-t" + fi + + local privileged="" + local isgx_device="" + local mei0_device="" + local ports="" + local aesm_socket="" + privileged="--privileged" + if [ -e "/dev/isgx" ]; then + isgx_device="--device /dev/isgx" + if [ -e "/dev/mei0" ]; then + mei0_device="--device /dev/mei0" + fi + fi + ports="-p ${gdb_port}:${gdb_port} -p ${jdwp_port}:${jdwp_port}" + local sock="/var/run/aesmd/aesm.socket" + if [ -e "${sock}" ]; then + aesm_socket="-v ${sock}:${sock}" + fi + + [ -z "$(docker images -q sgx-ra-${container})" ] && \ + error "cannot find container image with name ${magenta}sgx-ra-${container}${normal}" + + local c=$(docker images sgx-ra-${container} | tail -n1 | tr -s ' ()' '\t') + local container_name=$(echo "${c}" | cut -f 1) + local container_tag=$(echo "${c}" | cut -f 2) + local container_hash=$(echo "${c}" | cut -f 3) + + verbose "running command in container" + nl_verbose + + p_value_verbose "container image" "${container_name}:${container_tag} (${container_hash})" + p_value_verbose "network" "${docker_ip}" + + if [ ! -z "${ports}" ]; then + p_value_verbose "ports" "${gdb_port}, ${jdwp_port}" + fi + + if [ ! -z "$(echo "$@")" ]; then + p_value_verbose "command" "$@" + fi + + if [ ! 
-z "${isgx_device}" ]; then + p_value_verbose "devices" "${isgx_device} ${mei0_device}" + fi + + nl_verbose + + docker run --rm -i ${tty} \ + ${privileged} \ + --network host \ + --add-host="localhost:${docker_ip}" \ + -v ${CODE}:/code \ + -v ${HOME}/.container:/root \ + -v ${HOME}/.container/sgx:/sgx \ + -e "PORT=${ISV_PORT}" \ + ${ports} \ + ${isgx_device} \ + ${mei0_device} \ + ${aesm_socket} \ + sgx-ra-${container} "$@" + + nl_verbose + verbose "execution completed" + echo +} + +container_make() { + local dir="$1" + shift 1 + TTY_MODE=no + in_container core make -C ${repo_base}/${dir} "$@" +} +# }}} Container Utilities + +# {{{ Debug Utilities +debug_server() { + local docker_process=$(docker ps | grep sgx-ra-core | cut -d' ' -f1) + if [ -z "$docker_process" ]; then + error "no debuggable processes running" + fi + + local process=$(docker exec -it "$docker_process" pidof java | cut -d' ' -f1) + + nohup docker exec -t "$docker_process" \ + gdbserver --attach localhost:2000 "$process" 2>&1 > /dev/null & +} +# }}} Debug Utilities + +# {{{ Command: Build +cmd_build() { + if [ "${1:0:1}" == "-" ]; then + options "$@" + shift 1 + fi + local dir="${1:-.}" + shift 1 + info "building ${dir} $@" + local vars="" + if [ "${HARDWARE_MODE}" == "on" ]; then + p_value_verbose "mode" "hardware" + vars="${vars} SGX_USE_HARDWARE=TRUE" + else + p_value_verbose "mode" "simulation" + vars="${vars} SGX_USE_HARDWARE=FALSE" + fi + if [ "${DEBUG_MODE}" == "on" ]; then + p_value_verbose "debug" "on" + vars="${vars} SGX_DEBUG_MODE=TRUE" + else + p_value_verbose "debug" "off" + vars="${vars} SGX_DEBUG_MODE=FALSE" + fi + if [ "${TARGET_CONFIG}" == "release" ]; then + p_value_verbose "configuration" "release" + vars="${vars} SGX_IS_PRERELEASE=FALSE SGX_DEBUG_MODE=FALSE" + elif [ "${TARGET_CONFIG}" == "pre-release" ]; then + p_value_verbose "configuration" "pre-release" + vars="${vars} SGX_IS_PRERELEASE=TRUE" + elif [ "${DEBUG_MODE}" == "on" ]; then + p_value_verbose "configuration" "debug" + vars="${vars} SGX_IS_PRERELEASE=FALSE SGX_DEBUG_MODE=TRUE" + else + p_value_verbose "configuration" "release" + vars="${vars} SGX_IS_PRERELEASE=FALSE SGX_DEBUG_MODE=FALSE" + fi + if [ "${USE_NATIVE_LOGGING}" == "on" ]; then + p_value_verbose "logging" "on" + vars="${vars} LOGGING=TRUE" + else + p_value_verbose "logging" "off" + fi + if [ "${HSM_PROFILE}" == "prod" ]; then + p_value_verbose "hsm profile" "production" + elif [ "${HSM_PROFILE}" == "dev_hsm" ]; then + p_value_verbose "hsm profile" "development hsm" + else + p_value_verbose "hsm profile" "simulator" + fi + vars="${vars} HSM_PROFILE=${HSM_PROFILE}" + echo + container_make ${dir} ${vars} "$@" + echo +} +# }}} Command: Build + +# {{{ Command: Containers +build_container() { + local build_targets="$1" + local target="$2" + local make_target="$3" + if [[ " ${build_targets} " =~ " ${target} " ]]; then + if [ "$FORCE" == "on" ]; then + ${sx} containers remove ${target} + fi + + local images=$(docker images | grep sgx-ra-) + if [ ! -z "${images}" ]; then + exit 0 + fi + + info "building container ${blue}${target}${normal} using target ${magenta}${make_target}${normal} ..." 
+        echo
+        make -C "${base_dir}/containers/${target}" ${make_target}
+        echo
+    fi
+}
+
+cmd_containers() {
+    local command=$1
+    shift 1
+
+    local build_targets="$(echo " $@ " | sed 's/ -[A-Za-z]\+ //g' | trim)"
+    if [ -z "${build_targets}" ]; then
+        build_targets="core"
+    fi
+
+    case $command in
+        build) # containers: create containers for build and testing
+            options "$@"
+            verbose "building targets: ${build_targets} ..."
+            build_container "${build_targets}" "core" "container"
+            ;;
+
+        install-sdk) # containers: install the sgx sdk for containers
+            options "$@"
+            verbose "installing sgx sdk in container ${build_targets} ..."
+            build_container "${build_targets}" "core" "sgxsdk"
+            ;;
+
+        exec) # containers: run command in container (<container> <command>)
+            if [ "${1:0:1}" == "-" ]; then
+                options "$@"
+                shift 1
+            fi
+            local container="${1:-core}"
+            shift 1
+            in_container "${container}" "$@"
+            ;;
+
+        running) # containers: show list of running containers
+            options "$@"
+            verbose "list of running containers related to this project"
+            docker ps -a | sed -n -e '1p' -e '/sgx-ra-*/p'
+            ;;
+
+        list) # containers: show list of available images
+            options "$@"
+            verbose "list of available images related to this project"
+            docker images sgx-ra-*
+            ;;
+
+        clean) # containers: stop and remove all containers
+            info "stopping containers ..."
+            [ ! -z "$(docker ps -aq)" ] && \
+                docker ps -aq | xargs docker rm
+            ;;
+
+        remove) # containers: delete images related to this project (<filter>)
+            local image="$1"
+            if [ -z "${image}" -o "${image:0:1}" == "-" ]; then
+                error "no filter specified"
+            else
+                shift 1
+            fi
+            options "$@"
+            if [ "$FORCE" == "on" ]; then
+                ${sx} containers clean
+            fi
+            local image_pattern="sgx-ra-${image}*"
+            info "removing images matching: ${image_pattern} ..."
+            [ ! -z "$(docker images -q ${image_pattern})" ] && \
+                docker images -q ${image_pattern} | xargs docker rmi -f
+            ;;
+
+        prune) # containers: prune system; stop containers and delete images
+            docker system prune -af
+            ;;
+
+        clear-cache) # containers: clear cached home directory for containers
+            info "deleting files in directory: ${HOME}/.container/"
+            rm -rf ${HOME}/.container/
+            ;;
+
+        *)
+            options "$@"
+            print_usage "containers" "\(-f\|-v\)"
+            ;;
+    esac
+}
+# }}} Command: Containers
+
+# {{{ Command: Debug
+
+# {{{ GDB Config
+write_gdb_config() {
+code_dir=${1:-/code}
+sgx_dir=${2:-/sgx}
+cat<
+}
+
+write_vscode_config() {
+code_dir=${1:-/code}
+sgx_dir=${2:-/sgx}
+cat<
+}
+# }}} GDB Config
+
+cmd_debug() {
+    local command="$1"
+    shift 1
+
+    case $command in
+        config-defaults) # debug: generate default configurations for gdb and vs code
+            info "generating gdb configuration"
+            write_gdb_config > ${HOME}/.container/.gdbinit
+            write_gdb_config "${repo_dir}" "${HOME}/.container/sgx" > ${HOME}/.gdbinit
+            info "generating launch configuration for vs code"
+            mkdir -p "${repo_dir}/${repo_base}/.vscode"
+            write_vscode_config "${repo_dir}" "${HOME}/.container/sgx" > "${repo_dir}/${repo_base}/.vscode/launch.json"
+            ;;
+
+        server) # debug: start debug server in container and attach to running java process
+            debug_server
+            ;;
+
+        attach) # debug: attach debugger to remote target in container
+            options "$@"
+            info "attaching debugger to $docker_ip:$gdb_port ..."
+            local gdb_exec="gdb"
+            if [ "${FORCE}" == "on" ]; then
+                gdb_exec="gdb_"
+            fi
-z "$(which ${gdb_exec})" ]; then + verbose "using native installation of gdb" + gdb -q \ + -ex "target remote $docker_ip:$gdb_port" + else + verbose "using containerised installation of gdb" + ${sx} containers exec core gdb -q \ + -ex "target remote $docker_ip:$gdb_port" + fi + ;; + + *) + options "$@" + print_usage "debug" "NONE" + ;; + esac +} +# }}} Command: Debug + +# {{{ Command: HSM +cmd_hsm() { + local command="$1" + shift 1 + + case $command in + start-simulator) # hsm: start hsm simulator () + local UTIMACO_HSM_DIR=$1 + local TIMESTAMP=$(date +%Y%m%d_%H%M%S) + local SIMULATOR_RUN_DIR=${base_dir}/log/hsm_simulator/$TIMESTAMP + mkdir -p $SIMULATOR_RUN_DIR + bash -c $UTIMACO_HSM_DIR/SDK/Linux/bin/cs_sim.sh \ + -f $SIMULATOR_RUN_DIR/stdout > /dev/null & + ;; + + stop-simulator) # hsm: stop hsm simulator + local jobs=$(ps -o pid,args | \ + grep cs_sim | \ + grep -v grep | \ + cut -d' ' -f 1) + if [ ! -z "${jobs}" ]; then + kill ${jobs} + fi + ;; + + *) + options "$@" + print_usage "hsm" "NONE" + ;; + esac +} +# }}} Command: HSM + +# {{{ Command: Reports + +cmd_reports() { + local command="$1" + shift 1 + + case $command in + unit-tests) # reports: open test report for unit test suite + find "${repo_dir}/${repo_base}" \ + -path "*/build/reports/tests/test/index.html" \ + -exec browse {} \; + ;; + + integration-tests) # reports: open test report for integration tests + find "${repo_dir}/${repo_base}" \ + -path "*/build/reports/tests/integrationTest/index.html" \ + -exec browse {} \; + ;; + + *) + options "$@" + print_usage "reports" "NONE" + ;; + esac +} + +# }}} Command: Reports + +# {{{ Command: Shell +cmd_shell() { + local command="$1" + shift 1 + + case $command in + commands) # shell: list available commands + filter_commands "$@" + ;; + + auto-completion) # shell: print configuration for shell auto-completion + print_autocompletion + ;; + + info) # shell: show configuration based on passed options + options "$@" + p_value "version" "${version}" + p_value "script directory" "${base_dir}" + p_value "repository directory" "${repo_dir}" + p_value "repository base" "${repo_base}" + p_value "debug mode" "${DEBUG_MODE}" + p_value "tty mode" "${TTY_MODE}" + p_value "verbose mode" "${VERBOSE}" + p_value "hardware mode" "${HARDWARE_MODE}" + p_value "target configuration" "${TARGET_CONFIG}" + p_value "c/native debug port" "${gdb_port}" + p_value "java debug port" "${jdwp_port}" + p_value "docker ip address" "${docker_ip}" + exit + ;; + + *) + options "$@" + print_usage "shell" "NONE" + ;; + esac +} +# }}} Command: Shell + +# {{{ Command: Tags +ctags_config() { +cat< ${HOME}/.container/.ctags.config + ctags_cmd > ${HOME}/.container/.ctags.cmd + ${sx} containers exec core bash /root/.ctags.cmd "$@" +} + +cmd_tags() { + local command="$1" + shift 1 + + case $command in + full) # tags: generate tags for remote attestation project ( and linux sgx sdk) + if [ -z "$1" ]; then + error "project not provided" + fi + generate_tags "$1" \ + ./linux-sgx/common \ + ./linux-sgx/psw \ + ./linux-sgx/sdk + ;; + + lean) # tags: generate tags for remote attestation project () + if [ -z "$1" ]; then + error "project not provided" + fi + generate_tags "$1" + ;; + + clean) # tags: remove generated tags file + rm -f ${repo_dir}/${repo_base}/tags + ;; + + *) + options "$@" + print_usage "tags" "NONE" + ;; + esac +} +# }}} Command: Tags + +# {{{ Command + +docker_path=$(which docker) +if [ -z "$docker_path" ]; then + error "cannot find an active docker installation; please install" +fi + +command=$1 +shift 1 + +case 
+case $command in
+
+    build) # root: build project in container (<component> <target>)
+        cmd_build "$@"
+        ;;
+
+    containers) # root: actions related to containers
+        cmd_containers "$@"
+        ;;
+
+    debug) # root: actions related to debugging
+        cmd_debug "$@"
+        ;;
+
+    exec) # root: shorthand for `containers exec core`
+        ${sx} containers exec core "$@"
+        ;;
+
+    hsm) # root: actions related to the hsm simulator
+        cmd_hsm "$@"
+        ;;
+
+    logs) # root: tail application logs
+        options "$@"
+        follow=""
+        if [ "${FORCE}" == "on" ]; then
+            follow="-f"
+        fi
+        find "${repo_dir}/${repo_base}" -type f \
+            \( \
+                -iname '*.log' -a \
+                ! -iname '*build*' -a \
+                ! -iname '*config*' -a \
+                ! -iname '*cmake*' \
+            \) \
+            -exec tail -v ${follow} -n ${LINES} {} \; 2> /dev/null
+        ;;
+
+    reports) # root: actions related to reports
+        cmd_reports "$@"
+        ;;
+
+    shell) # root: show information about shell commands
+        cmd_shell "$@"
+        ;;
+
+    tags) # root: actions related to tag databases
+        cmd_tags "$@"
+        ;;
+
+    get-started) # root: build containers and key components
+        ${sx} containers build
+        ${sx} containers install-sdk
+        ${sx} debug config-defaults
+        ;;
+
+    help) # root: show help information
+        options "$@"
+        print_usage "root" "."
+        ;;
+
+    *)
+        if [ -z "${command}" ]; then
+            print_usage "root" "."
+        fi
+        error "invalid command ${blue}${command}${normal}, " \
+            "run ${magenta}sx help${normal} for a list of available commands"
+        ;;
+esac
+# }}} Command