Merge branch 'master' of github.com:nasa/trick into web-sim-control-panel

This commit is contained in:
Jacqueline Deans 2023-04-21 11:26:31 -05:00
commit e24e3fe325
626 changed files with 28561 additions and 5460 deletions

178
.github/workflows/alt_linux_distros.yml vendored Normal file
View File

@ -0,0 +1,178 @@
# Workflow "More Linux": builds and tests Trick on extra Linux distros
# (ubuntu 18.04/20.04, debian 10/11/bookworm, almalinux 8) for python 2 and 3.
# Triggered weekly (cron) or manually (workflow_dispatch).
# NOTE(review): leading indentation was stripped when this diff was captured;
# the original .github/workflows/alt_linux_distros.yml is indented YAML.
name: More Linux
on:
# midnight UTC every Sunday
schedule:
- cron: '0 0 * * 0'
workflow_dispatch:
jobs:
build:
strategy:
fail-fast: false
matrix:
cfg:
#-------- Operating Systems ----------------
- { os: ubuntu, tag: 18.04, arch: debian, arch_ver: 10 }
- { os: ubuntu, tag: 20.04, arch: debian, arch_ver: 11 }
- { os: debian, tag: 10, arch: debian, arch_ver: 10 }
- { os: debian, tag: 11, arch: debian, arch_ver: 11 }
- { os: debian, tag: bookworm, arch: debian, arch_ver: 12 }
- { os: almalinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [2, 3]
#-------- Defaults --------------------------
# The "include" entries below layer extra keys onto matching cfg
# combinations; the empty-cfg entry supplies defaults for every combo.
include:
- cfg: {}
deps: >-
bison
clang
flex
git
llvm
make
maven
cmake
zip
install_gtest: echo gtest already installed
conf_pkg: echo package manager already configured
install_cmd: install -y
#-------- Debian-based Dependencies ----------------
- cfg: { arch: debian }
pkg_mgr: apt-get
conf_pkg: apt-get update
arch_deps: >-
swig
curl
g++
libx11-dev
libxml2-dev
libxt-dev
libmotif-common
libmotif-dev
zlib1g-dev
llvm-dev
libclang-dev
libudunits2-dev
libgtest-dev
default-jdk
python3-dev
python3-pip
python3-venv
install_gtest: |
apt-get install -y libgtest-dev
cd /usr/src/gtest
cmake .
make
cp lib/libgtest* /usr/lib/
#-------- RHEL Dependencies ----------------
- cfg: { arch: rhel }
arch_deps: >-
clang-devel
gcc
gcc-c++
java-11-openjdk-devel
libxml2-devel
llvm-devel
llvm-static
ncurses-devel
openmotif
openmotif-devel
perl
perl-Digest-MD5
udunits2
udunits2-devel
which
zlib-devel
python2-devel
python3-devel
#-------- RHEL 8-based Only Dependencies ----------------
- cfg: { arch: rhel, arch_ver: 8 }
pkg_mgr: dnf
tag_deps: >-
swig
diffutils
conf_pkg: |
dnf -y install epel-release
dnf -y update
dnf install -y 'dnf-command(config-manager)'
install_gtest: |
dnf config-manager --enable powertools
dnf install -y gtest-devel
#-------- Debian 10-based Only Dependencies ----------------
# NOTE(review): gtest on debian 10 installs its sources under /usr/src/gtest
# with libs built in-place (no lib/ subdir), unlike the generic debian entry.
- cfg: { arch: debian, arch_ver: 10 }
install_gtest: |
apt-get install -y libgtest-dev
cd /usr/src/gtest
cmake .
make
cp libgtest* /usr/lib/
#-------- Debian 11-based Only Dependencies ----------------
- cfg: { arch: debian, arch_ver: 11 }
conf_pkg: |
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y tzdata
#-------- Python2.7 dependencies ---------------------------
- cfg: { arch: debian }
python: 2
python_deps: >-
python2.7-dev
#-------- Exclude bookworm on python 2 ---------------------
exclude:
- cfg: { tag: bookworm }
python: 2
#-------- OS and Version Specific Dependencies ----------------
# None
#-------- Job definition ----------------
runs-on: ubuntu-latest
container: docker://${{matrix.cfg.os}}:${{matrix.cfg.tag}}
steps:
- name: Update Package Manager
run: ${{matrix.conf_pkg}}
- name: Install Dependencies
# single install command assembled from the matrix fragments above;
# unset fragments (e.g. os_deps) expand to nothing
run: >
${{matrix.pkg_mgr}}
${{matrix.install_cmd}}
${{matrix.deps}}
${{matrix.arch_deps}}
${{matrix.os_deps}}
${{matrix.tag_deps}}
${{matrix.python_deps}}
- name: Install GTest
run: ${{matrix.install_gtest}}
- name: Checkout repository
uses: actions/checkout@master
- name: Configure Trick
run: |
export MAKEFLAGS=-j`nproc`
export PYTHON_VERSION=${{matrix.python}}
./configure
- name: Build Trick
run: |
export MAKEFLAGS=-j`nproc`
make
- name: Test
run: |
cd share/trick/trickops/
python3 -m venv .venv && . .venv/bin/activate && pip3 install -r requirements.txt
cd ../../../; make test
- name: Upload Tests
uses: actions/upload-artifact@v3.0.0
if: success() || failure() # run this step even if previous step failed
with:
name: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}}
path: trick_test/*.xml
retention-days: 1
# Uncomment for build artifacts
# - name: Upload Trick Build
# uses: actions/upload-artifact@v3.0.0
# with:
# name: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}}
# path: |
# bin
# include
# lib
# share
# libexec
# retention-days: 1

73
.github/workflows/code_coverage.yml vendored Normal file
View File

@ -0,0 +1,73 @@
# Workflow "Code Coverage": builds Trick on oraclelinux:8 with gcov
# instrumentation, runs `make code-coverage`, and uploads coverage.info
# to Coveralls. Runs on pushes to master, PRs, or manual dispatch.
# NOTE(review): leading indentation was stripped when this diff was captured.
name: Code Coverage
on:
push:
branches:
- master
paths-ignore:
- 'docs/**'
- '.github/workflows/**'
- '!.github/workflows/code_coverage.yml'
pull_request:
workflow_dispatch:
jobs:
code-coverage:
runs-on: ubuntu-latest
container: docker://oraclelinux:8
steps:
- name: Update Package Manager
run: |
dnf -y install epel-release
dnf -y update
dnf install -y 'dnf-command(config-manager)'
- name: Install Dependencies
run: |
dnf install -y bison clang flex git llvm make maven cmake zip clang-devel gcc gcc-c++ java-11-openjdk-devel libxml2-devel llvm-devel llvm-static ncurses-devel openmotif openmotif-devel perl perl-Digest-MD5 udunits2 udunits2-devel which zlib-devel python2-devel python3-devel swig diffutils lcov
- name: Install GTest
run: |
dnf config-manager --enable ol8_codeready_builder
dnf install -y gtest-devel
- name: Checkout repository
uses: actions/checkout@master
- name: Configure Trick
run: |
export MAKEFLAGS=-j`nproc`
export PYTHON_VERSION=3
./configure
- name: Build Trick
# gcov flags exported into every compile/link variable Trick honors
run: |
export MAKEFLAGS=-j`nproc`
export CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
export LDFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_LDFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
make
- name: Generate Code Coverage
# the same flags are re-exported because each step runs in a fresh shell
run: |
export MAKEFLAGS=-j`nproc`
export CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
export LDFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_LDFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
cd share/trick/trickops/
python3 -m venv .venv && . .venv/bin/activate && pip3 install -r requirements.txt
cd ../../../
make code-coverage
- name: Upload to Coveralls
uses: coverallsapp/github-action@master
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
path-to-lcov: "./coverage.info"

View File

@ -0,0 +1,34 @@
# Workflow "Report More Linux": runs after the "More Linux" workflow
# completes and publishes its JUnit XML artifacts as check runs.
# The matrix here must mirror the one in alt_linux_distros.yml so that
# artifact names (Trick_<os><tag>_py<python>) line up.
# NOTE(review): leading indentation was stripped when this diff was captured.
name: 'Report More Linux'
on:
workflow_run:
workflows: ['More Linux'] # runs after CI workflow
types:
- completed
jobs:
report:
strategy:
fail-fast: false
matrix:
cfg:
#-------- Operating Systems ----------------
- { os: ubuntu, tag: 18.04, arch: debian, arch_ver: 10 }
- { os: ubuntu, tag: 20.04, arch: debian, arch_ver: 11 }
- { os: debian, tag: 10, arch: debian, arch_ver: 10 }
- { os: debian, tag: 11, arch: debian, arch_ver: 11 }
- { os: debian, tag: bookworm, arch: debian, arch_ver: 12 }
- { os: almalinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [2, 3]
#-------- Exclude bookworm on python 2 ---------------------
exclude:
- cfg: { tag: bookworm }
python: 2
runs-on: ubuntu-latest
steps:
- uses: dorny/test-reporter@v1
with:
artifact: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}} # artifact name
name: Results_Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}} # Name of the check run which will be created
path: '*.xml' # Path to test results (inside artifact .zip)
reporter: java-junit # Format of test results

View File

@ -1,4 +1,4 @@
name: 'Report'
name: 'Report Linux'
on:
workflow_run:
@ -12,15 +12,12 @@ jobs:
matrix:
cfg:
#-------- Operating Systems ----------------
- { os: ubuntu, tag: 18.04, arch: debian, arch_ver: 10 }
- { os: ubuntu, tag: 20.04, arch: debian, arch_ver: 11 }
- { os: debian, tag: 10, arch: debian, arch_ver: 10 }
- { os: debian, tag: 11, arch: debian, arch_ver: 11 }
- { os: centos, tag: 7, arch: rhel, arch_ver: 7 }
- { os: rockylinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: oraclelinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: almalinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [2, 3]
- { os: ubuntu, tag: 22.04, arch: debian, arch_ver: 12 }
- { os: centos, tag: 7, arch: rhel, arch_ver: 7 }
- { os: rockylinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: oraclelinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [3]
runs-on: ubuntu-latest
steps:

29
.github/workflows/report_linux_py2.yml vendored Normal file
View File

@ -0,0 +1,29 @@
# Workflow "Report Linux Python 2": runs after the "Linux Python 2"
# workflow completes and publishes its JUnit XML artifacts as check runs.
# The matrix mirrors test_linux_py2.yml so artifact names line up.
# NOTE(review): leading indentation was stripped when this diff was captured.
name: 'Report Linux Python 2'
on:
workflow_run:
workflows: ['Linux Python 2'] # runs after CI workflow
types:
- completed
jobs:
report:
strategy:
fail-fast: false
matrix:
cfg:
#-------- Operating Systems ----------------
- { os: ubuntu, tag: 22.04, arch: debian, arch_ver: 12 }
- { os: centos, tag: 7, arch: rhel, arch_ver: 7 }
- { os: rockylinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: oraclelinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [2]
runs-on: ubuntu-latest
steps:
- uses: dorny/test-reporter@v1
with:
artifact: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}} # artifact name
name: Results_Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}} # Name of the check run which will be created
path: '*.xml' # Path to test results (inside artifact .zip)
reporter: java-junit # Format of test results

20
.github/workflows/report_macos.yml vendored Normal file
View File

@ -0,0 +1,20 @@
# Workflow "Report macOS": runs after the "macOS" workflow completes and
# publishes its single Trick_macos JUnit XML artifact as a check run.
# NOTE(review): leading indentation was stripped when this diff was captured.
name: 'Report macOS'
on:
workflow_run:
workflows: ['macOS'] # runs after CI workflow
types:
- completed
jobs:
report:
strategy:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: dorny/test-reporter@v1
with:
artifact: Trick_macos # artifact name
name: Results_Trick_macos # Name of the check run which will be created
path: '*.xml' # Path to test results (inside artifact .zip)
reporter: java-junit # Format of test results

View File

@ -1,16 +1,13 @@
name: 32-bit
on:
push:
paths-ignore:
- 'docs/**'
- '.github/workflows/**'
- '!.github/workflows/test_32.yml'
pull_request:
schedule:
- cron: '0 0 * * 0'
workflow_dispatch:
jobs:
trick_32bit:
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
container: docker://centos:7
steps:
- name: Checkout repository
@ -30,7 +27,7 @@ jobs:
glibc.i686 glibc-devel.i686 udunits2 udunits2-devel gtest-devel.i686
java-11-openjdk java-11-openjdk-devel expat-devel.i686
which gcc-gfortran git wget gsl-devel gtest-devel gsl-devel.i686
maven udunits2 udunits2-devel zip
maven udunits2 udunits2-devel zip libgcc*i686 libstdc++*i686 glibc*i686
- name: Symlink python
run: |
cd /usr/lib
@ -63,4 +60,10 @@ jobs:
./configure --without-hdf5 --enable-32bit
make
- name: Run tests
run: make test
run: |
cd share/trick/trickops/
yum install -y python3-devel
python3 -m venv .venv && . .venv/bin/activate && pip install --upgrade pip && pip3 install -r requirements.txt
cd ../../../
echo $MAKEFLAGS $CXXFLAGS $CFLAGS
make test

75
.github/workflows/test_32_oracle.yml vendored Normal file
View File

@ -0,0 +1,75 @@
# Workflow "32-bit Oracle": weekly 32-bit build and test of Trick on
# oraclelinux:8, building 32-bit udunits and gtest from source.
# NOTE(review): leading indentation was stripped when this diff was captured.
name: 32-bit Oracle
on:
# midnight UTC every Sunday
schedule:
- cron: '0 0 * * 0'
workflow_dispatch:
jobs:
trick_32bit_oracle:
runs-on: ubuntu-latest
container: docker://oraclelinux:8
steps:
- name: Add dnf repo
run: dnf -y install epel-release && dnf -y update
- name: Install other dependencies
run: >
dnf install -y tar bison clang clang-devel cmake diffutils
expat-devel.i686 expat.i686 flex gcc gcc-c++ git glibc-devel.i686
glibc.i686 glibc.x86_64 gsl-devel gsl-devel.i686 java-11-openjdk
java-11-openjdk-devel libstdc++-devel libstdc++-devel.i686 libxml2-devel
libxml2-devel.i686 llvm llvm-devel llvm-static make maven ncurses-devel
ncurses-devel.i686 openmotif openmotif-devel perl perl-Digest-MD5
python2-devel python3-devel python3-libs.i686 swig tar udunits2
udunits2-devel wget which zip zlib-devel zlib-devel.i686
- name: Symlink python
run: |
ln -s /lib/libpython3.6m.so.1.0 /lib/libpython3.6m.so
- name: Install Udunits (32 bit)
run: |
cd /
curl --retry 4 -O https://artifacts.unidata.ucar.edu/repository/downloads-udunits/current/udunits-2.2.28.tar.gz
tar xfvz udunits-2.2.28.tar.gz
rm -rf udunits-2.2.28.tar.gz
cd udunits-2.2.28
export CFLAGS="-m32"
export CXXFLAGS="-m32"
export MAKEFLAGS=-j`nproc`
./configure
make
make install
cd /
# FIX: was `rm -rf udunit-2.2.28` (missing the 's'), which matched no
# directory and left the build tree behind in the container image.
rm -rf udunits-2.2.28
- name: Install GTest
run: |
dnf config-manager --enable ol8_codeready_builder
dnf install -y gtest-devel
- name: Install GTest (32 bit)
run: |
wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz
tar xfvz release-1.8.0.tar.gz
cd googletest-release-1.8.0/googletest
export CFLAGS="-m32"
export CXXFLAGS="-m32"
export MAKEFLAGS=-j`nproc`
cmake .
make
make install
- name: Checkout repository
uses: actions/checkout@master
- name: Build Trick
run: |
export CFLAGS="-m32"
export CXXFLAGS="-m32"
export MAKEFLAGS=-j`nproc`
./configure --without-hdf5 --enable-32bit
make
- name: Run tests
# -m32 flags re-exported because each step runs in a fresh shell
run: |
cd share/trick/trickops/
python3 -m venv .venv && . .venv/bin/activate && pip3 install -r requirements.txt
cd ../../../
export CFLAGS="-m32"
export CXXFLAGS="-m32"
export MAKEFLAGS=-j`nproc`
make test

View File

@ -2,6 +2,8 @@ name: Linux
on:
push:
branches:
- master
paths-ignore:
- 'docs/**'
- '.github/workflows/**'
@ -16,17 +18,11 @@ jobs:
matrix:
cfg:
#-------- Operating Systems ----------------
- { os: ubuntu, tag: 18.04, arch: debian, arch_ver: 10 }
- { os: ubuntu, tag: 20.04, arch: debian, arch_ver: 11 }
- { os: ubuntu, tag: 22.04, arch: debian, arch_ver: 12 }
- { os: debian, tag: 10, arch: debian, arch_ver: 10 }
- { os: debian, tag: 11, arch: debian, arch_ver: 11 }
- { os: debian, tag: bookworm, arch: debian, arch_ver: 12 }
- { os: centos, tag: 7, arch: rhel, arch_ver: 7 }
- { os: rockylinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: oraclelinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: almalinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [2, 3]
python: [3]
#-------- Defaults --------------------------
include:
@ -65,6 +61,8 @@ jobs:
default-jdk
python2.7-dev
python3-dev
python3-pip
python3-venv
install_gtest: |
apt-get install -y libgtest-dev
cd /usr/src/gtest
@ -116,20 +114,6 @@ jobs:
install_gtest: |
dnf config-manager --enable powertools
dnf install -y gtest-devel
#-------- Debian 10-based Only Dependencies ----------------
- cfg: { arch: debian, arch_ver: 10 }
install_gtest: |
apt-get install -y libgtest-dev
cd /usr/src/gtest
cmake .
make
cp libgtest* /usr/lib/
#-------- Debian 11-based Only Dependencies ----------------
- cfg: { arch: debian, arch_ver: 11 }
conf_pkg: |
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get install -y tzdata
#-------- OS and Version Specific Dependencies ----------------
- cfg: { os: oraclelinux }
install_gtest: |
@ -163,7 +147,10 @@ jobs:
export MAKEFLAGS=-j`nproc`
make
- name: Test
run: make test
run: |
cd share/trick/trickops/
python3 -m venv .venv && . .venv/bin/activate && pip3 install -r requirements.txt
cd ../../../; make test
- name: Upload Tests
uses: actions/upload-artifact@v3.0.0
if: success() || failure() # run this step even if previous step failed
@ -183,30 +170,3 @@ jobs:
# share
# libexec
# retention-days: 1
# report:
# needs: [build]
# strategy:
# fail-fast: false
# matrix:
# cfg:
# #-------- Operating Systems ----------------
# - { os: ubuntu, tag: 18.04, arch: debian, arch_ver: 10 }
# - { os: ubuntu, tag: 20.04, arch: debian, arch_ver: 11 }
# - { os: debian, tag: 10, arch: debian, arch_ver: 10 }
# - { os: debian, tag: 11, arch: debian, arch_ver: 11 }
# - { os: centos, tag: 7, arch: rhel, arch_ver: 7 }
# - { os: rockylinux, tag: 8, arch: rhel, arch_ver: 8 }
# - { os: oraclelinux, tag: 8, arch: rhel, arch_ver: 8 }
# - { os: almalinux, tag: 8, arch: rhel, arch_ver: 8 }
# python: [2, 3]
# runs-on: ubuntu-latest
# steps:
# - uses: dorny/test-reporter@v1
# with:
# artifact: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}} # artifact name
# name: Results_Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}} # Name of the check run which will be created
# path: '*.xml' # Path to test results (inside artifact .zip)
# reporter: java-junit # Format of test results

166
.github/workflows/test_linux_py2.yml vendored Normal file
View File

@ -0,0 +1,166 @@
# Workflow "Linux Python 2": weekly build and test of Trick with
# PYTHON_VERSION=2 on ubuntu 22.04, centos 7, rockylinux 8, oraclelinux 8.
# Structure parallels the main Linux workflow: a cfg matrix plus layered
# "include" entries that assemble per-distro package lists.
# NOTE(review): leading indentation was stripped when this diff was captured.
name: 'Linux Python 2'
on:
# midnight UTC every Sunday
schedule:
- cron: '0 0 * * 0'
workflow_dispatch:
jobs:
build:
strategy:
fail-fast: false
matrix:
cfg:
#-------- Operating Systems ----------------
- { os: ubuntu, tag: 22.04, arch: debian, arch_ver: 12 }
- { os: centos, tag: 7, arch: rhel, arch_ver: 7 }
- { os: rockylinux, tag: 8, arch: rhel, arch_ver: 8 }
- { os: oraclelinux, tag: 8, arch: rhel, arch_ver: 8 }
python: [2]
#-------- Defaults --------------------------
# the empty-cfg include supplies defaults for every matrix combination
include:
- cfg: {}
deps: >-
bison
clang
flex
git
llvm
make
maven
cmake
zip
install_gtest: echo gtest already installed
conf_pkg: echo package manager already configured
install_cmd: install -y
#-------- Debian-based Dependencies ----------------
- cfg: { arch: debian }
pkg_mgr: apt-get
conf_pkg: apt-get update
arch_deps: >-
swig
curl
g++
libx11-dev
libxml2-dev
libxt-dev
libmotif-common
libmotif-dev
zlib1g-dev
llvm-dev
libclang-dev
libudunits2-dev
libgtest-dev
default-jdk
python2.7-dev
python3-dev
python3-pip
python3-venv
install_gtest: |
apt-get install -y libgtest-dev
cd /usr/src/gtest
cmake .
make
cp lib/libgtest* /usr/lib/
#-------- RHEL Dependencies ----------------
- cfg: { arch: rhel }
arch_deps: >-
clang-devel
gcc
gcc-c++
java-11-openjdk-devel
libxml2-devel
llvm-devel
llvm-static
ncurses-devel
openmotif
openmotif-devel
perl
perl-Digest-MD5
udunits2
udunits2-devel
which
zlib-devel
python2-devel
python3-devel
#-------- RHEL 7-based Only Dependencies ----------------
- cfg: { arch: rhel, arch_ver: 7 }
pkg_mgr: yum
conf_pkg: |
yum -y install epel-release
yum -y update
os_deps: >-
libX11-devel
libXt-devel
swig3
gtest-devel
#-------- RHEL 8-based Only Dependencies ----------------
- cfg: { arch: rhel, arch_ver: 8 }
pkg_mgr: dnf
tag_deps: >-
swig
diffutils
conf_pkg: |
dnf -y install epel-release
dnf -y update
dnf install -y 'dnf-command(config-manager)'
install_gtest: |
dnf config-manager --enable powertools
dnf install -y gtest-devel
#-------- OS and Version Specific Dependencies ----------------
# oraclelinux: later include entry overrides the rhel-8 install_gtest above
- cfg: { os: oraclelinux }
install_gtest: |
dnf config-manager --enable ol8_codeready_builder
dnf install -y gtest-devel
#-------- Job definition ----------------
runs-on: ubuntu-latest
container: docker://${{matrix.cfg.os}}:${{matrix.cfg.tag}}
steps:
- name: Update Package Manager
run: ${{matrix.conf_pkg}}
- name: Install Dependencies
# single install command assembled from the matrix fragments above
run: >
${{matrix.pkg_mgr}}
${{matrix.install_cmd}}
${{matrix.deps}}
${{matrix.arch_deps}}
${{matrix.os_deps}}
${{matrix.tag_deps}}
- name: Install GTest
run: ${{matrix.install_gtest}}
- name: Checkout repository
uses: actions/checkout@master
- name: Configure Trick
run: |
export MAKEFLAGS=-j`nproc`
export PYTHON_VERSION=${{matrix.python}}
./configure
- name: Build Trick
run: |
export MAKEFLAGS=-j`nproc`
make
- name: Test
run: |
cd share/trick/trickops/
python3 -m venv .venv && . .venv/bin/activate && pip3 install -r requirements.txt
cd ../../../; make test
- name: Upload Tests
uses: actions/upload-artifact@v3.0.0
if: success() || failure() # run this step even if previous step failed
with:
name: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}}
path: trick_test/*.xml
retention-days: 1
# Uncomment for build artifacts
# - name: Upload Trick Build
# uses: actions/upload-artifact@v3.0.0
# with:
# name: Trick_${{matrix.cfg.os}}${{matrix.cfg.tag}}_py${{matrix.python}}
# path: |
# bin
# include
# lib
# share
# libexec
# retention-days: 1

View File

@ -2,11 +2,14 @@ name: macOS
on:
push:
branches:
- master
paths-ignore:
- 'docs/**'
- '.github/workflows/**'
- '!.github/workflows/test_macos.yml'
pull_request:
workflow_dispatch:
jobs:
macOS:
@ -17,23 +20,29 @@ jobs:
- name: Install gtest
run: |
wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz
tar xzvf release-1.8.0.tar.gz
cd googletest-release-1.8.0/googletest
cmake .
make
make install
brew install googletest
- name: Install dependencies
run: |
# sudo installer -pkg /Library/Developer/CommandLineTools/Packages/macOS_SDK_headers_for_macOS_10.15.pkg -target /
brew install --cask xquartz
brew install llvm@11 swig udunits openmotif maven
brew install swig udunits openmotif maven
wget https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.6/clang+llvm-14.0.6-x86_64-apple-darwin.tar.xz
tar -xvf clang+llvm-14.0.6-x86_64-apple-darwin.tar.xz
- name: Build Trick
run: |
export MAKEFLAGS=-j4
./configure --with-llvm=/usr/local/opt/llvm@11
./configure --with-llvm=${GITHUB_WORKSPACE}/clang+llvm-14.0.6-x86_64-apple-darwin
make
- name: Run tests
- name: Test
run: |
cd share/trick/trickops/
python3 -m venv .venv && source .venv/bin/activate && pip3 install -r requirements.txt
export MAKEFLAGS=-j4
make test
cd ../../../; make test
- name: Upload Tests
uses: actions/upload-artifact@v3.0.0
if: success() || failure() # run this step even if previous step failed
with:
name: Trick_macos
path: trick_test/*.xml
retention-days: 1

View File

@ -1,12 +1,9 @@
name: TrickOps
# This workflow is triggered on pushes to the repository.
on:
push:
paths-ignore:
- 'docs/**'
- '.github/workflows/**'
- '!.github/workflows/trickops.yml'
pull_request:
schedule:
- cron: '0 0 * * 0'
workflow_dispatch:
defaults:
run:

8
.gitignore vendored
View File

@ -34,4 +34,10 @@ civetweb_clone/
include/civet/
.vscode/
civet_server_error.log
server.pem
server.pem
archive/
.venv
trickops_logs/
*.gcda
*.gcno
coverage.info

View File

@ -3,8 +3,8 @@ cmake_minimum_required(VERSION 3.1)
# trick is a C/C++ project, but we have some macOS
# configuration to do before CMake searches for compilers
project(trick NONE)
set(TRICK_MAJOR 23)
set(TRICK_MINOR 0)
set(TRICK_MAJOR 19)
set(TRICK_MINOR 6)
set(TRICK_TINY 0)
# set TRICK_PRERELEASE TO EMPTY STRING "" ON RELEASE
set(TRICK_PRERELEASE "-beta")

View File

@ -42,6 +42,7 @@ SIM_SERV_DIRS = \
${TRICK_HOME}/trick_source/sim_services/MemoryManager \
${TRICK_HOME}/trick_source/sim_services/Message \
${TRICK_HOME}/trick_source/sim_services/MonteCarlo \
${TRICK_HOME}/trick_source/sim_services/MonteCarloGeneration \
${TRICK_HOME}/trick_source/sim_services/RealtimeInjector \
${TRICK_HOME}/trick_source/sim_services/RealtimeSync \
${TRICK_HOME}/trick_source/sim_services/ScheduledJobQueue \
@ -90,19 +91,23 @@ endif
ER7_UTILS_OBJS = $(addsuffix /object_$(TRICK_HOST_CPU)/*.o ,$(ER7_UTILS_DIRS))
UTILS_DIRS := \
${TRICK_HOME}/trick_source/trick_utils/compareFloatingPoint \
${TRICK_HOME}/trick_source/trick_utils/interpolator \
${TRICK_HOME}/trick_source/trick_utils/trick_adt \
${TRICK_HOME}/trick_source/trick_utils/comm \
${TRICK_HOME}/trick_source/trick_utils/shm \
${TRICK_HOME}/trick_source/trick_utils/math \
${TRICK_HOME}/trick_source/trick_utils/units \
${TRICK_HOME}/trick_source/trick_utils/unicode
${TRICK_HOME}/trick_source/trick_utils/unicode \
${TRICK_HOME}/trick_source/trick_utils/var_binary_parser
UTILS_OBJS := $(addsuffix /object_$(TRICK_HOST_CPU)/*.o ,$(UTILS_DIRS))
# filter out the directories that make their own libraries
UTILS_OBJS := $(filter-out ${TRICK_HOME}/trick_source/trick_utils/comm/%, $(UTILS_OBJS))
UTILS_OBJS := $(filter-out ${TRICK_HOME}/trick_source/trick_utils/math/%, $(UTILS_OBJS))
UTILS_OBJS := $(filter-out ${TRICK_HOME}/trick_source/trick_utils/units/%, $(UTILS_OBJS))
UTILS_OBJS := $(filter-out ${TRICK_HOME}/trick_source/trick_utils/var_binary_parser/%, $(UTILS_OBJS))
#-------------------------------------------------------------------------------
# Specify the contents of: libtrick_pyip.a
@ -284,6 +289,7 @@ premade:
################################################################################
# TESTING
################################################################################
# This target runs Trick's Unit-tests and simulation-tests.
test: unit_test sim_test
@ echo "All tests completed sucessfully"
@ -301,12 +307,27 @@ $(DPX_UNIT_TEST_DIR):
sim_test:
@ $(MAKE) -C test
@ $(MAKE) -C trick_sims test
@ $(MAKE) -f test_overrides.mk sim_test
pytest:
make -C share/trick/pymods/trick
COVERAGE_DIRS = trick_source/sim_services \
trick_source/trick_utils/var_binary_parser \
trick_source/trick_utils/unicode \
trick_source/trick_utils/units \
trick_source/trick_utils/interpolator \
trick_source/trick_utils/comm \
trick_source/trick_utils/SAIntegrator
extra-coverage-builds:
@ $(MAKE) test -C trick_source/trick_utils/SAIntegrator
code-coverage: test extra-coverage-builds
lcov --capture $(addprefix --directory , $(COVERAGE_DIRS)) --output-file coverage_large.info
lcov --remove coverage_large.info '/Library/*' '/usr/*' '*/io_src/*' '*/test/*' '*/unittest/*' -o coverage.info
rm coverage_large.info
lcov --list coverage.info
#requirements:
# @ $(MAKE) -C trick_test/requirements_docs install

View File

@ -1,27 +1,27 @@
<p align=center>
<a href="https://nasa.github.io/trick">
<img src="https://raw.github.com/nasa/Trick/master/TrickLogo.png" alt="Trick Logo" height=150px>
</a>
<picture>
<a href="https://nasa.github.io/trick">
<source media="(prefers-color-scheme: dark)" srcset="https://raw.github.com/nasa/Trick/master/TrickLogo_darkmode.png">
<img alt="Trick logo" src="https://raw.github.com/nasa/Trick/master/TrickLogo.png" height=150px>
</picture>
</p>
<p align=left>
<a href="https://github.com/nasa/trick/actions?query=workflow%3ALinux">
<img src="https://github.com/nasa/trick/workflows/Linux/badge.svg?branch=master" alt="Linux" height=30px>
</a>
</p>
<p align=left>
<a href="https://github.com/nasa/trick/actions?query=workflow%3AmacOS">
<img src="https://github.com/nasa/trick/workflows/macOS/badge.svg?branch=master" alt="macOS" height=30px>
</a>
</p>
<p align=left>
<a href="https://github.com/nasa/trick/actions?query=workflow%3A32-bit">
<img src="https://github.com/nasa/trick/workflows/32-bit/badge.svg?branch=master" alt="32-bit" height=30px>
</a>
<a href="https://coveralls.io/github/nasa/trick?branch=master">
<img src="https://coveralls.io/repos/github/nasa/trick/badge.svg?branch=master" alt="Coverage Status" height=30px>
</a>
</p>
<p align=justify>
The Trick Simulation Environment, developed at the NASA Johnson Space Center, is a powerful simulation development framework that enables users to build applications for all phases of space vehicle development. Trick expedites the creation of simulations for early vehicle design, performance evaluation, flight software development, flight vehicle dynamic load analysis, and virtual/hardware in the loop training. Trick's purpose is to provide a common set of simulation capabilities that allow users to concentrate on their domain specific models, rather than simulation-specific functions like job ordering, input file processing, or data recording.
</p>
@ -66,6 +66,20 @@ The Trick Simulation Environment, developed at the NASA Johnson Space Center, is
</tbody>
</table>
<table align="center">
<col width="100%">
<thead>
<tr>
<th><a href="https://nasa.github.io/trick/developer_docs/Developer-Docs-Home"> Developer Docs </a></th>
</tr>
</thead>
<tbody>
<tr align="center">
<td>Read detailed documentation for various Trick internals and processes.</td>
</tr>
</tbody>
</table>
---
Trick is released under the NASA Open Source Agreement Version 1.3 [license](https://github.com/nasa/trick/blob/master/LICENSE).

BIN
TrickLogo_darkmode.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 110 KiB

1690
autoconf/config.guess vendored Executable file → Normal file

File diff suppressed because it is too large Load Diff

2946
autoconf/config.sub vendored Executable file → Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,307 +1,25 @@
dnl To generate a new top level configure script from this autoconf directory
dnl 1. aclocal
dnl 2. autoconf -o ../configure
dnl If you run into "configure.ac:15: error: possibly undefined macro: AC_MSG_ERROR",
dnl try:
dnl autoreconf -fi
AC_INIT([Trick],[17+])
AC_LANG(C++)
AC_CONFIG_AUX_DIR([autoconf])
AC_DEFUN([AX_LLVM_HOME],[
AC_ARG_WITH([llvm],
AS_HELP_STRING([--with-llvm@<:@=DIR@:>@], [LLVM root directory]),
[LLVM_HOME="$withval"
AC_PATH_PROG(LLVM_CONFIG, llvm-config, no-llvm-config, "$LLVM_HOME/bin")
AS_IF([test "$ac_cv_path_LLVM_CONFIG" = "no-llvm-config"],AC_MSG_ERROR([could not find llvm-config]),[])
],
[
AC_PATH_PROG(LLVM_CONFIG, llvm-config, no-llvm-config, "/bin:/usr/bin:/usr/local/bin:/sw/bin:/usr/local/opt/llvm/bin")
AS_IF([test "$ac_cv_path_LLVM_CONFIG" = "no-llvm-config"],AC_MSG_ERROR([could not find llvm-config]),[])
LLVM_HOME=`$LLVM_CONFIG --prefix`
]
)
])
AC_DEFUN([AX_HDF5_HOME],[
AC_ARG_WITH([hdf5],
AS_HELP_STRING([--with-hdf5@<:@=DIR@:>@], [HDF5 root directory]),
[HDF5_HOME="$withval"
AS_IF([test "$HDF5_HOME" = "yes"],
AC_CHECK_HEADER(hdf5.h,[HDF5_HOME="/usr"],AC_MSG_ERROR([could not find hdf5.h])),
AS_IF([test "$HDF5_HOME" = "no"],[HDF5_HOME=""],
AC_CHECK_FILE([$HDF5_HOME/include/hdf5.h],
[],
AC_MSG_ERROR([could not find $HDF5_HOME/include/hdf5.h])
)
)
)],
[
AC_CHECK_HEADER(hdf5.h, [HDF5_HOME="/usr"], [HDF5_HOME=""])
]
)
AS_IF([test "$HDF5_HOME" != ""],
[
LDFLAGS="${LDFLAGS} -L${HDF5_HOME}/lib"
AC_CHECK_LIB(hdf5, main, [],AC_MSG_ERROR([could not find libhdf5]))
]
[]
)
AC_SUBST([HDF5_HOME])
])
AC_DEFUN([AX_GSL_HOME],[
AC_ARG_WITH([gsl],
AS_HELP_STRING([--with-gsl@<:@=DIR@:>@], [GSL root directory]),
[GSL_HOME="$withval"
AS_IF([test "$GSL_HOME" = "yes"],
AC_CHECK_HEADER(gsl/gsl_rng.h,[GSL_HOME="/usr"],AC_MSG_ERROR([could not find gsl/gsl_rng.h])),
AS_IF([test "$GSL_HOME" = "no"],[GSL_HOME=""],
AC_CHECK_FILE([$GSL_HOME/include/gsl],
[],
AC_MSG_ERROR([could not find $GSL_HOME/include/gsl])
)
)
)],
[AC_CHECK_HEADER(gsl/gsl_rng.h, [GSL_HOME="/usr"], [GSL_HOME=""])]
)
AS_IF([test "$GSL_HOME" != ""],
[
LDFLAGS="${LDFLAGS} -L${GSL_HOME}/lib"
AC_CHECK_LIB(gsl, main, [],AC_MSG_ERROR([could not find libgsl]),[-lgslcblas])
],
[]
)
AC_SUBST([GSL_HOME])
])
AC_DEFUN([AX_GTEST_HOME],[
AC_ARG_WITH([gtest],
AS_HELP_STRING([--with-gtest@<:@=DIR@:>@], [GTEST root directory]),
[GTEST_HOME="$withval"
AS_IF([test "$GTEST_HOME" = "yes"],
AC_CHECK_HEADER(gtest/gtest.h,[GTEST_HOME="/usr"],AC_MSG_ERROR([could not find gtest/gtest.h])),
AS_IF([test "$GTEST_HOME" = "no"],[GTEST_HOME=""],
AC_CHECK_FILE([$GTEST_HOME/include/gtest],
[],
AC_MSG_ERROR([could not find $GTEST_HOME/include/gtest])
)
)
)],
[AC_CHECK_HEADER(gtest/gtest.h, [GTEST_HOME="/usr"], [GTEST_HOME=""])]
)
AC_SUBST([GTEST_HOME])
])
AC_DEFUN([AX_CIVETWEB_HOME],[
AC_ARG_WITH([civetweb],
AS_HELP_STRING([--with-civetweb@<:@=DIR@:>@],
[CIVETWEB root directory]),
# --with option was provided.
[CIVETWEB_HOME="$withval"
USE_CIVETWEB="0"
# check whether directory arg was also provided.
AS_IF([test "$CIVETWEB_HOME" = "yes"],
AC_CHECK_HEADER(civetweb.h,
[CIVETWEB_HOME="/usr"; USE_CIVETWEB="1"],
AC_MSG_ERROR([could not find civetweb.h])),
# else check whether --without-civet or --with-civet=no specified.
AS_IF([test "$CIVETWEB_HOME" = "no"],
[CIVETWEB_HOME=""],
# else --with-civet was provided with a directory path.
AC_CHECK_FILES([$CIVETWEB_HOME/include/civetweb.h $CIVETWEB_HOME/lib/libcivetweb.a],
[USE_CIVETWEB="1"],
AC_MSG_ERROR([Could not find all of the civetweb files.]))
))
],
# --with option not provided.
[AC_CHECK_HEADER(civetweb.h,
[CIVETWEB_HOME="/usr"; USE_CIVETWEB="1"],
[CIVETWEB_HOME=""; USE_CIVETWEB="0"])
]
)
AC_SUBST([CIVETWEB_HOME])
AC_SUBST([USE_CIVETWEB])
])
AC_DEFUN([AX_SWIG_BIN],[
AC_ARG_WITH([swig],
[AS_HELP_STRING([--with-swig@<:@=DIR@:>@], [path of directory containing the SWIG executable.])],
[
TEMP_PATH="${PATH}"
PATH="$withval:${PATH}"
AX_PKG_SWIG($1, [], [AC_MSG_ERROR([Trick requires SWIG version >= 2.0])])
PATH="${TEMP_PATH}"
],
[AX_PKG_SWIG($1, [], [AC_MSG_ERROR([Trick requires SWIG version >= 2.0])])]
)
])
AC_DEFUN([AX_UDUNITS_HOME],[
AC_ARG_WITH([udunits],
AS_HELP_STRING([--with-udunits@<:@=DIR@:>@], [UDUnits root directory]),
[UDUNITS_HOME="$withval"],
[UDUNITS_HOME=""]
)
AC_SUBST([UDUNITS_HOME])
])
dnl SOURCE: https://stackoverflow.com/a/59191148
dnl NA_HELP_STRINGS(list1, help1[, list2, help2[, ... listN, helpN]])
dnl **************************************************************************
dnl
dnl Similar to `AS_HELP_STRING()`, but with support for multiple strings, each
dnl one associated with one or more options
dnl
dnl From: https://github.com/madmurphy/not-autotools
dnl
dnl **************************************************************************
dnl Each listN is a quoted, comma-separated list of option strings sharing
dnl helpN.  A one-element list delegates to AS_HELP_STRING; a longer list
dnl emits each option on its own wrapped line, recursing on the list tail
dnl (m4_shift) and then on the remaining list/help pairs (m4_shift2).
m4_define([NA_HELP_STRINGS],
[m4_if(m4_count($1), [1],
[m4_if([$#], [0], [], [$#], [1],
[m4_text_wrap($1, [ ])],
[AS_HELP_STRING(m4_normalize($1), [$2])m4_if([$#], [2], [], [m4_newline()NA_HELP_STRINGS(m4_shift2($@))])])],
[m4_text_wrap(m4_argn(1, $1)[,], [ ])m4_newline()NA_HELP_STRINGS(m4_dquote(m4_shift($1))m4_if([$#], [1], [], [, m4_shift($@)]))])])
dnl test if want to prepend /usr/local/bin to PATH
dnl AC_ARG_WITH (package, help-string, [action-if-given], [action-if-not-given])
dnl Behavior: a bare --with-prepend-path prepends /usr/local/bin;
dnl --with-prepend-path=DIR prepends DIR; --without-prepend-path (or
dnl omitting the option entirely) leaves PATH untouched.
AC_DEFUN(
[AX_PREPEND_PATH],
[AC_ARG_WITH(
[prepend-path],
[NA_HELP_STRINGS(
[--without-prepend-path], [do not prepend to path (this is default)], [--with-prepend-path@<:@=DIR@:>@], [specify a directory to prepend to PATH (default is /usr/local/bin)])],
[AS_IF(
[test "x${with_prepend_path}" = xyes],
[PATH="/usr/local/bin:${PATH}"],
[AS_IF(
[test "x${with_prepend_path}" != xno],
[PATH="${withval}:${PATH}"],
[]
)]
)],
[]
)]
)
dnl test if we want to use java, default to yes
dnl AX_JAVA: substitute USE_JAVA ("1" unless --disable-java).  When java
dnl is enabled: locate a java compiler (AX_PROG_JAVA_CC), require its
dnl reported version >= 1.8, and, when TRICK_OFFLINE is "0", require mvn.
AC_DEFUN([AX_JAVA],[
AC_ARG_ENABLE([java],
AS_HELP_STRING([--enable-java], [use java (default is yes)]),
AS_IF([test "x$enable_java" = xyes], [USE_JAVA="1"], [USE_JAVA="0"]),
[USE_JAVA="1"]
)
AC_SUBST([USE_JAVA])
AS_IF([test "$USE_JAVA" = "1"],[
AC_PROG_AWK
AX_PROG_JAVA_CC(javac)
dnl Extract the first number (e.g. "11" or "1.8") from `javac -version`.
JAVA_VER=`$JAVA_CC -version 2>&1 | $ac_cv_path_PERL -ne 'print $& if /\d+(\.\d+)?/'`
AC_MSG_CHECKING([$JAVA_CC version >= 1.8])
AX_COMPARE_VERSION([$JAVA_VER],[ge],[1.8], [AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_ERROR([Trick requires javac version >= 1.8])
])
AS_IF([test "$TRICK_OFFLINE" = "0"], [
AC_PATH_PROG(MVN, mvn, nomvn)
AS_IF([test "$ac_cv_path_MVN" = "nomvn"],AC_MSG_ERROR([could not find maven]),[])
], [])
],[])
])
dnl test if we want to use er7_utils, default to yes
dnl AX_ER7_UTILS: substitute USE_ER7_UTILS as "1" unless --disable-er7utils.
AC_DEFUN([AX_ER7_UTILS],[
AC_ARG_ENABLE([er7utils],
AS_HELP_STRING([--enable-er7utils], [use er7_utils (default is yes)]),
AS_IF([test "x$enable_er7utils" = xyes], [USE_ER7_UTILS="1"], [USE_ER7_UTILS="0"]),
[USE_ER7_UTILS="1"]
)
AC_SUBST([USE_ER7_UTILS])
])
dnl if fermi-ware directory exists, test for motif.
dnl AX_JSC_DIRS: only when the trick_source/data_products/fermi-ware
dnl sources are present is Motif required; probe Xm/Xm.h in /usr,
dnl /usr/local, then /sw, erroring out if none has it.  Substitutes
dnl MOTIF_HOME ("" when fermi-ware is absent).
AC_DEFUN([AX_JSC_DIRS],[
AC_CHECK_FILE([trick_source/data_products/fermi-ware],
[
AC_CHECK_HEADER(Xm/Xm.h, [MOTIF_HOME="/usr"],
AC_CHECK_FILE(/usr/local/include/Xm/Xm.h, [MOTIF_HOME="/usr/local"],
AC_CHECK_FILE(/sw/include/Xm/Xm.h, [MOTIF_HOME="/sw"],AC_MSG_ERROR([could not find Xm/Xm.h]))))
],
[
MOTIF_HOME=""
]
)
AC_SUBST([MOTIF_HOME])
])
dnl AX_GCC_VERSION: determine the gcc version and AC_SUBST it as
dnl GCC_VERSION ("" when $CC is not gcc or the probe is disabled via
dnl $ax_gcc_version_option).  The result is cached in ax_cv_gcc_version.
AC_DEFUN([AX_GCC_VERSION], [
GCC_VERSION=""
AS_IF([test "x$GCC" = "xyes"],[
AS_IF([test "x$ax_gcc_version_option" != "xno"],[
AC_CACHE_CHECK([gcc version],[ax_cv_gcc_version],[
dnl Bug fix: gcc >= 7 prints only the major version for plain
dnl -dumpversion; -dumpfullversion -dumpversion yields the full x.y.z
dnl string (same invocation as this commit's TR_GCC_VERSION macro).
ax_cv_gcc_version="`$CC -dumpfullversion -dumpversion`"
AS_IF([test "x$ax_cv_gcc_version" = "x"],[
ax_cv_gcc_version=""
])
])
GCC_VERSION=$ax_cv_gcc_version
])
])
AC_SUBST([GCC_VERSION])
])
AC_CONFIG_MACRO_DIRS([m4])
dnl get the host and build.
AC_CANONICAL_HOST
AC_CANONICAL_BUILD
dnl AC_CANONICAL_HOST provides platform. MacOSX is diverging... have to do special things
ON_MAC=no
case "${host_os}" in
darwin*)
ON_MAC=yes
;;
*)
;;
esac
dnl add extra paths to find xml headers and X headers on the mac.
AS_IF([test "$ON_MAC" = "yes"],
[
AC_PATH_PROG(XCRUN, xcrun, noxcrun)
AS_IF([test "$ac_cv_path_XCRUN" = "norun"],AC_MSG_ERROR([could not find xcrun - install Xcode command line tools]),[])
XCODE_SDK_PATH=`$XCRUN --show-sdk-path`
XTRAINCPATHS="-I/usr/X11/include -I${XCODE_SDK_PATH}/usr/include -I${XCODE_SDK_PATH}/usr/include/libxml2"
],
[
XTRAINCPATHS="-I/usr/include/libxml2/"
])
CFLAGS="$CFLAGS $XTRAINCPATHS"
CPPFLAGS="$CPPFLAGS $XTRAINCPATHS"
dnl check if we are on a mac
TR_MAC_DARWIN()
dnl add x11 libs to environment
TR_X11()
dnl look for X windows libraries and headers we need to compile
dnl these checks must be done before we force 32bit mode.
AC_PATH_X
AS_IF([test "$no_x" = "yes"],
[
USE_X_WINDOWS=0
],
[
USE_X_WINDOWS=1
AS_IF([test "x$x_includes" = "x"],[],[X_INCLUDE_DIR=-I$x_includes])
AS_IF([test "x$x_libraries" = "x"],[],[X_LIB_DIR=-L$x_libraries])
AC_CHECK_HEADER([X11/Intrinsic.h],[],AC_MSG_ERROR([could not find libxt development headers]))
AX_JSC_DIRS([])
]
)
AC_SUBST([USE_X_WINDOWS])
AC_SUBST([X_LIB_DIR])
TR_XWINDOWS()
dnl TODO Need to check 64bit only!
AC_CHECK_LIB(xml2, main,
@ -377,7 +95,7 @@ AC_PROG_CC
AC_PROG_CXX
dnl do not test gcc version on mac. clang as gcc will return version and it's not >= 4.8
AS_IF([test "$ON_MAC" = "no"],[AX_GCC_VERSION],[])
AS_IF([test "$ON_MAC" = "no"],[TR_GCC_VERSION],[])
dnl if the compiler is gcc, test for gcc >= 4.8
AS_IF([test "x$GCC_VERSION" = "x"],[],
[AC_MSG_CHECKING([gcc version >= 4.8])
@ -394,7 +112,7 @@ AC_PATH_PROG(TEE, tee, notee)
AS_IF([test "$ac_cv_path_TEE" = "notee"],AC_MSG_ERROR([could not find tee]),[])
AC_PATH_PROG(LD, ld, nold)
AS_IF([test "$ac_cv_path_LD" = "nold"],AC_MSG_ERROR([could not find ld]),[])
AC_PROG_LEX
AC_PROG_LEX([noyywrap])
AS_IF([test "x$LEX" = "x:"],AC_MSG_ERROR([could not find flex]),[])
AC_PATH_PROG(BISON, bison, nobison)
AS_IF([test "$ac_cv_path_BISON" = "nobison"],AC_MSG_ERROR([could not find bison]),[])
@ -463,21 +181,22 @@ AX_COMPARE_VERSION(
PYTHON_CPPFLAGS=`${PYTHON_CONFIG} --includes | sed 's/-I/-isystem/g'`
PYTHON_LIBS=`${PYTHON_LIBS_COMMAND} | tr '\r\n' ' '`
AC_SUBST([PYTHON_CPPFLAGS])
AC_SUBST([PYTHON_LIBS])
AC_SUBST([PYTHON_EXTRA_LIBS])
AX_PREPEND_PATH
TR_PREPEND_PATH()
AC_PATH_PROG(GNUPLOT, gnuplot, nognuplot)
AS_IF([test "$ac_cv_path_GNUPLOT" = "nognuplot"],AC_MSG_NOTICE([could not find gnuplot]),[])
AX_SWIG_BIN([2.0])
AX_JAVA
TR_SWIG_BIN([2.0])
TR_JAVA
AX_PTHREAD()
AX_PROG_PERL_MODULES( Text::Balanced ,[],AC_MSG_ERROR([could not find perl modules Text::Balanced]))
AX_PROG_PERL_MODULES( Digest::MD5,[],AC_MSG_ERROR([could not find perl module Digest::MD5]))
dnl process the optional --with-llvm command line arguments
AX_LLVM_HOME([])
TR_LLVM_HOME([])
LLVM_LIB_DIR=`$LLVM_CONFIG --libdir`
LLVM_BIN_DIR=`$LLVM_CONFIG --bindir`
LLVM_INCLUDE_DIR=`$LLVM_CONFIG --includedir`
@ -514,19 +233,11 @@ AC_CHECK_FILE([$LLVM_LIB_DIR/libclangFrontend.a],
)
)
AC_CHECK_FILE([$LLVM_LIB_DIR/libclangSupport.a],[ICG_CLANGLIBS="$ICG_CLANGLIBS -lclangSupport"],[])
AC_SUBST([ICG_CLANGLIBS])
AC_DEFUN([AX_CLANG_VERSION], [
CLANG_VERSION=""
ax_cv_clang_version="`$CLANG --version | grep "version" | sed "s/.*version \([0-9]*\.[0-9]*\.[0-9]*\).*/\1/"`"
AS_IF([test "x$ax_cv_clang_version" = "x"],[
ax_cv_clang_version=""
])
CLANG_VERSION=$ax_cv_clang_version
AC_SUBST([CLANG_VERSION])
])
AX_CLANG_VERSION
TR_CLANG_VERSION
dnl if llvm/clang, test for version >= 3.4.2
AS_IF([test "x$CLANG_VERSION" = "x"],[],
[AC_MSG_CHECKING([clang version >= 3.4.2])
@ -539,7 +250,7 @@ AS_IF([test "x$CLANG_VERSION" = "x"],[],
AX_CHECK_ZLIB([],AC_MSG_ERROR([could not find zlib]))
dnl look for udunits in /usr/include and /usr/include/udunits2
AX_UDUNITS_HOME([])
TR_UDUNITS_HOME([])
AS_IF([test "$UDUNITS_HOME" = ""],
[
AC_CHECK_HEADER([udunits2.h],
@ -571,11 +282,11 @@ AC_SUBST([UDUNITS_LDFLAGS])
AC_SUBST([UDUNITS_EXCLUDE])
dnl process the other optional command line arguments
AX_HDF5_HOME([])
AX_GSL_HOME([])
AX_GTEST_HOME([])
AX_CIVETWEB_HOME([])
AX_ER7_UTILS([])
TR_HDF5_HOME([])
TR_GSL_HOME([])
TR_GTEST_HOME([])
TR_CIVETWEB_HOME([])
TR_ER7_UTILS([])
AC_CONFIG_FILES([share/trick/makefiles/config_user.mk])
AC_OUTPUT

View File

@ -1,7 +1,7 @@
#!/bin/sh
# install - install a program, script, or datafile
scriptversion=2011-11-20.07; # UTC
scriptversion=2020-11-14.01; # UTC
# This originates from X11R5 (mit/util/scripts/install.sh), which was
# later released in X11R6 (xc/config/util/install.sh) with the
@ -41,19 +41,15 @@ scriptversion=2011-11-20.07; # UTC
# This script is compatible with the BSD install script, but was written
# from scratch.
tab=' '
nl='
'
IFS=" "" $nl"
IFS=" $tab$nl"
# set DOITPROG to echo to test this script
# Set DOITPROG to "echo" to test this script.
# Don't use :- since 4.3BSD and earlier shells don't like it.
doit=${DOITPROG-}
if test -z "$doit"; then
doit_exec=exec
else
doit_exec=$doit
fi
doit_exec=${doit:-exec}
# Put in absolute file names if you don't have them in your path;
# or use environment vars.
@ -68,22 +64,16 @@ mvprog=${MVPROG-mv}
rmprog=${RMPROG-rm}
stripprog=${STRIPPROG-strip}
posix_glob='?'
initialize_posix_glob='
test "$posix_glob" != "?" || {
if (set -f) 2>/dev/null; then
posix_glob=
else
posix_glob=:
fi
}
'
posix_mkdir=
# Desired mode of installed file.
mode=0755
# Create dirs (including intermediate dirs) using mode 755.
# This is like GNU 'install' as of coreutils 8.32 (2020).
mkdir_umask=22
backupsuffix=
chgrpcmd=
chmodcmd=$chmodprog
chowncmd=
@ -97,7 +87,7 @@ dir_arg=
dst_arg=
copy_on_change=false
no_target_directory=
is_target_a_directory=possibly
usage="\
Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
@ -114,18 +104,28 @@ Options:
--version display version info and exit.
-c (ignored)
-C install only if different (preserve the last data modification time)
-C install only if different (preserve data modification time)
-d create directories instead of installing files.
-g GROUP $chgrpprog installed files to GROUP.
-m MODE $chmodprog installed files to MODE.
-o USER $chownprog installed files to USER.
-p pass -p to $cpprog.
-s $stripprog installed files.
-S SUFFIX attempt to back up existing files, with suffix SUFFIX.
-t DIRECTORY install into DIRECTORY.
-T report an error if DSTFILE is a directory.
Environment variables override the default commands:
CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
RMPROG STRIPPROG
By default, rm is invoked with -f; when overridden with RMPROG,
it's up to you to specify -f if you want it.
If -S is not specified, no backups are attempted.
Email bug reports to bug-automake@gnu.org.
Automake home page: https://www.gnu.org/software/automake/
"
while test $# -ne 0; do
@ -137,46 +137,62 @@ while test $# -ne 0; do
-d) dir_arg=true;;
-g) chgrpcmd="$chgrpprog $2"
shift;;
shift;;
--help) echo "$usage"; exit $?;;
-m) mode=$2
case $mode in
*' '* | *' '* | *'
'* | *'*'* | *'?'* | *'['*)
echo "$0: invalid mode: $mode" >&2
exit 1;;
esac
shift;;
case $mode in
*' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*)
echo "$0: invalid mode: $mode" >&2
exit 1;;
esac
shift;;
-o) chowncmd="$chownprog $2"
shift;;
shift;;
-p) cpprog="$cpprog -p";;
-s) stripcmd=$stripprog;;
-t) dst_arg=$2
# Protect names problematic for 'test' and other utilities.
case $dst_arg in
-* | [=\(\)!]) dst_arg=./$dst_arg;;
esac
shift;;
-S) backupsuffix="$2"
shift;;
-T) no_target_directory=true;;
-t)
is_target_a_directory=always
dst_arg=$2
# Protect names problematic for 'test' and other utilities.
case $dst_arg in
-* | [=\(\)!]) dst_arg=./$dst_arg;;
esac
shift;;
-T) is_target_a_directory=never;;
--version) echo "$0 $scriptversion"; exit $?;;
--) shift
break;;
--) shift
break;;
-*) echo "$0: invalid option: $1" >&2
exit 1;;
-*) echo "$0: invalid option: $1" >&2
exit 1;;
*) break;;
esac
shift
done
# We allow the use of options -d and -T together, by making -d
# take the precedence; this is for compatibility with GNU install.
if test -n "$dir_arg"; then
if test -n "$dst_arg"; then
echo "$0: target directory not allowed when installing a directory." >&2
exit 1
fi
fi
if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
# When -d is used, all remaining arguments are directories to create.
# When -t is used, the destination is already specified.
@ -207,6 +223,15 @@ if test $# -eq 0; then
exit 0
fi
if test -z "$dir_arg"; then
if test $# -gt 1 || test "$is_target_a_directory" = always; then
if test ! -d "$dst_arg"; then
echo "$0: $dst_arg: Is not a directory." >&2
exit 1
fi
fi
fi
if test -z "$dir_arg"; then
do_exit='(exit $ret); exit $ret'
trap "ret=129; $do_exit" 1
@ -223,16 +248,16 @@ if test -z "$dir_arg"; then
*[0-7])
if test -z "$stripcmd"; then
u_plus_rw=
u_plus_rw=
else
u_plus_rw='% 200'
u_plus_rw='% 200'
fi
cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
*)
if test -z "$stripcmd"; then
u_plus_rw=
u_plus_rw=
else
u_plus_rw=,u+rw
u_plus_rw=,u+rw
fi
cp_umask=$mode$u_plus_rw;;
esac
@ -250,6 +275,10 @@ do
dstdir=$dst
test -d "$dstdir"
dstdir_status=$?
# Don't chown directories that already exist.
if test $dstdir_status = 0; then
chowncmd=""
fi
else
# Waiting for this to be detected by the "$cpprog $src $dsttmp" command
@ -266,178 +295,148 @@ do
fi
dst=$dst_arg
# If destination is a directory, append the input filename; won't work
# if double slashes aren't ignored.
# If destination is a directory, append the input filename.
if test -d "$dst"; then
if test -n "$no_target_directory"; then
echo "$0: $dst_arg: Is a directory" >&2
exit 1
if test "$is_target_a_directory" = never; then
echo "$0: $dst_arg: Is a directory" >&2
exit 1
fi
dstdir=$dst
dst=$dstdir/`basename "$src"`
dstbase=`basename "$src"`
case $dst in
*/) dst=$dst$dstbase;;
*) dst=$dst/$dstbase;;
esac
dstdir_status=0
else
# Prefer dirname, but fall back on a substitute if dirname fails.
dstdir=`
(dirname "$dst") 2>/dev/null ||
expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
X"$dst" : 'X\(//\)[^/]' \| \
X"$dst" : 'X\(//\)$' \| \
X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
echo X"$dst" |
sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
s//\1/
q
}
/^X\(\/\/\)[^/].*/{
s//\1/
q
}
/^X\(\/\/\)$/{
s//\1/
q
}
/^X\(\/\).*/{
s//\1/
q
}
s/.*/./; q'
`
dstdir=`dirname "$dst"`
test -d "$dstdir"
dstdir_status=$?
fi
fi
case $dstdir in
*/) dstdirslash=$dstdir;;
*) dstdirslash=$dstdir/;;
esac
obsolete_mkdir_used=false
if test $dstdir_status != 0; then
case $posix_mkdir in
'')
# Create intermediate dirs using mode 755 as modified by the umask.
# This is like FreeBSD 'install' as of 1997-10-28.
umask=`umask`
case $stripcmd.$umask in
# Optimize common cases.
*[2367][2367]) mkdir_umask=$umask;;
.*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
# With -d, create the new directory with the user-specified mode.
# Otherwise, rely on $mkdir_umask.
if test -n "$dir_arg"; then
mkdir_mode=-m$mode
else
mkdir_mode=
fi
*[0-7])
mkdir_umask=`expr $umask + 22 \
- $umask % 100 % 40 + $umask % 20 \
- $umask % 10 % 4 + $umask % 2
`;;
*) mkdir_umask=$umask,go-w;;
esac
posix_mkdir=false
# The $RANDOM variable is not portable (e.g., dash). Use it
# here however when possible just to lower collision chance.
tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
# With -d, create the new directory with the user-specified mode.
# Otherwise, rely on $mkdir_umask.
if test -n "$dir_arg"; then
mkdir_mode=-m$mode
trap '
ret=$?
rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null
exit $ret
' 0
# Because "mkdir -p" follows existing symlinks and we likely work
# directly in world-writeable /tmp, make sure that the '$tmpdir'
# directory is successfully created first before we actually test
# 'mkdir -p'.
if (umask $mkdir_umask &&
$mkdirprog $mkdir_mode "$tmpdir" &&
exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1
then
if test -z "$dir_arg" || {
# Check for POSIX incompatibilities with -m.
# HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
# other-writable bit of parent directory when it shouldn't.
# FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
test_tmpdir="$tmpdir/a"
ls_ld_tmpdir=`ls -ld "$test_tmpdir"`
case $ls_ld_tmpdir in
d????-?r-*) different_mode=700;;
d????-?--*) different_mode=755;;
*) false;;
esac &&
$mkdirprog -m$different_mode -p -- "$test_tmpdir" && {
ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"`
test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
}
}
then posix_mkdir=:
fi
rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir"
else
mkdir_mode=
# Remove any dirs left behind by ancient mkdir implementations.
rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null
fi
posix_mkdir=false
case $umask in
*[123567][0-7][0-7])
# POSIX mkdir -p sets u+wx bits regardless of umask, which
# is incompatible with FreeBSD 'install' when (umask & 300) != 0.
;;
*)
tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
if (umask $mkdir_umask &&
exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
then
if test -z "$dir_arg" || {
# Check for POSIX incompatibilities with -m.
# HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
# other-writable bit of parent directory when it shouldn't.
# FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
ls_ld_tmpdir=`ls -ld "$tmpdir"`
case $ls_ld_tmpdir in
d????-?r-*) different_mode=700;;
d????-?--*) different_mode=755;;
*) false;;
esac &&
$mkdirprog -m$different_mode -p -- "$tmpdir" && {
ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
}
}
then posix_mkdir=:
fi
rmdir "$tmpdir/d" "$tmpdir"
else
# Remove any dirs left behind by ancient mkdir implementations.
rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
fi
trap '' 0;;
esac;;
trap '' 0;;
esac
if
$posix_mkdir && (
umask $mkdir_umask &&
$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
umask $mkdir_umask &&
$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
)
then :
else
# The umask is ridiculous, or mkdir does not conform to POSIX,
# mkdir does not conform to POSIX,
# or it failed possibly due to a race condition. Create the
# directory the slow way, step by step, checking for races as we go.
case $dstdir in
/*) prefix='/';;
[-=\(\)!]*) prefix='./';;
*) prefix='';;
/*) prefix='/';;
[-=\(\)!]*) prefix='./';;
*) prefix='';;
esac
eval "$initialize_posix_glob"
oIFS=$IFS
IFS=/
$posix_glob set -f
set -f
set fnord $dstdir
shift
$posix_glob set +f
set +f
IFS=$oIFS
prefixes=
for d
do
test X"$d" = X && continue
test X"$d" = X && continue
prefix=$prefix$d
if test -d "$prefix"; then
prefixes=
else
if $posix_mkdir; then
(umask=$mkdir_umask &&
$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
# Don't fail if two instances are running concurrently.
test -d "$prefix" || exit 1
else
case $prefix in
*\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
*) qprefix=$prefix;;
esac
prefixes="$prefixes '$qprefix'"
fi
fi
prefix=$prefix/
prefix=$prefix$d
if test -d "$prefix"; then
prefixes=
else
if $posix_mkdir; then
(umask $mkdir_umask &&
$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
# Don't fail if two instances are running concurrently.
test -d "$prefix" || exit 1
else
case $prefix in
*\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
*) qprefix=$prefix;;
esac
prefixes="$prefixes '$qprefix'"
fi
fi
prefix=$prefix/
done
if test -n "$prefixes"; then
# Don't fail if two instances are running concurrently.
(umask $mkdir_umask &&
eval "\$doit_exec \$mkdirprog $prefixes") ||
test -d "$dstdir" || exit 1
obsolete_mkdir_used=true
# Don't fail if two instances are running concurrently.
(umask $mkdir_umask &&
eval "\$doit_exec \$mkdirprog $prefixes") ||
test -d "$dstdir" || exit 1
obsolete_mkdir_used=true
fi
fi
fi
@ -450,14 +449,25 @@ do
else
# Make a couple of temp file names in the proper directory.
dsttmp=$dstdir/_inst.$$_
rmtmp=$dstdir/_rm.$$_
dsttmp=${dstdirslash}_inst.$$_
rmtmp=${dstdirslash}_rm.$$_
# Trap to clean up those temp files at exit.
trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
# Copy the file name to the temp name.
(umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
(umask $cp_umask &&
{ test -z "$stripcmd" || {
# Create $dsttmp read-write so that cp doesn't create it read-only,
# which would cause strip to fail.
if test -z "$doit"; then
: >"$dsttmp" # No need to fork-exec 'touch'.
else
$doit touch "$dsttmp"
fi
}
} &&
$doit_exec $cpprog "$src" "$dsttmp") &&
# and set any options; do chmod last to preserve setuid bits.
#
@ -472,20 +482,24 @@ do
# If -C, don't bother to copy if it wouldn't change the file.
if $copy_on_change &&
old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
eval "$initialize_posix_glob" &&
$posix_glob set -f &&
old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
set -f &&
set X $old && old=:$2:$4:$5:$6 &&
set X $new && new=:$2:$4:$5:$6 &&
$posix_glob set +f &&
set +f &&
test "$old" = "$new" &&
$cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
then
rm -f "$dsttmp"
else
# If $backupsuffix is set, and the file being installed
# already exists, attempt a backup. Don't worry if it fails,
# e.g., if mv doesn't support -f.
if test -n "$backupsuffix" && test -f "$dst"; then
$doit $mvcmd -f "$dst" "$dst$backupsuffix" 2>/dev/null
fi
# Rename the file to the real destination.
$doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
@ -493,24 +507,24 @@ do
# to itself, or perhaps because mv is so ancient that it does not
# support -f.
{
# Now remove or move aside any old file at destination location.
# We try this two ways since rm can't unlink itself on some
# systems and the destination file might be busy for other
# reasons. In this case, the final cleanup might fail but the new
# file should still install successfully.
{
test ! -f "$dst" ||
$doit $rmcmd -f "$dst" 2>/dev/null ||
{ $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
{ $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
} ||
{ echo "$0: cannot unlink or rename $dst" >&2
(exit 1); exit 1
}
} &&
# Now remove or move aside any old file at destination location.
# We try this two ways since rm can't unlink itself on some
# systems and the destination file might be busy for other
# reasons. In this case, the final cleanup might fail but the new
# file should still install successfully.
{
test ! -f "$dst" ||
$doit $rmcmd "$dst" 2>/dev/null ||
{ $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
{ $doit $rmcmd "$rmtmp" 2>/dev/null; :; }
} ||
{ echo "$0: cannot unlink or rename $dst" >&2
(exit 1); exit 1
}
} &&
# Now rename the file to the real destination.
$doit $mvcmd "$dsttmp" "$dst"
# Now rename the file to the real destination.
$doit $mvcmd "$dsttmp" "$dst"
}
fi || exit 1
@ -519,9 +533,9 @@ do
done
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# eval: (add-hook 'before-save-hook 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-time-zone: "UTC0"
# time-stamp-end: "; # UTC"
# End:

View File

@ -0,0 +1,32 @@
dnl TR_CIVETWEB_HOME
dnl ----------------
dnl Process --with-civetweb@<:@=DIR@:>@.  On exit, substitutes:
dnl   CIVETWEB_HOME - civetweb install prefix ("" when disabled or not found)
dnl   USE_CIVETWEB  - "1" to build the web-server support, "0" otherwise
AC_DEFUN([TR_CIVETWEB_HOME],[
AC_ARG_WITH([civetweb],
AS_HELP_STRING([--with-civetweb@<:@=DIR@:>@],
[CIVETWEB root directory]),
# --with option was provided.
[CIVETWEB_HOME="$withval"
USE_CIVETWEB="0"
# check whether directory arg was also provided.
AS_IF([test "$CIVETWEB_HOME" = "yes"],
AC_CHECK_HEADER(civetweb.h,
[CIVETWEB_HOME="/usr"; USE_CIVETWEB="1"],
AC_MSG_ERROR([could not find civetweb.h])),
# else check whether --without-civet or --with-civet=no specified.
AS_IF([test "$CIVETWEB_HOME" = "no"],
[CIVETWEB_HOME=""],
# else --with-civet was provided with a directory path.
AC_CHECK_FILES([$CIVETWEB_HOME/include/civetweb.h $CIVETWEB_HOME/lib/libcivetweb.a],
[USE_CIVETWEB="1"],
AC_MSG_ERROR([Could not find all of the civetweb files.]))
))
],
# --with option not provided.
[AC_CHECK_HEADER(civetweb.h,
[CIVETWEB_HOME="/usr"; USE_CIVETWEB="1"],
[CIVETWEB_HOME=""; USE_CIVETWEB="0"])
]
)
AC_SUBST([CIVETWEB_HOME])
AC_SUBST([USE_CIVETWEB])
])

View File

@ -0,0 +1,11 @@
dnl TR_CLANG_VERSION: extract an "x.y.z" version from `$CLANG --version`
dnl and AC_SUBST it as CLANG_VERSION ("" when nothing can be parsed).
dnl NOTE(review): assumes $CLANG is already set by an earlier check.
AC_DEFUN([TR_CLANG_VERSION], [
CLANG_VERSION=""
ax_cv_clang_version="`$CLANG --version | grep "version" | sed "s/.*version \([0-9]*\.[0-9]*\.[0-9]*\).*/\1/"`"
AS_IF([test "x$ax_cv_clang_version" = "x"],[
ax_cv_clang_version=""
])
CLANG_VERSION=$ax_cv_clang_version
AC_SUBST([CLANG_VERSION])
])

View File

@ -0,0 +1,11 @@
dnl test if we want to use er7_utils, default to yes
dnl TR_ER7_UTILS: substitute USE_ER7_UTILS as "1" unless --disable-er7utils.
AC_DEFUN([TR_ER7_UTILS],[
AC_ARG_ENABLE([er7utils],
AS_HELP_STRING([--enable-er7utils], [use er7_utils (default is yes)]),
AS_IF([test "x$enable_er7utils" = xyes], [USE_ER7_UTILS="1"], [USE_ER7_UTILS="0"]),
[USE_ER7_UTILS="1"]
)
AC_SUBST([USE_ER7_UTILS])
])

View File

@ -0,0 +1,17 @@
dnl TR_GCC_VERSION: determine the gcc version and AC_SUBST it as
dnl GCC_VERSION ("" when $CC is not gcc or the probe is disabled via
dnl $ax_gcc_version_option).  The result is cached in ax_cv_gcc_version.
AC_DEFUN([TR_GCC_VERSION], [
GCC_VERSION=""
AS_IF([test "x$GCC" = "xyes"],[
AS_IF([test "x$ax_gcc_version_option" != "xno"],[
AC_CACHE_CHECK([gcc version],[ax_cv_gcc_version],[
dnl -dumpfullversion -dumpversion yields the full x.y.z on gcc >= 7,
dnl where plain -dumpversion prints only the major version.
ax_cv_gcc_version="`$CC -dumpfullversion -dumpversion`"
AS_IF([test "x$ax_cv_gcc_version" = "x"],[
ax_cv_gcc_version=""
])
])
GCC_VERSION=$ax_cv_gcc_version
])
])
AC_SUBST([GCC_VERSION])
])

View File

@ -0,0 +1,26 @@
dnl TR_GSL_HOME: process --with-gsl@<:@=DIR@:>@.  Verifies the gsl headers
dnl (and, when a prefix is known, links against libgsl) and AC_SUBSTs
dnl GSL_HOME ("" when disabled or not found).
AC_DEFUN([TR_GSL_HOME],[
AC_ARG_WITH([gsl],
AS_HELP_STRING([--with-gsl@<:@=DIR@:>@], [GSL root directory]),
[GSL_HOME="$withval"
AS_IF([test "$GSL_HOME" = "yes"],
AC_CHECK_HEADER(gsl/gsl_rng.h,[GSL_HOME="/usr"],AC_MSG_ERROR([could not find gsl/gsl_rng.h])),
AS_IF([test "$GSL_HOME" = "no"],[GSL_HOME=""],
AC_CHECK_FILE([$GSL_HOME/include/gsl],
[],
AC_MSG_ERROR([could not find $GSL_HOME/include/gsl])
)
)
)],
[AC_CHECK_HEADER(gsl/gsl_rng.h, [GSL_HOME="/usr"], [GSL_HOME=""])]
)
AS_IF([test "$GSL_HOME" != ""],
[
LDFLAGS="${LDFLAGS} -L${GSL_HOME}/lib"
dnl -lgslcblas is supplied as an extra library for the link probe.
AC_CHECK_LIB(gsl, main, [],AC_MSG_ERROR([could not find libgsl]),[-lgslcblas])
],
[]
)
AC_SUBST([GSL_HOME])
])

View File

@ -0,0 +1,19 @@
dnl TR_GTEST_HOME: process --with-gtest@<:@=DIR@:>@.  Verifies
dnl gtest/gtest.h (or DIR/include/gtest) and AC_SUBSTs GTEST_HOME
dnl ("" when disabled or not found).
AC_DEFUN([TR_GTEST_HOME],[
AC_ARG_WITH([gtest],
AS_HELP_STRING([--with-gtest@<:@=DIR@:>@], [GTEST root directory]),
[GTEST_HOME="$withval"
AS_IF([test "$GTEST_HOME" = "yes"],
AC_CHECK_HEADER(gtest/gtest.h,[GTEST_HOME="/usr"],AC_MSG_ERROR([could not find gtest/gtest.h])),
AS_IF([test "$GTEST_HOME" = "no"],[GTEST_HOME=""],
AC_CHECK_FILE([$GTEST_HOME/include/gtest],
[],
AC_MSG_ERROR([could not find $GTEST_HOME/include/gtest])
)
)
)],
[AC_CHECK_HEADER(gtest/gtest.h, [GTEST_HOME="/usr"], [GTEST_HOME=""])]
)
AC_SUBST([GTEST_HOME])
])

View File

@ -0,0 +1,28 @@
dnl TR_HDF5_HOME: process --with-hdf5@<:@=DIR@:>@.  Verifies hdf5.h (or
dnl DIR/include/hdf5.h), links against libhdf5 when a prefix is known,
dnl and AC_SUBSTs HDF5_HOME ("" when disabled or not found).
AC_DEFUN([TR_HDF5_HOME],[
AC_ARG_WITH([hdf5],
AS_HELP_STRING([--with-hdf5@<:@=DIR@:>@], [HDF5 root directory]),
[HDF5_HOME="$withval"
AS_IF([test "$HDF5_HOME" = "yes"],
AC_CHECK_HEADER(hdf5.h,[HDF5_HOME="/usr"],AC_MSG_ERROR([could not find hdf5.h])),
AS_IF([test "$HDF5_HOME" = "no"],[HDF5_HOME=""],
AC_CHECK_FILE([$HDF5_HOME/include/hdf5.h],
[],
AC_MSG_ERROR([could not find $HDF5_HOME/include/hdf5.h])
)
)
)],
[
AC_CHECK_HEADER(hdf5.h, [HDF5_HOME="/usr"], [HDF5_HOME=""])
]
)
AS_IF([test "$HDF5_HOME" != ""],
[
LDFLAGS="${LDFLAGS} -L${HDF5_HOME}/lib"
AC_CHECK_LIB(hdf5, main, [],AC_MSG_ERROR([could not find libhdf5]))
],
dnl Bug fix: the comma separating the action-if-true argument from the
dnl empty action-if-false argument was missing, so both bracket groups
dnl collapsed into one AS_IF argument (siblings such as TR_GSL_HOME
dnl separate them correctly).
[]
)
AC_SUBST([HDF5_HOME])
])

25
autoconf/m4/tr_java.m4 Normal file
View File

@ -0,0 +1,25 @@
dnl test if we want to use java, default to yes
dnl TR_JAVA: substitute USE_JAVA ("1" unless --disable-java).  When java
dnl is enabled: locate a java compiler (AX_PROG_JAVA_CC), require its
dnl reported version >= 1.8, and, when TRICK_OFFLINE is "0", require mvn.
AC_DEFUN([TR_JAVA],[
AC_ARG_ENABLE([java],
AS_HELP_STRING([--enable-java], [use java (default is yes)]),
AS_IF([test "x$enable_java" = xyes], [USE_JAVA="1"], [USE_JAVA="0"]),
[USE_JAVA="1"]
)
AC_SUBST([USE_JAVA])
AS_IF([test "$USE_JAVA" = "1"],[
AC_PROG_AWK
AX_PROG_JAVA_CC(javac)
dnl Extract the first number (e.g. "11" or "1.8") from `javac -version`.
JAVA_VER=`$JAVA_CC -version 2>&1 | $ac_cv_path_PERL -ne 'print $& if /\d+(\.\d+)?/'`
AC_MSG_CHECKING([$JAVA_CC version >= 1.8])
AX_COMPARE_VERSION([$JAVA_VER],[ge],[1.8], [AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_ERROR([Trick requires javac version >= 1.8])
])
AS_IF([test "$TRICK_OFFLINE" = "0"], [
AC_PATH_PROG(MVN, mvn, nomvn)
AS_IF([test "$ac_cv_path_MVN" = "nomvn"],AC_MSG_ERROR([could not find maven]),[])
], [])
],[])
])

View File

@ -0,0 +1,16 @@
dnl if fermi-ware directory exists, test for motif.
dnl TR_JSC_DIRS: only when the trick_source/data_products/fermi-ware
dnl sources are present is Motif required; probe Xm/Xm.h in /usr,
dnl /usr/local, then /sw, erroring out if none has it.  Substitutes
dnl MOTIF_HOME ("" when fermi-ware is absent).
AC_DEFUN([TR_JSC_DIRS],[
AC_CHECK_FILE([trick_source/data_products/fermi-ware],
[
AC_CHECK_HEADER(Xm/Xm.h, [MOTIF_HOME="/usr"],
AC_CHECK_FILE(/usr/local/include/Xm/Xm.h, [MOTIF_HOME="/usr/local"],
AC_CHECK_FILE(/sw/include/Xm/Xm.h, [MOTIF_HOME="/sw"],AC_MSG_ERROR([could not find Xm/Xm.h]))))
],
[
MOTIF_HOME=""
]
)
AC_SUBST([MOTIF_HOME])
])

View File

@ -0,0 +1,16 @@
dnl TR_LLVM_HOME: process --with-llvm@<:@=DIR@:>@.  Locates llvm-config
dnl either under DIR/bin or on a default search path; in the latter case
dnl LLVM_HOME is derived from `llvm-config --prefix`.  Errors out when
dnl llvm-config cannot be found.
AC_DEFUN([TR_LLVM_HOME],[
AC_ARG_WITH([llvm],
AS_HELP_STRING([--with-llvm@<:@=DIR@:>@], [LLVM root directory]),
[LLVM_HOME="$withval"
AC_PATH_PROG(LLVM_CONFIG, llvm-config, no-llvm-config, "$LLVM_HOME/bin")
AS_IF([test "$ac_cv_path_LLVM_CONFIG" = "no-llvm-config"],AC_MSG_ERROR([could not find llvm-config]),[])
],
[
AC_PATH_PROG(LLVM_CONFIG, llvm-config, no-llvm-config, "/bin:/usr/bin:/usr/local/bin:/sw/bin:/usr/local/opt/llvm/bin")
AS_IF([test "$ac_cv_path_LLVM_CONFIG" = "no-llvm-config"],AC_MSG_ERROR([could not find llvm-config]),[])
LLVM_HOME=`$LLVM_CONFIG --prefix`
]
)
])

View File

@ -0,0 +1,13 @@
dnl Set ON_MAC=yes if we are on a mac (darwin host)
dnl NOTE(review): relies on $host_os, so AC_CANONICAL_HOST must already
dnl have run in the including configure script.
AC_DEFUN([TR_MAC_DARWIN], [
dnl AC_CANONICAL_HOST provides platform. MacOSX is diverging... have to do special things
ON_MAC=no
case "${host_os}" in
darwin*)
ON_MAC=yes
;;
*)
;;
esac
])

View File

@ -0,0 +1,19 @@
dnl test if want to prepend /usr/local/bin to PATH
dnl AC_ARG_WITH (package, help-string, [action-if-given], [action-if-not-given])
dnl Behavior: a bare --with-prepend-path prepends /usr/local/bin;
dnl --with-prepend-path=DIR prepends DIR; --without-prepend-path (or
dnl omitting the option entirely) leaves PATH untouched.
AC_DEFUN([TR_PREPEND_PATH],
[AC_ARG_WITH(
[prepend-path],
[AS_HELP_STRING([--with-prepend-path@<:@=DIR@:>@], [specify a directory to prepend to PATH (default is /usr/local/bin). Use --without-prepend-path for no directory.])],
[AS_IF(
[test "x${with_prepend_path}" = xyes],
[PATH="/usr/local/bin:${PATH}"],
[AS_IF(
[test "x${with_prepend_path}" != xno],
[PATH="${withval}:${PATH}"],
[]
)]
)],
[]
)]
)

View File

@ -0,0 +1,14 @@
dnl TR_SWIG_BIN: locate SWIG via AX_PKG_SWIG, honoring --with-swig=DIR.
dnl $1 is the minimum SWIG version passed through to AX_PKG_SWIG.
dnl When DIR is given, PATH is temporarily prepended with it so the probe
dnl finds the requested executable, then PATH is restored.
AC_DEFUN([TR_SWIG_BIN],[
AC_ARG_WITH([swig],
[AS_HELP_STRING([--with-swig@<:@=DIR@:>@], [path of directory containing the SWIG executable.])],
[
TEMP_PATH="${PATH}"
PATH="$withval:${PATH}"
AX_PKG_SWIG($1, [], [AC_MSG_ERROR([Trick requires SWIG version >= 2.0])])
PATH="${TEMP_PATH}"
],
[AX_PKG_SWIG($1, [], [AC_MSG_ERROR([Trick requires SWIG version >= 2.0])])]
)
])

View File

@ -0,0 +1,10 @@
dnl TR_UDUNITS_HOME: record --with-udunits=DIR in UDUNITS_HOME and
dnl AC_SUBST it.  No validation is done here; callers probe the headers
dnl and library themselves (UDUNITS_HOME is "" when the option is absent).
AC_DEFUN([TR_UDUNITS_HOME],[
AC_ARG_WITH([udunits],
AS_HELP_STRING([--with-udunits@<:@=DIR@:>@], [UDUnits root directory]),
[UDUNITS_HOME="$withval"],
[UDUNITS_HOME=""]
)
AC_SUBST([UDUNITS_HOME])
])

16
autoconf/m4/tr_x11.m4 Normal file
View File

@ -0,0 +1,16 @@
dnl find xml headers based on host type
dnl TR_X11: append the libxml2 include path (plus, on macOS, the X11 and
dnl Xcode SDK include paths discovered via xcrun) to CFLAGS/CPPFLAGS.
dnl Requires ON_MAC to be set first (see TR_MAC_DARWIN).
AC_DEFUN([TR_X11], [
AS_IF([test "$ON_MAC" = "yes"],
[
AC_PATH_PROG(XCRUN, xcrun, noxcrun)
dnl Bug fix: the fallback value registered above is "noxcrun", but the
dnl old test compared against "norun", so the missing-xcrun error could
dnl never trigger.
AS_IF([test "$ac_cv_path_XCRUN" = "noxcrun"],AC_MSG_ERROR([could not find xcrun - install Xcode command line tools]),[])
XCODE_SDK_PATH=`$XCRUN --show-sdk-path`
XTRAINCPATHS="-I/usr/X11/include -I${XCODE_SDK_PATH}/usr/include -I${XCODE_SDK_PATH}/usr/include/libxml2"
],
[
XTRAINCPATHS="-I/usr/include/libxml2/"
])
CFLAGS="$CFLAGS $XTRAINCPATHS"
CPPFLAGS="$CPPFLAGS $XTRAINCPATHS"
])

View File

@ -0,0 +1,19 @@
dnl look for X windows libraries and headers we need to compile
dnl TR_XWINDOWS: run AC_PATH_X and substitute USE_X_WINDOWS (1/0) and
dnl X_LIB_DIR.  When X is available, also set X_INCLUDE_DIR/X_LIB_DIR
dnl from the discovered locations, require X11/Intrinsic.h (libxt), and
dnl run TR_JSC_DIRS to probe for Motif.
AC_DEFUN([TR_XWINDOWS], [
AC_PATH_X
AS_IF([test "$no_x" = "yes"],
[
USE_X_WINDOWS=0
],
[
USE_X_WINDOWS=1
AS_IF([test "x$x_includes" = "x"],[],[X_INCLUDE_DIR=-I$x_includes])
AS_IF([test "x$x_libraries" = "x"],[],[X_LIB_DIR=-L$x_libraries])
AC_CHECK_HEADER([X11/Intrinsic.h],[],AC_MSG_ERROR([could not find libxt development headers]))
TR_JSC_DIRS([])
]
)
AC_SUBST([USE_X_WINDOWS])
AC_SUBST([X_LIB_DIR])
])

18
autoconf/missing Executable file → Normal file
View File

@ -1,9 +1,9 @@
#! /bin/sh
# Common wrapper for a few potentially missing GNU programs.
scriptversion=2012-06-26.16; # UTC
scriptversion=2018-03-07.03; # UTC
# Copyright (C) 1996-2013 Free Software Foundation, Inc.
# Copyright (C) 1996-2022 Free Software Foundation, Inc.
# Originally written by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.
# This program is free software; you can redistribute it and/or modify
@ -17,7 +17,7 @@ scriptversion=2012-06-26.16; # UTC
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@ -101,9 +101,9 @@ else
exit $st
fi
perl_URL=http://www.perl.org/
flex_URL=http://flex.sourceforge.net/
gnu_software_URL=http://www.gnu.org/software
perl_URL=https://www.perl.org/
flex_URL=https://github.com/westes/flex
gnu_software_URL=https://www.gnu.org/software
program_details ()
{
@ -160,7 +160,7 @@ give_advice ()
;;
autom4te*)
echo "You might have modified some maintainer files that require"
echo "the 'automa4te' program to be rebuilt."
echo "the 'autom4te' program to be rebuilt."
program_details 'autom4te'
;;
bison*|yacc*)
@ -207,9 +207,9 @@ give_advice "$1" | sed -e '1s/^/WARNING: /' \
exit $st
# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# eval: (add-hook 'before-save-hook 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-time-zone: "UTC0"
# time-stamp-end: "; # UTC"
# End:

View File

@ -60,7 +60,7 @@ if ( -f $sdefine ) {
system("make -f makefile " . $makefileAddArgs) ;
exit $? >> 8;
} else {
print "S_define does not exist" ;
print "S_define does not exist\n" ;
exit 1 ;
}

View File

@ -134,7 +134,7 @@ print "-" x 50 . "\n";
printf "%25s%s", "Kelvin: ", "K, °K, degK"; print "\n";
printf "%25s%s", "Centigrade: ", "°C, degC"; print "\n";
printf "%25s%s", "Fahrenheit: ", "°F, degF"; print "\n";
printf "%25s%s", "Rankine: ", "°R, degF"; print "\n";
printf "%25s%s", "Rankine: ", "°R, degR"; print "\n";
print "\n";

505
configure generated vendored
View File

@ -717,7 +717,6 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@ -811,7 +810,6 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
@ -1064,15 +1062,6 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
-runstatedir | --runstatedir | --runstatedi | --runstated \
| --runstate | --runstat | --runsta | --runst | --runs \
| --run | --ru | --r)
ac_prev=runstatedir ;;
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
| --run=* | --ru=* | --r=*)
runstatedir=$ac_optarg ;;
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@ -1210,7 +1199,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir runstatedir
libdir localedir mandir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@ -1363,7 +1352,6 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@ -1413,10 +1401,10 @@ Optional Packages:
--without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no)
--with-x use the X Window System
--with-python[=DIR] python root directory
--without-prepend-path do not prepend to path (this is default)
--with-prepend-path[=DIR]
specify a directory to prepend to PATH (default is
/usr/local/bin)
/usr/local/bin). Use --without-prepend-path for no
directory.
--with-swig[=DIR] path of directory containing the SWIG executable.
--with-llvm[=DIR] LLVM root directory
--with-zlib=DIR root directory path of zlib installation [defaults to
@ -2406,36 +2394,6 @@ ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var.
# Make sure we can run config.sub.
$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
@ -2509,19 +2467,20 @@ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
ON_MAC=no
case "${host_os}" in
darwin*)
ON_MAC=yes
;;
*)
;;
esac
ON_MAC=no
case "${host_os}" in
darwin*)
ON_MAC=yes
;;
*)
;;
esac
if test "$ON_MAC" = "yes"; then :
if test "$ON_MAC" = "yes"; then :
# Extract the first word of "xcrun", so it can be a program name with args.
# Extract the first word of "xcrun", so it can be a program name with args.
set dummy xcrun; ac_word=$2
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
$as_echo_n "checking for $ac_word... " >&6; }
@ -2562,19 +2521,20 @@ $as_echo "no" >&6; }
fi
if test "$ac_cv_path_XCRUN" = "norun"; then :
if test "$ac_cv_path_XCRUN" = "norun"; then :
as_fn_error $? "could not find xcrun - install Xcode command line tools" "$LINENO" 5
fi
XCODE_SDK_PATH=`$XCRUN --show-sdk-path`
XTRAINCPATHS="-I/usr/X11/include -I${XCODE_SDK_PATH}/usr/include -I${XCODE_SDK_PATH}/usr/include/libxml2"
XCODE_SDK_PATH=`$XCRUN --show-sdk-path`
XTRAINCPATHS="-I/usr/X11/include -I${XCODE_SDK_PATH}/usr/include -I${XCODE_SDK_PATH}/usr/include/libxml2"
else
XTRAINCPATHS="-I/usr/include/libxml2/"
XTRAINCPATHS="-I/usr/include/libxml2/"
fi
CFLAGS="$CFLAGS $XTRAINCPATHS"
CPPFLAGS="$CPPFLAGS $XTRAINCPATHS"
CFLAGS="$CFLAGS $XTRAINCPATHS"
CPPFLAGS="$CPPFLAGS $XTRAINCPATHS"
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
@ -3218,199 +3178,6 @@ ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ex
ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for X" >&5
$as_echo_n "checking for X... " >&6; }
# Check whether --with-x was given.
if test "${with_x+set}" = set; then :
withval=$with_x;
fi
# $have_x is `yes', `no', `disabled', or empty when we do not yet know.
if test "x$with_x" = xno; then
# The user explicitly disabled X.
have_x=disabled
else
case $x_includes,$x_libraries in #(
*\'*) as_fn_error $? "cannot use X directory names containing '" "$LINENO" 5;; #(
*,NONE | NONE,*) if ${ac_cv_have_x+:} false; then :
$as_echo_n "(cached) " >&6
else
# One or both of the vars are not set, and there is no cached value.
ac_x_includes=no ac_x_libraries=no
rm -f -r conftest.dir
if mkdir conftest.dir; then
cd conftest.dir
cat >Imakefile <<'_ACEOF'
incroot:
@echo incroot='${INCROOT}'
usrlibdir:
@echo usrlibdir='${USRLIBDIR}'
libdir:
@echo libdir='${LIBDIR}'
_ACEOF
if (export CC; ${XMKMF-xmkmf}) >/dev/null 2>/dev/null && test -f Makefile; then
# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
for ac_var in incroot usrlibdir libdir; do
eval "ac_im_$ac_var=\`\${MAKE-make} $ac_var 2>/dev/null | sed -n 's/^$ac_var=//p'\`"
done
# Open Windows xmkmf reportedly sets LIBDIR instead of USRLIBDIR.
for ac_extension in a so sl dylib la dll; do
if test ! -f "$ac_im_usrlibdir/libX11.$ac_extension" &&
test -f "$ac_im_libdir/libX11.$ac_extension"; then
ac_im_usrlibdir=$ac_im_libdir; break
fi
done
# Screen out bogus values from the imake configuration. They are
# bogus both because they are the default anyway, and because
# using them would break gcc on systems where it needs fixed includes.
case $ac_im_incroot in
/usr/include) ac_x_includes= ;;
*) test -f "$ac_im_incroot/X11/Xos.h" && ac_x_includes=$ac_im_incroot;;
esac
case $ac_im_usrlibdir in
/usr/lib | /usr/lib64 | /lib | /lib64) ;;
*) test -d "$ac_im_usrlibdir" && ac_x_libraries=$ac_im_usrlibdir ;;
esac
fi
cd ..
rm -f -r conftest.dir
fi
# Standard set of common directories for X headers.
# Check X11 before X11Rn because it is often a symlink to the current release.
ac_x_header_dirs='
/usr/X11/include
/usr/X11R7/include
/usr/X11R6/include
/usr/X11R5/include
/usr/X11R4/include
/usr/include/X11
/usr/include/X11R7
/usr/include/X11R6
/usr/include/X11R5
/usr/include/X11R4
/usr/local/X11/include
/usr/local/X11R7/include
/usr/local/X11R6/include
/usr/local/X11R5/include
/usr/local/X11R4/include
/usr/local/include/X11
/usr/local/include/X11R7
/usr/local/include/X11R6
/usr/local/include/X11R5
/usr/local/include/X11R4
/usr/X386/include
/usr/x386/include
/usr/XFree86/include/X11
/usr/include
/usr/local/include
/usr/unsupported/include
/usr/athena/include
/usr/local/x11r5/include
/usr/lpp/Xamples/include
/usr/openwin/include
/usr/openwin/share/include'
if test "$ac_x_includes" = no; then
# Guess where to find include files, by looking for Xlib.h.
# First, try using that file with no special directory specified.
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <X11/Xlib.h>
_ACEOF
if ac_fn_cxx_try_cpp "$LINENO"; then :
# We can compile using X headers with no special include directory.
ac_x_includes=
else
for ac_dir in $ac_x_header_dirs; do
if test -r "$ac_dir/X11/Xlib.h"; then
ac_x_includes=$ac_dir
break
fi
done
fi
rm -f conftest.err conftest.i conftest.$ac_ext
fi # $ac_x_includes = no
if test "$ac_x_libraries" = no; then
# Check for the libraries.
# See if we find them without any special options.
# Don't add to $LIBS permanently.
ac_save_LIBS=$LIBS
LIBS="-lX11 $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <X11/Xlib.h>
int
main ()
{
XrmInitialize ()
;
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
LIBS=$ac_save_LIBS
# We can link X programs with no special library path.
ac_x_libraries=
else
LIBS=$ac_save_LIBS
for ac_dir in `$as_echo "$ac_x_includes $ac_x_header_dirs" | sed s/include/lib/g`
do
# Don't even attempt the hair of trying to link an X program!
for ac_extension in a so sl dylib la dll; do
if test -r "$ac_dir/libX11.$ac_extension"; then
ac_x_libraries=$ac_dir
break 2
fi
done
done
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi # $ac_x_libraries = no
case $ac_x_includes,$ac_x_libraries in #(
no,* | *,no | *\'*)
# Didn't find X, or a directory has "'" in its name.
ac_cv_have_x="have_x=no";; #(
*)
# Record where we found X for the cache.
ac_cv_have_x="have_x=yes\
ac_x_includes='$ac_x_includes'\
ac_x_libraries='$ac_x_libraries'"
esac
fi
;; #(
*) have_x=yes;;
esac
eval "$ac_cv_have_x"
fi # $with_x != no
if test "$have_x" != yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_x" >&5
$as_echo "$have_x" >&6; }
no_x=yes
else
# If each of the values was on the command line, it overrides each guess.
test "x$x_includes" = xNONE && x_includes=$ac_x_includes
test "x$x_libraries" = xNONE && x_libraries=$ac_x_libraries
# Update the cache value to reflect the command line values.
ac_cv_have_x="have_x=yes\
ac_x_includes='$x_includes'\
ac_x_libraries='$x_libraries'"
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: libraries $x_libraries, headers $x_includes" >&5
$as_echo "libraries $x_libraries, headers $x_includes" >&6; }
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
if ${ac_cv_path_GREP+:} false; then :
@ -3670,24 +3437,218 @@ fi
done
if test "$no_x" = "yes"; then :
USE_X_WINDOWS=0
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for X" >&5
$as_echo_n "checking for X... " >&6; }
# Check whether --with-x was given.
if test "${with_x+set}" = set; then :
withval=$with_x;
fi
# $have_x is `yes', `no', `disabled', or empty when we do not yet know.
if test "x$with_x" = xno; then
# The user explicitly disabled X.
have_x=disabled
else
case $x_includes,$x_libraries in #(
*\'*) as_fn_error $? "cannot use X directory names containing '" "$LINENO" 5;; #(
*,NONE | NONE,*) if ${ac_cv_have_x+:} false; then :
$as_echo_n "(cached) " >&6
else
# One or both of the vars are not set, and there is no cached value.
ac_x_includes=no ac_x_libraries=no
rm -f -r conftest.dir
if mkdir conftest.dir; then
cd conftest.dir
cat >Imakefile <<'_ACEOF'
incroot:
@echo incroot='${INCROOT}'
usrlibdir:
@echo usrlibdir='${USRLIBDIR}'
libdir:
@echo libdir='${LIBDIR}'
_ACEOF
if (export CC; ${XMKMF-xmkmf}) >/dev/null 2>/dev/null && test -f Makefile; then
# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
for ac_var in incroot usrlibdir libdir; do
eval "ac_im_$ac_var=\`\${MAKE-make} $ac_var 2>/dev/null | sed -n 's/^$ac_var=//p'\`"
done
# Open Windows xmkmf reportedly sets LIBDIR instead of USRLIBDIR.
for ac_extension in a so sl dylib la dll; do
if test ! -f "$ac_im_usrlibdir/libX11.$ac_extension" &&
test -f "$ac_im_libdir/libX11.$ac_extension"; then
ac_im_usrlibdir=$ac_im_libdir; break
fi
done
# Screen out bogus values from the imake configuration. They are
# bogus both because they are the default anyway, and because
# using them would break gcc on systems where it needs fixed includes.
case $ac_im_incroot in
/usr/include) ac_x_includes= ;;
*) test -f "$ac_im_incroot/X11/Xos.h" && ac_x_includes=$ac_im_incroot;;
esac
case $ac_im_usrlibdir in
/usr/lib | /usr/lib64 | /lib | /lib64) ;;
*) test -d "$ac_im_usrlibdir" && ac_x_libraries=$ac_im_usrlibdir ;;
esac
fi
cd ..
rm -f -r conftest.dir
fi
# Standard set of common directories for X headers.
# Check X11 before X11Rn because it is often a symlink to the current release.
ac_x_header_dirs='
/usr/X11/include
/usr/X11R7/include
/usr/X11R6/include
/usr/X11R5/include
/usr/X11R4/include
/usr/include/X11
/usr/include/X11R7
/usr/include/X11R6
/usr/include/X11R5
/usr/include/X11R4
/usr/local/X11/include
/usr/local/X11R7/include
/usr/local/X11R6/include
/usr/local/X11R5/include
/usr/local/X11R4/include
/usr/local/include/X11
/usr/local/include/X11R7
/usr/local/include/X11R6
/usr/local/include/X11R5
/usr/local/include/X11R4
/usr/X386/include
/usr/x386/include
/usr/XFree86/include/X11
/usr/include
/usr/local/include
/usr/unsupported/include
/usr/athena/include
/usr/local/x11r5/include
/usr/lpp/Xamples/include
/usr/openwin/include
/usr/openwin/share/include'
if test "$ac_x_includes" = no; then
# Guess where to find include files, by looking for Xlib.h.
# First, try using that file with no special directory specified.
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <X11/Xlib.h>
_ACEOF
if ac_fn_cxx_try_cpp "$LINENO"; then :
# We can compile using X headers with no special include directory.
ac_x_includes=
else
for ac_dir in $ac_x_header_dirs; do
if test -r "$ac_dir/X11/Xlib.h"; then
ac_x_includes=$ac_dir
break
fi
done
fi
rm -f conftest.err conftest.i conftest.$ac_ext
fi # $ac_x_includes = no
if test "$ac_x_libraries" = no; then
# Check for the libraries.
# See if we find them without any special options.
# Don't add to $LIBS permanently.
ac_save_LIBS=$LIBS
LIBS="-lX11 $LIBS"
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
#include <X11/Xlib.h>
int
main ()
{
XrmInitialize ()
;
return 0;
}
_ACEOF
if ac_fn_cxx_try_link "$LINENO"; then :
LIBS=$ac_save_LIBS
# We can link X programs with no special library path.
ac_x_libraries=
else
LIBS=$ac_save_LIBS
for ac_dir in `$as_echo "$ac_x_includes $ac_x_header_dirs" | sed s/include/lib/g`
do
# Don't even attempt the hair of trying to link an X program!
for ac_extension in a so sl dylib la dll; do
if test -r "$ac_dir/libX11.$ac_extension"; then
ac_x_libraries=$ac_dir
break 2
fi
done
done
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi # $ac_x_libraries = no
case $ac_x_includes,$ac_x_libraries in #(
no,* | *,no | *\'*)
# Didn't find X, or a directory has "'" in its name.
ac_cv_have_x="have_x=no";; #(
*)
# Record where we found X for the cache.
ac_cv_have_x="have_x=yes\
ac_x_includes='$ac_x_includes'\
ac_x_libraries='$ac_x_libraries'"
esac
fi
;; #(
*) have_x=yes;;
esac
eval "$ac_cv_have_x"
fi # $with_x != no
if test "$have_x" != yes; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_x" >&5
$as_echo "$have_x" >&6; }
no_x=yes
else
# If each of the values was on the command line, it overrides each guess.
test "x$x_includes" = xNONE && x_includes=$ac_x_includes
test "x$x_libraries" = xNONE && x_libraries=$ac_x_libraries
# Update the cache value to reflect the command line values.
ac_cv_have_x="have_x=yes\
ac_x_includes='$x_includes'\
ac_x_libraries='$x_libraries'"
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: libraries $x_libraries, headers $x_includes" >&5
$as_echo "libraries $x_libraries, headers $x_includes" >&6; }
fi
if test "$no_x" = "yes"; then :
USE_X_WINDOWS=0
else
USE_X_WINDOWS=1
if test "x$x_includes" = "x"; then :
USE_X_WINDOWS=1
if test "x$x_includes" = "x"; then :
else
X_INCLUDE_DIR=-I$x_includes
fi
if test "x$x_libraries" = "x"; then :
if test "x$x_libraries" = "x"; then :
else
X_LIB_DIR=-L$x_libraries
fi
ac_fn_cxx_check_header_mongrel "$LINENO" "X11/Intrinsic.h" "ac_cv_header_X11_Intrinsic_h" "$ac_includes_default"
ac_fn_cxx_check_header_mongrel "$LINENO" "X11/Intrinsic.h" "ac_cv_header_X11_Intrinsic_h" "$ac_includes_default"
if test "x$ac_cv_header_X11_Intrinsic_h" = xyes; then :
else
@ -3777,6 +3738,7 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lxml2" >&5
$as_echo_n "checking for main in -lxml2... " >&6; }
if ${ac_cv_lib_xml2_main+:} false; then :
@ -4771,7 +4733,7 @@ if ${ax_cv_gcc_version+:} false; then :
$as_echo_n "(cached) " >&6
else
ax_cv_gcc_version="`$CC -dumpversion`"
ax_cv_gcc_version="`$CC -dumpfullversion -dumpversion`"
if test "x$ax_cv_gcc_version" = "x"; then :
ax_cv_gcc_version=""
@ -5667,6 +5629,7 @@ PYTHON_LIBS=`${PYTHON_LIBS_COMMAND} | tr '\r\n' ' '`
# Check whether --with-prepend-path was given.
if test "${with_prepend_path+set}" = set; then :
withval=$with_prepend_path; if test "x${with_prepend_path}" = xyes; then :
@ -7478,6 +7441,26 @@ fi
fi
as_ac_File=`$as_echo "ac_cv_file_$LLVM_LIB_DIR/libclangSupport.a" | $as_tr_sh`
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LLVM_LIB_DIR/libclangSupport.a" >&5
$as_echo_n "checking for $LLVM_LIB_DIR/libclangSupport.a... " >&6; }
if eval \${$as_ac_File+:} false; then :
$as_echo_n "(cached) " >&6
else
test "$cross_compiling" = yes &&
as_fn_error $? "cannot check for file existence when cross compiling" "$LINENO" 5
if test -r "$LLVM_LIB_DIR/libclangSupport.a"; then
eval "$as_ac_File=yes"
else
eval "$as_ac_File=no"
fi
fi
eval ac_res=\$$as_ac_File
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
$as_echo "$ac_res" >&6; }
if eval test \"x\$"$as_ac_File"\" = x"yes"; then :
ICG_CLANGLIBS="$ICG_CLANGLIBS -lclangSupport"
fi

View File

@ -0,0 +1,13 @@
| [Home](/trick) → Developer Docs |
|------------------------------------------------------------------|
# Developer Documentation
Link documentation for Trick internals, processes, and plans here.
- [Testing](Testing)
- [How to make a new Trick release on GitHub](How-To-Make-A-Release)
- [Tooling and Sanitizers](Tooling-and-Sanitizers)
- [Python Environment](Python-Environment-Issues)

View File

@ -0,0 +1,44 @@
| [Home](/trick) → [Developer Docs](Developer-Docs-Home) → How to make a release |
|------------------------------------------------------------------|
# How To Make A Trick Release
This guide describes how to make a Trick release on GitHub
### Close Open Issues
Go through recently updated issues/pull requests and make sure that any open issues that have been fixed are merged and closed
### Make the release notes
#### Examples
https://github.com/nasa/trick/releases
#### Steps
* Review closed issues, pull requests, and commits since the last release and make a bulleted list of major changes
* UI Changes
* Header Changes
* Dependency Changes
* Major Bugfixes
* New features, tools, and example sims
* New OS/distro support
* New dependency support (such as LLVM/GCC versions)
* Major Documentation changes
### Choose a version number
* Advance major version number if there are interface changes or other major build-breaking changes
* Advance minor version number if there are major improvements or new features that are not build breaking
* Advance patch/tiny version number for hotfixes or other bugfixes
### Change the version numbers in the master branch
#### Example
https://github.com/nasa/trick/commit/a317c222748e706e89b7f344f6910d1f39824feb
#### Steps
* In share/trick/trick_ver.txt change the "current_version" string to match the new version number and remove any suffixes (like: "-beta").
* In trick_source/java/pom.xml change the \<version\> tag to the new version and remove any suffixes.
* In CMakeLists.txt change TRICK_MAJOR TRICK_MINOR TRICK_TINY to match the current version, and set TRICK_PRERELEASE to an empty string
### Create a commit, tag, and push to github.com
* `git commit -m "update version numbers for Trick Simulation Environment X.Y.Z"`
* `git tag -a <Version Number> -m "Trick Simulation Environment <Version Number>"` e.g. `git tag -a 25.0.0 -m "Trick Simulation Environment 25.0.0"`
* `git push origin <tagname>`
### Change version numbers back to prerelease and push
* Reverse the process of changing the version numbers in the files listed above; update to the next minor prerelease version (or major version if planning a major release soon).
* Remember to add the -beta suffix to the version number
* `git commit -m "update version numbers to prerelease X.Y.Z-beta"`
* `git push origin master`
### Create the release through the github UI
* This process is subject to change, so I'm posting the link to GitHub documentation here:
* https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository#creating-a-release
* Currently we do not add any artifacts/binaries or open discussions, so you can ignore those optional steps.

View File

@ -0,0 +1,57 @@
| [Home](/trick) → [Developer Docs](Developer-Docs-Home) → Python Environment Issues |
|------------------------------------------------------------------|
# Python Environment in Trick
In the `./configure` step, Trick finds the Python environment that is used as the embedded interpreter to execute input files and events.
The configure script first tries to find the Python executable and the python<version>-config script. If the `--with-python=[DIR]` option is used, it will only look in that given directory. Otherwise, it will use the system path. The python<version>-config script is used to find the location of Python.h, installed python libraries, etc.
## Help! The packages that I installed with pip are not being found by my Trick input files!
This often happens when a machine has multiple Python installations, particularly on Mac with brew python vs system python. To debug this:
Find where the `python` and `pip` (or `python3` and `pip3`) symlinks are — this is the version that Trick will pick up unless you have specified something else using the `--with-python=<directory>` configure option.
```
which python3
which pip
```
These should match. cd into that directory. It was `/usr/local/bin` for me, which is the default Brew install directory for python.
Read the path for the symlinks for `python3`, `pip3`, and `python3-config` (or whichever preferred version). These paths should match as well, but often this is the problem.
```
readlink python3
/Library/Frameworks/Python.framework/Versions/3.10/bin/python3
readlink pip3
/usr/local/Cellar/python@3.11/3.11.3/bin/pip3
```
Brew is supposed to be able to detect problems like this with `brew doctor`. Mine detected that `python3.11` was unlinked:
```
Warning: You have unlinked kegs in your Cellar.
Leaving kegs unlinked can lead to build-trouble and cause formulae that depend on
those kegs to fail to run properly once built. Run `brew link` on these:
python@3.11
```
Run with the `--overwrite` flag:
```
brew link --overwrite python@3.11
```
Running readlink should now show the correct Brew install (the one with Cellar in the path) for all the Python executables.
Trick will search for the python3 and python3-config executables first on the user provided directory (if given) --with-python=<directory> and then along the system path. It then uses python3-config to find installed modules and the CPython headers and libraries. Trick assumes that these symlinks point to the same Python install, and when this assumption is violated it can cause problems with finding pip installed libraries.
This is a common problem for systems with multiple Python installs, particularly for Macs with Brew python and Xcode python. Another option is just to completely remove the Python install in /Library/Frameworks/Python.framework.
Helpful links:
https://faun.pub/the-right-way-to-set-up-python-on-your-mac-e923ffe8cf8e
https://stackoverflow.com/questions/5157678/how-do-i-use-brew-installed-python-as-the-default-python

View File

@ -0,0 +1,61 @@
| [Home](/trick) → [Developer Docs](Developer-Docs-Home) → Testing |
|------------------------------------------------------------------|
# Testing
Currently, Trick has a suite of unit and integration tests that run through a hodgepodge of GTest, Trick's internal unit test framework, Makefiles, and TrickOps.
Unit tests can be found in their respective `trick_source/[sim_services|utils]/*/test` directories, and primarily use the Gtest framework. These are run primarily through test targets in their Makefiles. See `trick_source/sim_services/MemoryManager/test` for an example of organization.
Integration tests take the form of Sims with some embedded tests. They live under `trick_sims/` and `test/`. A full list of sims that are used as part of the test suite are in [test_sims.yml](). These are run with TrickOps.
## Test suite dependencies
Gtest is required for the unit tests and some integration tests. See the [install guide](../documentation/install_guide/Install-Guide.md) for gtest installation.
TrickOps requires python3 and the packages `PyYAML` and `psutil` (updated list in [Trickops requirements.txt](https://github.com/nasa/trick/blob/master/share/trick/trickops/requirements.txt)). Install these in your python environment, or create a virtual environment as follows:
```
cd share/trick/trickops/
python3 -m venv .venv && . .venv/bin/activate && pip3 install -r requirements.txt
cd ../../../
```
## Running the test suite
From trick home:
```
# Run everything
make test
# Run only integration tests
make sim_test
# Run only unit tests
make unit_test
```
Currently, TrickOps will redirect all console output from tests into logs under {TRICK_HOME}/trickops_logs/, and will also dump the output of failing logs to console after the test suite is finished.
## Coverage
Trick uses [Coveralls](https://coveralls.io/github/nasa/trick?branch=master) to host code coverage. Coverage is generated by running the test suite with gcov in CI, and then those files are uploaded to Coveralls.
To enable gcov in Trick, it must be cleaned and compiled with the following:
```
export CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
export LDFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_LDFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_CFLAGS="-fprofile-arcs -ftest-coverage -O0"
export TRICK_SYSTEM_CXXFLAGS="-fprofile-arcs -ftest-coverage -O0"
```
After Trick has been rebuilt with the instrumentation, run:
```
make code-coverage
```
This will generate, collect, and filter all the various coverage data collection files into `coverage.info`. This is the file that is uploaded to Coveralls in the [code_coverage.yml](https://github.com/nasa/trick/blob/master/.github/workflows/code_coverage.yml) Github Actions workflow.

View File

@ -0,0 +1,35 @@
| [Home](/trick) → [Developer Docs](Developer-Docs-Home) → Tooling and Sanitizers |
|------------------------------------------------------------------|
Lots of development and debugging tools require a binary to be instrumented with compiler flags. Trick does compiling and linking steps separately and uses several variables to propagate flags to different parts of the build. The following is a convenience function that can be added to your bashrc to easily modify the flags in your environment:
```
add-trickenv () {
export CFLAGS="$CFLAGS $1"
export CXXFLAGS="$CXXFLAGS $1"
export LDFLAGS="$LDFLAGS $1"
export TRICK_CFLAGS="$TRICK_CFLAGS $1"
export TRICK_CXXFLAGS="$TRICK_CXXFLAGS $1"
export TRICK_LDFLAGS="$TRICK_LDFLAGS $1"
export TRICK_SYSTEM_CFLAGS="$TRICK_SYSTEM_CFLAGS $1"
export TRICK_SYSTEM_CXXFLAGS="$TRICK_SYSTEM_CXXFLAGS $1"
export TRICK_SYSTEM_LDFLAGS="$TRICK_SYSTEM_LDFLAGS $1"
}
```
To debug a sim, you will likely need to run a clean build of all of Trick with these flags set.
## Tools that are known to work well with Trick
GDB/LLDB: `-g`
gcov: `-fprofile-arcs -ftest-coverage -O0`
tsan: `-g -fsanitize=thread`
asan: `-g -fsanitize=address -fsanitize-recover=address`
Suggest running asan instrumented sims with:
`ASAN_OPTIONS=halt_on_error=0 ./S_main* <your args>`

View File

@ -36,6 +36,7 @@ The user guide contains information pertinent to Trick users. These pages will h
01. [Realtime Sleep Timer](simulation_capabilities/Realtime-Timer)
01. [Realtime Injector](simulation_capabilities/Realtime-Injector)
01. [Monte Carlo](simulation_capabilities/UserGuide-Monte-Carlo)
02. [Monte Carlo Generation](miscellaneous_trick_tools/MonteCarloGeneration)
01. [Master Slave](simulation_capabilities/Master-Slave)
01. [Data Record](simulation_capabilities/Data-Record)
01. [Checkpoints](simulation_capabilities/Checkpoints)
@ -48,17 +49,17 @@ The user guide contains information pertinent to Trick users. These pages will h
01. [Status Message System](simulation_capabilities/Status-Message-System)
01. [Command Line Arguments](simulation_capabilities/Command-Line-Arguments)
01. [Environment](simulation_capabilities/Environment)
01. [Standard Template Library Checkpointing](simulation_capabilities/STL-Checkpointing)
01. [Standard Template Library Checkpointing](simulation_capabilities/STL-capabilities)
01. [Threads](simulation_capabilities/Threads)
01. [Web Server](web)
01. [Adding a Web Server to Your Sim](web/Adding_a_Web_Server_to_Your_Sim)
01. Web Server APIs
01. [HTTP-API_alloc_info](web/HTTP-API_alloc_info)
01. [WS-API_VariableServer](web/WS-API_VariableServer)
01. Adding New Web Server APIs
01. [Extending_the_HTTP-API](web/Extending_the_HTTP-API)
01. [Extending_the_WS-API](web/Extending_the_WS-API)
01. [Web Server](web/Webserver)
01. [Configure Trick with Civetweb](web/Configure-Civetweb)
01. [Add SSL encryption to your webserver](web/SSL)
01. [Web Server APIs](web/Webserver-apis)
01. [HTTP alloc API](web/http-alloc-api.md)
01. [WS Variable Server API](web/ws-variable-server-api.md)
01. [Extend the HTTP API](web/Extend-http-api.md)
01. [Extend the WS API](web/Extend-ws-api.md)
01. [Simulation Utilities](simulation_utilities/Simulation-Utilities)
01. [Trickcomm](simulation_utilities/Trickcomm)

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → Building a Simulation |
|------------------------------------------------------------------|
The building blocks of a basic Trick simulation are C/C++ structures/classes (models), a Python input file and a Trick simulation definition file (S_define). The S_define contains simulation objects which offer a way to turn the C/C++ function/methods into simulation jobs. Trick generates the necessary Python glue code which makes the C/C++ structures/classes accessible by the Python input file. The input file configures the simulation and is a command-line argument to the simulation executable.
... to be continued ...

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Building a Simulation](Building-a-Simulation) → Environment Variables |
|------------------------------------------------------------------|
Trick uses a list of variables for building sims e.g. TRICK_CFLAGS and TRICK_CXXFLAGS. Each variable has a default value that may be overridden by setting the value in the environment. Trick resolves these variables by a call to a function called "trick-gte". Type in "${TRICK_HOME}/bin/trick-gte" on the command line to see what the "Trick environment" is.
### Adding ${TRICK_HOME}/bin to PATH

View File

@ -1,3 +1,7 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Building a Simulation](Building-a-Simulation) → Making the Simulation |
|------------------------------------------------------------------|
### Simulation Compilation Environment Variables
The -Ipaths in TRICK_CFLAGS and TRICK_CXXFLAGS tell Trick where to find model source files. The flags also can contain compiler settings, for instance the -g flag is used for compiling in debug mode. See section Trick_Environment for more detail and information on variables for linking in external libraries, setting the compiler etc.

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Building a Simulation](Building-a-Simulation) → Model Source Code |
|------------------------------------------------------------------|
This section details the syntax for creating headers and source code that Trick can process.
It also details the operation of the Trick Interface Code Generator (ICG) that processes headers, and the Module Interface Specification Processor (MIS) that processes source code.
@ -159,7 +162,7 @@ The `ICG IGNORE TYPES` field lists the structs or classes to be ignored. Any par
###### `PYTHON_MODULE`
Specifying a `python_module` name will place any class/struct and function definitions in this header file in a python module of the same name. All classes and functions are flattened into the python `trick` namespace by default. This capability allows users to avoid possible name collisions between names when they are flattened.
Specifying a `python_module` name will place any class/struct and function definitions in this header file in a python module of the same name. All classes and functions are flattened into the python `trick` namespace by default. This capability allows users to avoid possible name collisions between names when they are flattened. An empty `python_module` statement will be ignored.
##### Compiler Directives

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Building a Simulation](Building-a-Simulation) → Simulation Definition File |
|------------------------------------------------------------------|
The Simulation Definition File or S_define is the file which lays out the architecture
of the simulation. Details about job frequencies, job class, job data, importing/exporting
data to other simulations, freeze cycles, integration, etc. are all housed in this one file.
@ -374,15 +377,18 @@ This section of the S_define (encapsulated by "job_class_order{...};) can be use
scheduled loop job class order. The user may simply re-order the existing job classes that exist or
can specify a new set of scheduled loop job classes. Job classes that are eligible for reassignment
are listed in Table SD_1 between automatic and automatic_last inclusive. The order they are shown
in the table is the default ordering.
in the table is the default ordering. Note that if the user provides an ordering, classes that are
not included in the ordering (excluding automatic and automatic_last) will not be handled by any scheduler,
and therefore not executed in the sim.
```C++
job_class_order {
my_job_class_1 ;
my_job_class_2 ;
scheduled ;
my_job_class_3 ;
}
my_job_class_1 ,
my_job_class_2 ,
scheduled ,
my_job_class_3
};
```
### Simulation Object C++ properties

View File

@ -1,3 +1,7 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Building a Simulation](Building-a-Simulation) → Trickified Project Libraries |
|------------------------------------------------------------------|
During a simulation build, Trick generates several rounds of files to support data recording, checkpointing, and Python access:
* Trick generates `S_source.hh` from the `S_define`

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Data Products](Data-Products) → DP Product File Format |
|------------------------------------------------------------------|
Since Trick 10, the DP Product Specification File Format is changed to XML. The DP Product XML file
DTD is defined as following:

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Data Products](Data-Products) → DP Session File Format |
|------------------------------------------------------------------|
Since Trick 10, the DP Session file is changed to XML format. The Session XML Document
Type Definitions(DTD) is defined as following:

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Data Products](Data-Products) → Data Products GUIs |
|------------------------------------------------------------------|
There are two main GUIs for viewing Trick logged data:

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → Data Products |
|------------------------------------------------------------------|
The Data Products (DP) is a simulation data post processor designed to allow visualization of data recorded in the Trick simulation.
The data products can plot ASCII, Binary & HDF5 data. HDF5 is the new data format supported since Trick 10.

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Data Products](Data-Products) → Plot Printing |
|------------------------------------------------------------------|
To print fermi plots, simply bring up the fermi plot, and press either the "Print" (printer icon) button or the individual "Print" (printer icon) buttons on the plots themselves. In order for this to work you should set two environment variables:
```

View File

@ -9,16 +9,16 @@ Trick requires various free third party utilities in order to function. All the
| Utility | Version | Description | Usage | Notes |
|---------------:|:-------:|:-----------------------:|:---------------------------------------------------------:|:------------------------------------------------------|
| [gcc] and g++ | 4.8+ | C/C++ Compiler | Compiles Trick and Trick simulations. | |
| [clang]/[llvm] | <=13 (14 not currently supported) | C/C++ Compiler | Utilized by the interface code generator. | |
| [python] | 2.7+ | Programming Language | Lets the user interact with a simulation. | Trick has been tested up to python 3.9 as of 02/21 |
| [perl] | 5.6+ | Programming Language | Allows executable scripts in the bin directory to run. | |
| [java] | 11+ | Programming Language | Necessary for Trick GUIs. | |
| [swig] | 2.x-3.x | Language Interfacing | Connects the python input processor with Trick's C code. | 3.0+ required for some unit tests in make test target |
| [make] | 3.78+ | Build Automation | Automates the building and cleaning of Trick. | |
| [openmotif] | 2.2.0+ | GUI Toolkit | Covers Trick GUIs not made with Java. | |
| [udunits] | 2.x+ | C Unit Library/Database | Provides support for units of physical quantities. | |
| [maven] | x.x | Java package manager | Downloads Java dependencies and builds trick GUIs | |
| [gcc] and g++ | 4.8+ | C/C++ Compiler | Compiles Trick and Trick simulations. | |
| [clang]/[llvm] | <=14 | C/C++ Compiler | Utilized by the interface code generator. | Trick Versions <= 19.3 should use LLVM <= 9 |
| [python] | 2.7+ | Programming Language | Lets the user interact with a simulation. | Trick has been tested up to python 3.11 as of 04/23 |
| [perl] | 5.6+ | Programming Language | Allows executable scripts in the bin directory to run. | |
| [java] | 11+ | Programming Language | Necessary for Trick GUIs. | |
| [swig] | 2.x-3.x | Language Interfacing | Connects the python input processor with Trick's C code. | 3.0+ required for some unit tests in make test target. SWIG 4.x is compatible with Trick, but has some issues https://github.com/nasa/trick/issues/1288 |
| [make] | 3.78+ | Build Automation | Automates the building and cleaning of Trick. | |
| [openmotif] | 2.2.0+ | GUI Toolkit | Covers Trick GUIs not made with Java. | |
| [udunits] | 2.x+ | C Unit Library/Database | Provides support for units of physical quantities. | |
| [maven] | x.x | Java package manager | Downloads Java dependencies and builds trick GUIs | |
[gcc]: https://gcc.gnu.org/
[clang]: https://clang.llvm.org/
@ -48,27 +48,44 @@ Trick runs on GNU/Linux and macOS, though any System V/POSIX compatible UNIX wor
| Quick Jump Menu |
|---|
|[RedHat Enterprise Linux (RHEL) 8](#redhat8)|
|[CentOS 8](#redhat8)|
|[Oracle Linux 8](#redhat8)|
|[AlmaLinux 8](#redhat8)|
|[Rocky Linux 8](#redhat8)|
|[RedHat Enterprise Linux (RHEL) 7](#redhat7)|
|[CentOS 7](#redhat7)|
|[Fedora](#fedora)|
|[Ubuntu](#ubuntu)|
|[macOS](#macos)|
|[Windows 10 (Linux Subsystem Only)](#windows10)|
|[Troubleshooting](#trouble)|
---
<a name="trouble"></a>
### Troubleshooting
#### Environment Variables
Sometimes environment variables affect the Trick build and can cause it to fail. If you find one that isn't listed here, please create an issue and we'll add it to the list.
```
JAVA_HOME # Trick and Maven will use JAVA_HOME to build the GUIs instead of javac in PATH if it is set.
TRICK_HOME # This variable is optional but may cause a Trick build to fail if it is set to the wrong directory.
CFLAGS, CXXFLAGS, LDFLAGS # If these flags are set they may affect flags passed to your compiler and linker
```
#### If You Think The Install Instructions Do Not Work Or Are Outdated
If the Trick tests are passing, you can see *exactly* how we configure our test machines on Github's test integration platform, Github Actions.
If logged into any github account on github.com, you can access the [Actions](https://github.com/nasa/trick/actions) tab on the Trick repo page. Go to [Trick-CI](https://github.com/nasa/trick/actions?query=workflow%3A%22Trick+CI%22), and click the latest passing run. Here you can access a log of our shell commands to configure each OS with dependencies and also the commands we use to install Trick. In fact, that is exactly where I go when I want to update the install guide! @spfennell
The configuration for these tests can be found in the [trick/.github/workflow/test.yml](https://github.com/nasa/trick/blob/master/.github/workflows/test.yml) file.
The configuration for these tests can be found in the [trick/.github/workflow/test_linux.yml](https://github.com/nasa/trick/blob/master/.github/workflows/test_linux.yml) file.
#### Weird Linker Error
It is possible you may have an old version of Trick installed, and Trick's libraries are on your LDPATH and interfering with your new build. The solution is to uninstall the old version before building the new one. Call `sudo make uninstall` from any Trick top level directory and it will remove the old libraries.
---
<a name="redhat8"></a>
### RedHat Enterprise Linux (RHEL) 8, CentOS 8
### RedHat Enterprise Linux (RHEL) 8, Oracle Linux 8, Rocky Linux 8, AlmaLinux 8
Trick requires the clang/llvm compiler to compile and link the Trick Interface Code Generator. clang/llvm is available through the [Extra Packages for Enterprise Linux](https://fedoraproject.org/wiki/EPEL) repository. Download and install the 'epel-release' package.
@ -84,12 +101,21 @@ python3-devel diffutils
Trick makes use of several optional packages if they are present on the system. These include using the HDF5 package for logging, the GSL packages for random number generation, and google test (gtest) for Trick's unit testing. These are available from the EPEL repository. In order to access gtest-devel in the epel repository you need to enable the dnf option PowerTools
Trick makes use of several optional packages if they are present on the system. These include using the HDF5 package for logging, the GSL packages for random number generation, and google test (gtest) for Trick's unit testing. These are available from the EPEL repository. In order to access gtest-devel in the epel repository on RHEL 8 you need to enable the dnf repo CodeReady Linux Builder. In Rocky Linux and Alma Linux you can instead enable the Power Tools Repo. On Oracle Linux 8 you must enable OL8 CodeReady Builder.
See RedHat's documentation to enable the CodeReady Linux Builder repository:
https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/package_manifest/codereadylinuxbuilder-repository
On AlmaLinux 8, Rocky Linux 8:
```bash
yum install -y 'dnf-command(config-manager)'
yum config-manager --enable PowerTools
yum install hdf5-devel gsl-devel gtest-devel
dnf config-manager --enable powertools
dnf install -y gtest-devel
```
On Oracle Linux 8:
```
dnf config-manager --enable ol8_codeready_builder
dnf install -y gtest-devel
```
proceed to [Install Trick](#install) section of the install guide
@ -168,6 +194,7 @@ export PYTHON_VERSION=3
proceed to [Install Trick](#install) section of the install guide
---
<a name="macos"></a>
### macOS Monterey, Big Sur, Catalina
#### These instructions are for Intel-based macs. For the latest Apple silicon (M1) instructions see this issue: https://github.com/nasa/trick/issues/1283
@ -188,10 +215,10 @@ xcode-select --install
brew install python java xquartz swig@3 maven udunits openmotif
```
IMPORTANT: Make sure to follow the instructions for adding java to your path provided by brew. If you missed them, you can see them again by using `brew info java`.
IMPORTANT: Make sure to follow the instructions for adding java and swig to your `PATH` provided by brew. If you missed them, you can see them again by using `brew info java` and `brew info swig@3`. Remember, you may need to restart your terminal for these `PATH` changes to take effect.
5. Download and un-compress the latest pre-built clang+llvm 13 from llvm-project github. Go to https://github.com/llvm/llvm-project/releases
and download the latest version of 13 from the release assets. 13.0.1 is the latest as of the writing of this guide, the link I used is below:
5. Download and un-compress the latest pre-built clang+llvm from llvm-project github. Go to https://github.com/llvm/llvm-project/releases
and download the latest version llvm that matches your Xcode version from the release assets. For example, if your Xcode version is 14 then you will want the latest 14.x.x release of llvm. 13.0.1 is the latest as of the writing of this guide, the link I used is below:
https://github.com/llvm/llvm-project/releases/download/llvmorg-13.0.1/clang+llvm-13.0.1-x86_64-apple-darwin.tar.xz
Tip: I suggest renaming the untar'd directory to something simple like llvm13 and putting it in your home directory or development environment.
@ -214,6 +241,17 @@ e.g.
./configure --with-llvm=/Users/trickguy/llvm13 --with-udunits=/usr/local/Cellar/udunits/2.2.28
```
OPTIONAL: Trick uses google test (gtest) version 1.8 for unit testing. To install gtest:
```
brew install cmake wget
wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz
tar xzvf release-1.8.0.tar.gz
cd googletest-release-1.8.0/googletest
cmake .
make
make install
```
proceed to [Install Trick](#install) section of the install guide
---
@ -327,3 +365,12 @@ cp prebuiltTrick/libexec/trick/java/build/*.jar trick/trick-offline
```
4. Follow regular install instructions above.
### Python Version
If you would like to use Python 2 with Trick please first make sure Python 2 and the Python 2 libs are installed. Then you will likely need to set `PYTHON_VERSION=2` in your shell environment before executing the `configure` script so that Trick will use Python 2 instead of Python 3. This can be done in bash or zsh with the following commands:
```
export PYTHON_VERSION=2
./configure
```

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → Introduction |
|------------------------------------------------------------------|
The responsibility for this document lies with the [Simulation and Graphics Branch (ER7)](https://er.jsc.nasa.gov/ER7/) of the [Automation, Robotics and Simulation Division](https://er.jsc.nasa.gov/) of the NASA JSC Engineering Directorate.
The purpose of this document is to provide Trick simulation developers and users with a detailed users reference guide on how to install Trick, use Trick processors and utilities, and how to operate a simulation from execution to data post processing.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → Miscellaneous Trick Tools |
|------------------------------------------------------------------|
### Interface Code Generator - ICG

View File

@ -0,0 +1,705 @@
# MonteCarloGeneration Model
# Revision History
| Version | Date | Author | Purpose |
| :--- |:---| :--- | :--- |
| 1 | April 2020 | Gary Turner | Initial Version |
| 2 | March 2021 | Gary Turner | Added Verification |
| 3 | October 2022 | Isaac Reaves | Converted to Markdown |
# 1 Introduction
The MonteCarlo Model is used to disperse the values assigned to variables at the start of a simulation. Dispersing the initial
conditions and configurations for the simulation allows for robust testing and statistical analysis of the probability of
undesirable outcomes, and measuring the confidence levels associated with achieving desirable outcomes.
Conventionally, most of the time we think about dispersing variables, we think about applying some sort of statistical
distribution to the value. Most often, that is a normal or uniform distribution, but there may be situations in which other
distributions are desired. In particular, this model provides an extensible framework allowing for any type of distribution to
be applied to any variable.
For extensive analysis of safety-critical scenarios, where it is necessary to demonstrate high probability of success with high
confidence, traditional MonteCarlo analyses often require many thousands of runs. For long-duration simulations, it may
not be feasible to run the number of simulations necessary to reach the high confidence of high success probability that is
necessary to meet requirements. Typically, failure cases occur out near the edges of state-space, but most of the runs will be
“right down the middle”; using conventional MonteCarlo techniques, most of these runs are completely unnecessary. With
a Sequential-MonteCarlo configuration, a small number of runs can be executed, allowing for identification of problem
areas, and a focusing of the distribution on those areas of state-space, thereby reducing the overall number of runs while
adding complexity to the setup. While this model does not (at this time) provide a Sequential-MonteCarlo capability, the
organization of the model has been designed to support external tools seeking to sequentially modify the distributions being
applied to the dispersed variables, and generate new dispersion sets.
# 2 Requirements
1. The model shall provide common statistical distribution capabilities, including:
1. Uniform distribution between specified values
1. as a floating-point value
1. as an integer value
1. Normal distribution, specified by mean and standard deviation
1. Truncated Normal Distribution, including
1. symmetric and asymmetric truncations
1. it shall be possible to specify truncations by:
1. some number of standard deviations from the mean,
1. a numerical difference from the mean, and
1. an upper and lower limit
1. The model shall provide an extensible framework suitable for supporting other statistical distributions
1. The model shall provide the ability to assign a common value to all runs:
1. This value could be a fixed, user-defined value
1. This value could be a random assignment, generated once and then applied to all runs
1. The model shall provide the capability to read values from a pre-generated file instead of generating its own values
1. The model shall provide the ability to randomly select from a discrete data set, including:
1. enumerations,
1. character-strings,
1. boolean values, and
1. numerical values
1. The model shall provide the capability to compute follow-on variables, the values of which are a function of one or more dispersed variables with values generated using any of the methods in requirements 1-5.
1. The model shall provide a record of the generated distributions, allowing for repeated execution of the same scenario using exactly the same conditions.
1. The model shall provide summary data of the dispersions which have been applied, including:
1. number of dispersions
1. types of dispersions
1. correlations between variables
# 3 Model Specification
## 3.1 Code Structure
The model can be broken down into its constituent classes; there are two principal components to the model: the variables
and the management of the variables.
### 3.1.1 Variable Management (MonteCarloMaster)
MonteCarloMaster is the manager of the MonteCarlo variables. This class controls how many sets of dispersed variables
are to be generated; for each set, it has the responsibility for
* instructing each variable to generate its own dispersed value
* collecting those values and writing them to an external file
### 3.1.2 Dispersed Variables (MonteCarloVariable)
MonteCarloVariable is an abstract class that forms the basis for all dispersed variables. The following classes inherit from
MonteCarloVariable:
* MonteCarloVariableFile will extract a value for its variable from a specified text file. Typically, a data file will
comprise some number of rows and some number of columns of data. Each column of data represents the possible
values for one variable. Each row of data represents a correlated set of data to be applied to several variables; each
data-set generation will be taken from one line of data. Typically, each subsequent data-set will be generated from the
next line of data; however, this is not required.
* In some situations, it is desirable for the next line of data used for any given data set to be somewhat randomly
chosen. This has the disadvantageous effect of having some data sets being used more than others, but it supports
better cross-population when multiple data files are being used.
* For example, if file1 contained 2 data sets and file2 contained 4 data sets, then a sequential sweep through
these file would set up a repeating pattern with line 1 of file2 always being paired with line 1 of file1. For
example, in 8 runs, we would get this pattern of line numbers from each run:
* (1,1), (2,2), (1,3), (2,4), (1,1), (2,2), (1,3), (2,4)
* If the first file was allowed to skip a line, the pattern can produce a more comprehensive combination of
data:
* (1,1), (1,2), (2,3), (1,4), (2,1), (2,2), (2,3), (1,4)
* MonteCarloVariableFixed provides fixed-values to a variable for all generated data-sets. The values can be
represented as a double, int, or STL-string.
* MonteCarloVariableRandom is the base class for all variables being assigned a random value. The values can be
represented as a double, int, or STL-string. There are several subclasses:
* MonteCarloVariableRandomNormal provides a variable with a value dispersed according to a normal
distribution specified by its mean and standard deviation.
* MonteCarloVariableRandomUniformInt provides a variable with a value dispersed according to a uniform
distribution specified by its upper and lower bounds. This class represents a discrete distribution, providing an
integer value.
* MonteCarloVariableRandomUniform provides a variable with a value dispersed according to a uniform
distribution specified by its upper and lower bounds. This class represents a continuous distribution.
* MonteCarloVariableRandomStringSet represents a discrete variable, drawn from a set of STL-strings. The
class inherits from MonteCarloVariableRandomUniform; this distribution generates a continuous value in [0,1)
and scales and casts that to an integer index in {0, …, size-1} where size is the number of available strings
from which to choose.
Note an astute reader may question why the discrete MonteCarloVariableRandomStringSet inherits from
the continuous MonteCarloVariableRandomUniform rather than from the discrete
MonteCarloVariableRandomUniformInt. The rationale is based on the population of the vector of
selectable strings in this class. It is desirable to have this vector be available for population outside the
construction of the class, so at construction time the size of this vector is not known. However, the
construction of the MonteCarloVariableRandomUniformInt requires specifying the lower and upper
bounds, which would be 0 and size-1 respectively. Because size is not known at construction, this cannot
be specified. Conversely, constructing a MonteCarloVariableRandomUniform with bounds at [0,1) still
allows for scaling to the eventual size of the strings vector.
* MonteCarloVariableSemiFixed utilizes a two-step process. First, a seed-variable has its value generated, then that
value is copied to this variable. The seed-variable could be a “throw-away” variable, used only to seed this value, or it
could be an instance of another dispersed variable. Once the value has been copied to this instance, it is retained in this
instance for all data sets. The seed-variable will continue to generate a new value for each data set, but they will not be
seen by this variable after that first set.
The seed-variable can be any type of MonteCarloVariable, but note that not all types of MonteCarloVariable actually
make sense to use in this context. Most of the usable types are specialized types of MonteCarloVariableRandom.
However, restricting the seed-variable in such a way would limit the extensibility of the model. All
MonteCarloVariableRandom types use the C++ \<random\> library for data generation. Limiting the
MonteCarloVariableSemiFixed type to be seeded only by something using the \<random\> library violates the concept of
free-extensibility. Consequently, the assigned value may be extracted from any MonteCarloVariable type. The only
constraint is that the command generated by the seed-variable includes an “=” symbol; everything to the right of that
symbol will be assigned to this variable.
* MonteCarloPythonLineExec provides a line of executable Python code that can be used to compute the value of this
variable. So rather than generating an assignment statement, e.g.
```
var_x = 5
```
when the MonteCarloMaster processes an instance of this class, it will use a character string to generate an
instruction statement, e.g.
```
var_x = math.sin(2 * math.pi * object.circle_fraction)
```
(in this case, the character string would be “math.sin(2 * math.pi * object.circle_fraction)” and
object.circle_fraction could be a previously-dispersed variable).
A particularly useful application of this capability is in generating systematic data sweeps across a domain, as
opposed to random distributions within a domain. These are commonly implemented as a for-loop, but we can use
the MonteCarloPythonLineExec to generate them internally. The first data assignment made in each file is to a
run-number, which can be used as an index. The example shown below will generate a sweep across the domain
[20,45) in steps of 2.5.
```
object.sweep_variable = (monte_carlo.master.monte_run_number % 10) * 2.5 + 20
```
* MonteCarloPythonFileExec is used when simple assignments and one-line instructions are insufficient, such as
when one generated-value that feeds into an analytical algorithm to generate multiple other values. With this class,
the execution of the Python file generated by MonteCarloMaster will hit a call to execute a file as specified by this
class. This is an oddity among the bank of MonteCarloVariable implementations. In all other implementations,
the identifying variable_name is used to identify the variable whose value is to be assigned (or computed). With
the MonteCarloPythonFileExec implementation, the variable_name is hijacked to provide the name of the file to
be executed.
## 3.2 Mathematical Formulation
No mathematical formulation. The random number generators use the C++ \<random\> library.
# 4 User's Guide
## 4.1 What to expect
The role played by this model can be easily misunderstood, so let's start there.
**This model generates Python files containing assignments to variables.**
That's it! It does not manage MonteCarlo runs. It does not execute any simulations. When it runs, it creates the requested
number of Python files and exits.
This design is deliberate; we want the model to generate the instruction sets that will allow execution of a set of dispersed
configurations. At that point, the simulation should cease, returning control to the user to distribute the execution of those
configurations according to whatever distribution mechanism they desire. This could be:
* something really simple, like a wild-card, \<executive\> `MONTE_RUN_test/RUN*/monte_input.py`
* a batch-script,
* a set of batch-scripts launching subsets onto different machines,
* a load-management service, like SLURM
* any other mechanism tailored to the user's currently available computing resources
The intention is that the model runs very early in the simulation sequence. If the model is inactive (as when running a regular, non-MonteCarlo run), it will take no action. But when this model is activated, the user should expect the simulation to terminate before it starts on any propagation.
**When a simulation executes with this model active, the only result of the simulation will be the generation of files containing the assignments to the dispersed variables. The simulation should be expected to terminate at t=0.**
### 4.1.1 Trick Users
The model is currently configured for users of the Trick simulation engine. The functionality of the model is almost exclusively independent of the chosen simulation engine, with the exceptions being the shutdown sequence, and the application of units information in the variables.
Found at the end of the `MonteCarloMaster::execute()` method, the following code:
```c++
exec_terminate_with_return(0, __FILE__, __LINE__,message.c_str());
```
is a Trick instruction set to end the simulation.
Found at the end of `MonteCarloVariable::insert_units()`, the following code:
```c++
// TODO: Pick a unit-conversion mechanism
// Right now, the only one available is Trick:
trick_units( pos_equ+1);
```
provides the call to
```c++
MonteCarloVariable::trick_units(
size_t insertion_pt)
{
command.insert(insertion_pt, " trick.attach_units(\"" + units + "\",");
command.append(")");
```
which appends Trick instructions to interpret the generated value as being represented in the specified units.
The rest of the User's Guide will use examples of configurations for Trick-simulation input files.
### 4.1.2 Non-Trick Users
To configure the model for simulation engines other than Trick, the Trick-specific content identified above should be replaced with equivalent content that will result in:
* the shutdown of the simulation, and
* the conversion of units from the type specified in the distribution specification to the type native to the variable to which the generated value is to be assigned.
While the rest of the User's Guide will use examples of configurations for Trick-simulation input files, understand that these are mostly just C++ or Python code setting the values in this model to make it work as desired. Similar assignments would be required for any other simulation engine.
## 4.2 MonteCarlo Manager (MonteCarloMaster)
### 4.2.1 Instantiation
The instantiation of MonteCarloMaster would typically be done directly in the S_module. The construction of this instance takes a single argument, a STL-string describing its own location within the simulation data-structure.
The MonteCarloMaster class has a single public-interface method call, `MonteCarloMaster::execute()`. This method has 2 gate-keeping flags that must be set (the reason for there being 2 will be explained later):
* `active`
* `generate_dispersions`
If either of these flags is false (for reference, `active` is constructed as false and `generate_dispersions` is constructed as true) then this method returns with no action. If both are true, then the model will generate the dispersions, write those dispersions to the external files, and shut down the simulation.
An example S-module
```c++
class MonteCarloSimObject : public Trick::SimObject
{
public:
MonteCarloMaster master; // <--- master is instantiated
MonteCarloSimObject(std::string location)
:
master(location) // <--- master is constructed with this STL-string
{
P_MONTECARLO ("initialization") master.execute(); // <--- the only function
call
}
};
MonteCarloSimObject monte_carlo("monte_carlo.master"); // <--- location of master
is passed as an
argument
```
### 4.2.2 Configuration
The configuration of the MonteCarloMaster is something to be handled as a user-input to the simulation without requiring re-compilation; as such, it is typically handled in a Python input file. There are two sections for configuration:
* modifications to the regular input file, and
* new file-input or other external monte-carlo initiation mechanism
#### 4.2.2.1 Modifications to the regular input file
A regular input file sets up a particular scenario for a nominal run. To add monte-carlo capabilities to this input file, the
following code should be inserted somewhere in the file:
```python
if monte_carlo.master.active:
# insert non-mc-variable MC configurations like logging
if monte_carlo.master.generate_dispersions:
    exec(open("Modified_data/monte_variables.py").read())
```
Let's break this down, because it explains the reason for having 2 flags:
| `generate_dispersions` | `active` | Result |
| :--- |:---| :--- |
| false | false | Regular (non-monte-carlo) run |
| false | true | Run scenario with monte-carlo configuration and pre-generated dispersions |
| true | false | Regular (non-monte-carlo) runs |
| true | true | Generate dispersions for this scenario, but do not run the scenario |
1. If the master is inactive, this content is passed over and the input file runs just as it would without this content
2. Having the master `active` flag set to true instructs the simulation that the execution is intended to be part of a monte-carlo analysis. Now there are 2 types of executions that fit this intent:
* (a) The generation of the dispersion files
* (b) The execution of this run with the application of previously-generated dispersions
Any code to be executed for case (a) must go inside the `generate_dispersions` gate. Any code to be executed for
case (b) goes inside the `active` gate, but outside the `generate_dispersions` gate.
You may wonder why this distinction is made. In many cases, it is desirable to make the execution for monte-carlo
analysis subtly different to that for regular analysis. One commonly used distinction is logging of data; the logging
requirement may differ between a regular run and one as part of a monte-carlo analysis (typically, monte-carlo runs
execute with reduced logging). By providing a configuration area for a monte-carlo run, we can support these
distinctions.
Note any code to be executed for only non-monte-carlo runs can be put in an else: block. For example, this code
will set up one set of logging for a monte-carlo run, and another for a non-monte-carlo run of the same scenario:
```python
if monte_carlo.master.active:
    exec(open("Log_data/log_for_monte_carlo.py").read())
    if monte_carlo.master.generate_dispersions:
        exec(open("Modified_data/monte_variables.py").read())
else:
    exec(open("Log_data/log_for_regular.py").read())
```
3. If the `generate_dispersions` flag is also set to true, the `MonteCarloMaster::execute()` method will execute,
generating the dispersion files and shutting down the simulation.
#### 4.2.2.2 Initiating MonteCarlo
Somewhere outside this file, the `active` and `generate_dispersions` flags must be set. This can be performed either in a separate input file or via a command-line argument. Unless the command-line argument capability is already supported, by far the easiest mechanism is to create a new input file that subsequently reads the existing input file:
```
monte_carlo.master.activate("RUN_1")
exec(open("RUN_1/input.py").read())
```
The activate method takes a single string argument, representing the name of the run. This must be exactly the same name as the directory containing the original input file, “RUN_1” in the example above. This argument is used in 2 places (\<argument\> in these descriptions refers to the content of the argument string):
* In the creation of a `MONTE_<argument>` directory. This directory will contain some number of sub-directories identified as, for example, RUN_01, RUN_02, RUN_03, etc. each of which will contain one of the generated dispersion files.
* In the instructions written into the generated dispersion files to execute the content of the input file found in `<argument>`.
#### 4.2.2.3 Additional Configurations
There are additional configurations instructing the MonteCarloMaster on the generation of the new dispersion files. Depending on the use-case, these could either be embedded within the `if monte_carlo.master.generate_dispersions:` block of the original input file, or in the secondary input file (or command-line arguments if configured to do so).
* Number of runs is controlled with a single statement, e.g.
```monte_carlo.master.set_num_runs(10)```
* Generation of meta-data. The meta-data provides a summary of which variables are being dispersed, the type of dispersion applied to each, the random seeds being used, and correlation between different variables. This is written out to a file called MonteCarlo_Meta_data_output in the MONTE_* directory.
```monte_carlo.master.generate_meta_data = True```
* Changing the name of the automatically-generated monte-directory. By default, this takes the form “MONTE_\<run_name\>” as assigned in the MonteCarloMaster::activate(...) method. The monte_dir variable is public and can be reset after activation and before the `MonteCarloMaster::execute()` method runs. This is particularly useful if it is desired to compare two distribution sets for the same run.
```monte_carlo.master.monte_dir = "MONTE_RUN_1_vers2"```
* Changing the input file name. It is expected that most applications of this model will run with a typical organization of a Trick simulation. Consequently, the original input file is probably named input.py, and this is the default setting for the input_file_name variable. However, to support other cases, this variable is public and can be changed at any time between construction and the execution of the `MonteCarloMaster::execute()` method.
```monte_carlo.master.input_file_name = "modified_input.py"```
* Padding the filenames of the generated files. By default, the generated RUN directories in the generated MONTE_* directory will have their numerical component padded according to the number of runs. When:
* between 1 - 10 runs are generated, the directories will be named RUN_0, RUN_1, …
* between 11-100 runs are generated, the directories will be named RUN_00, RUN_01, …
* between 101-1000 runs are generated, the directories will be named RUN_000, RUN_001, …
* etc.
Specification of a minimum padding width is supported. For example, it might be desired to create 3 runs with names RUN_00000, RUN_00001, and RUN_00002, in which case the minimum-padding should be specified as 5 characters
```monte_carlo.master.minimum_padding = 5```
* Changing the run-name. For convenience, the run-name is provided as an argument in the MonteCarloMaster::activate(...) method. The run_name variable is public, and can be reset after activation and before the `MonteCarloMaster::execute()` method runs. Because this setting determines which run is to be launched from the dispersion files, resetting run_name has limited application effectively limited to correcting an error, which could typically be more easily corrected directly.
```monte_carlo.master.run_name = "RUN_2"```
## 4.3 MonteCarlo Variables (MonteCarloVariable)
The instantiation of the MonteCarloVariable instances is typically handled as a user-input to the simulation without requiring re-compilation. As such, these are usually implemented in Python input files. This is not a requirement, and these instances can be compiled as part of the simulation build. Both cases are presented.
### 4.3.1 Instantiation and Registration
For each variable to be dispersed, an instance of a MonteCarloVariable must be created, and that instance registered with the MonteCarloMaster instance:
1. Identify the type of dispersion desired
2. Select the appropriate type of MonteCarloVariable to provide that dispersion.
3. Create the new instance using its constructor.
4. Register it with the MonteCarloMaster using the `MonteCarloMaster::add_variable( MonteCarloVariable&)` method.
#### 4.3.1.1 Python input file implementation for Trick:
When the individual instances are registered with the master, it only records the address of those instances. A user may create completely new variable names for each dispersion, or use a generic name as illustrated in the example below. Because these are typically created within a Python function, it is important to add the thisown=False instruction on each creation to prevent its destruction when the function returns.
```python
mc_var = trick.MonteCarloVariableRandomUniform( "object.x_uniform", 0, 10, 20)
mc_var.thisown = False
monte_carlo.master.add_variable(mc_var)
mc_var = trick.MonteCarloVariableRandomNormal( "object.x_normal", 0, 0, 5)
mc_var.thisown = False
monte_carlo.master.add_variable(mc_var)
```
#### 4.3.1.2 C++ implementation in its own class:
In this case, the instances do have to be uniquely named.
Note that the registering of the variables could be done in the class constructor rather than in an additional method (process_variables), thereby eliminating the need to store the reference to MonteCarloMaster. In this case, the `generate_dispersions` flag is completely redundant because the variables are already registered by the time the input file is executed. Realize, however, that doing so does carry the overhead of registering those variables with the MonteCarloMaster every time the simulation starts up. This can be a viable solution when there are only a few MonteCarloVariable instances, but is generally not recommended; using an independent method (process_variables) allows restricting the registering of the variables to be executed only when generating new dispersions.
```c++
class MonteCarloVarSet {
private:
MonteCarloMaster & master;
public:
MonteCarloVariableRandomUniform x_uniform;
MonteCarloVariableRandomNormal x_normal;
...
MonteCarloVarSet( MonteCarloMaster & master_)
:
master(master_),
x_uniform("object.x_uniform", 0, 10, 20),
x_normal ("object.x_normal", 0, 0, 5),
...
{ };
void process_variables() {
master.add_variable(x_uniform);
master.add_variable(x_normal);
...
}
};
```
#### 4.3.1.3 C++ implementation within a Trick S-module:
Instantiating the variables into the same S-module as the master is also a viable design pattern. However, this can lead to a very long S-module so is typically only recommended when there are few variables. As with the C++ implementation in a class, the variables can be registered with the master in the constructor rather than in an additional method, with the same caveats presented earlier.
```c++
class MonteCarloSimObject : public Trick::SimObject
{
public:
MonteCarloMaster master;
MonteCarloVariableRandomUniform x_uniform;
MonteCarloVariableRandomNormal x_normal;
...
MonteCarloSimObject(std::string location)
:
master(location),
x_uniform("object.x_uniform", 0, 10, 20),
x_normal ("object.x_normal", 0, 0, 5),
...
{ };
void process_variables() {
master.add_variable(x_uniform);
master.add_variable(x_normal);
...
};
{
P_MONTECARLO ("initialization") master.execute();
} };
MonteCarloSimObject monte_carlo("monte_carlo.master");
```
### 4.3.2 input-file Access
If using a (compiled) C++ implementation with the registration conducted at construction, the `generate_dispersions` flag is not used in the input file.
```python
if monte_carlo.master.active:
if monte_carlo.master.generate_dispersions:
    exec(open("Modified_data/monte_variables.py").read())
```
(where monte_variables.py is the file containing the mc_var = … content described earlier)
```python
if monte_carlo.master.active:
if monte_carlo.master.generate_dispersions:
monte_carlo_variables.process_variables()
```
If using a (compiled) C++ implementation with a method to process the registration, that method call must be contained inside the `generate_dispersions` gate in the input file:
```
if monte_carlo.master.active:
# add only those lines such as logging configuration
```
### 4.3.3 Configuration
For all variable-types, the variable_name is provided as the first argument to the constructor. This variable name must include the full address from the top level of the simulation. After this argument, each variable type differs in its construction arguments and subsequent configuration options.
#### 4.3.3.1 MonteCarloVariable
MonteCarloVariable is an abstract class; its instantiable implementations are presented below. There is one important configuration for general application to these implementations, the setting of units. In a typical simulation, a variable has an inherent unit-type; these are often SI units, but may be based on another system. Those native units may be different to those in which the distribution is described. In this case, assigning the generated numerical value to the variable without heed to the units mismatch would result in significant error.
```set_units(std::string units)```
This method specifies that the numerical value being generated is to be interpreted in the specified units.
Notes
* If it is known that the variable's native units and the dispersion units match (including the case of a dimensionless value), this method is not needed.
* This method is not applicable to all types of MonteCarloVariable; use with MonteCarloVariableRandomBool and MonteCarloPython* is considered undefined behavior.
#### 4.3.3.2 MonteCarloVariableFile
The construction arguments are:
1. variable name
2. filename containing the data
3. column number containing data for this variable
4. (optional) first column number. This defaults to 1, but some users may want to zero-index their column numbers, in which case it can be set to 0.
There is no additional configuration beyond the constructor.
#### 4.3.3.3 MonteCarloVariableFixed
The construction arguments are:
1. variable name
2. value to be assigned
Additional configuration for this model includes the specification of the maximum number of lines to skip between runs.
`max_skip`. This public variable has a default value of 0 meaning that the next run will be drawn from the next line of data, but this can be adjusted.
#### 4.3.3.4 MonteCarloVariableRandomBool
The construction arguments are:
1. variable name
2. seed for random generator
There is no additional configuration beyond the constructor.
#### 4.3.3.5 MonteCarloVariableRandomNormal
The construction arguments are:
1. variable name
2. seed for random generator, defaults to 0
3. mean of distribution, defaults to 0
4. standard-deviation of distribution, defaults to 1.
The normal distribution may be truncated, and there are several configuration settings associated with truncation. Note that for all of these truncation options, if the lower truncation bound is set to be larger than the upper truncation bound, the generation of the dispersed value will fail and the simulation will terminate without generation of files. If the upper and lower bounds are set to be equal, the result will be a forced assignment to that value.
`TruncationType`
This is an enumerated type, supporting the specification of the truncation limits in one of three ways:
* `StandardDeviation`: The distribution will be truncated at the specified number(s) of standard deviations away from the mean.
* `Relative`: The distribution will be truncated at the specified value(s) relative to the mean value.
* `Absolute`: The distribution will be truncated at the specified value(s).
`max_num_tries`
The truncation is performed by repeatedly generating a number from the unbounded distribution until one is found that lies within the truncation limits. This max_num_tries value determines how many attempts may be made before the algorithm concedes. It defaults to 10,000. If a value has not been found within the specified number of tries, an error message is sent and the value is calculated according to the following rules:
* For a distribution truncated at only one end, the truncation limit is used
* For a distribution truncated at both ends, the midpoint value between the two truncation limits is used.
`truncate( double limit, TruncationType)`
This method provides a symmetric truncation, with the numerical value provided by limit being interpreted as a number of standard-deviations either side of the mean, a relative numerical value from the mean, or an absolute value.
The value limit should be positive. If a negative value is provided, it will be negated to a positive value.
The use of TruncationType Absolute and this method requires a brief clarification because this may result in an asymmetric distribution. In this case, the distribution will be truncated to lie between (-limit, limit) which will be asymmetric for all cases in which the mean is non-zero.
`truncate( double min, double max, TruncationType)`
This method provides a more general truncation, with the numerical value provided by min and max being interpreted as a number of standard-deviations away from the mean, a relative numerical value from the mean, or an absolute value.
Unlike the previous method, the numerical arguments (min and max) may be positive or negative, and care must be taken especially when specifying min with TruncationType StandardDeviation or Relative. Realize that a positive value of min will result in a lower bound with value above that of the mean; min does not mean “distance to the left of the mean”, it means the smallest acceptable value relative to the mean.
`truncate_low( double limit, TruncationType)`
This method provides a one-sided truncation. All generated values will be above the limit specification.
`truncate_high( double limit, TruncationType)`
This method provides a one-sided truncation. All generated values will be below the limit specification.
`untruncate()`
This method removes previously configured truncation limits.
#### 4.3.3.6 MonteCarloVariableRandomStringSet
The construction arguments are:
1. variable name
2. seed for random generator
This type of MonteCarloVariable contains a STL-vector of STL-strings containing the possible values that can be assigned by this generator. This vector is NOT populated at construction time and must be configured.
`add_string(std::string new_string)`
This method adds the specified string (`new_string`) to the vector of available strings
#### 4.3.3.7 MonteCarloVariableRandomUniform
The construction arguments are:
1. variable name
2. seed for random generator, defaults to 0
3. lower-bound of distribution, defaults to 0
4. upper-bound for distribution, defaults to 1
There is no additional configuration beyond the constructor
#### 4.3.3.8 MonteCarloVariableRandomUniformInt
The construction arguments are:
1. variable name
2. seed for random generator, defaults to 0
3. lower-bound of distribution, defaults to 0
4. upper-bound for distribution, defaults to 1
There is no additional configuration beyond the constructor
#### 4.3.3.9 MonteCarloVariableSemiFixed
The construction arguments are:
1. variable name
2. reference to the MonteCarloVariable whose generated value is to be used as the fixed value.
There is no additional configuration beyond the constructor.
#### 4.3.3.10 MonteCarloPythonLineExec
The construction arguments are:
1. variable name
2. an STL-string providing the Python instruction for the computing of the value to be assigned to the specified variable.
There is no additional configuration beyond the constructor.
#### 4.3.3.11 MonteCarloPythonFileExec
The construction argument is:
1. name of the file to be executed from the generated input file.
There is no additional configuration beyond the constructor.
## 4.4 Information on the Generated Files
This section is for informational purposes only to describe the contents of the automatically-generated dispersion files. Users do not need to take action on any content in here.
The generated files can be broken down into 3 parts:
* Configuration for the input file. These two lines set the flags such that when this file is executed, the content of the original input file will configure the run for a monte-carlo analysis but without re-generating the dispersion files.
```python
monte_carlo.master.active = True
monte_carlo.master.generate_dispersions = False
```
* Execution of the original input file. This line opens the original input file so that when this file is executed, the original input file is also executed automatically.
```python
exec(open('RUN_1/input.py').read())
```
* Assignment to simulation variables. This section always starts with the assignment to the run-number, which is also found in the name of the run, so RUN_0 gets a 0, RUN_1 gets a 1, etc. This value can be used, for example, to generate data sweeps as described in section MonteCarloPythonLineExec above.
```python
monte_carlo.master.monte_run_number = 0
object.test_variable1 = 5
object.test_variable1 = 1.23456789
...
```
## 4.5 Extension
The model is designed to be extensible and while we have tried to cover the most commonly used applications, complete anticipation of all use-case needs is impossible. The most likely candidate for extension is in the area of additional distributions. In this case:
* A new distribution should be defined in its own class
* That class shall inherit from MonteCarloVariable or, if it involves a random generation using a distribution found in the C++ `<random>` library, from MonteCarloVariableRandom.
* Populate the command variable inherited from MonteCarloVariable. This is the STL string representing the content that the MonteCarloMaster will place into the generated dispersion files.
* Call the `insert_units()` method inherited from MonteCarloVariable
* Set the `command_generated` flag to true if the command has been successfully generated.
## 4.6 Running generated runs within an HPC framework
Modern HPC (High Performance Computing) labs typically have one or more tools for managing the execution of jobs across multiple computers. There are several linux-based scheduling tools, but this section focuses on running the generated runs using a SLURM (Simple Linux Utility for Resource Management) array job. Consider this script using a simulation built with gcc 4.8 and a user-configured run named `RUN_example` which has already executed once with the Monte-Carlo Generation model enabled to generate 100 runs on disk:
```bash
#SBATCH --array=0-99
# This is an example sbatch script demonstrating running an array job in SLURM.
# SLURM is an HPC (High-Performance-Computing) scheduling tool installed in
# many modern super-compute clusters that manages execution of a massive
# number of user-jobs. When a script like this is associated with an array
# job, this script is executed once per enumerated value in the array. After
# the Monte Carlo Generation Model executes, the resulting RUNs can be queued
# for SLURM execution using a script like this. Alternatively, sbatch --wrap
# can be used. See the SLURM documentation for more in-depth information.
#
# Slurm: https://slurm.schedmd.com/documentation.html
# $SLURM_ARRAY_TASK_ID is automatically provided by slurm, and will be an
# integer between 0-99 per the "SBATCH --array" flag specified at the top of
# this script
echo "SLURM has provided us with array job integer: $SLURM_ARRAY_TASK_ID"
# Convert this integer to a zero-padded string matching the RUN naming
# convention associated with this set of generated runs
RUN_NUM=`printf %02d $SLURM_ARRAY_TASK_ID`
# Execute the single trick simulation run associated with RUN_NUM
echo "Running RUN_$RUN_NUM ..."
./S_main_Linux_4.8_x86_64.exe MONTE_RUN_example/RUN_${RUN_NUM}/monte_input.py
```
The above script can be executed within a SLURM environment by running `sbatch <path/to/script.sh>`. This single command will create 100 independent array jobs in SLURM, allowing the scheduler to execute them as resources permit. Be extra careful with the zero-padding logic in the script above. The monte-carlo generation model will create zero-padded `RUN` names suitable for the number of runs requested to be generated by the user. The `%02d` part of the script above specifies 2-digit zero-padding which is suitable for 100 runs. Be sure to match this logic with the zero-padding as appropriate for your use-case.
For more information on SLURM, refer to the project documentation: https://slurm.schedmd.com/documentation.html
# 5 Verification
The verification of the model is provided by tests defined in `test/SIM_mc_generation`. This sim was originally developed by JSC/EG NASA in the 2020 timeframe. The verification section of the original documentation is omitted from this markdown file because it heavily leverages formatting that markdown cannot support. It can be viewed [here](MCG_verification_2020.pdf).

View File

@ -1,3 +1,7 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Miscellaneous Trick Tools](Miscellaneous-Trick-Tools) → Python Variable Server Client |
|------------------------------------------------------------------|
`variable_server.py` is a Python module for communicating with a sim's variable server from a Python program. Its primary purpose is to easily get and set variable values and units, but it also includes some additional convenience methods for affecting the sim's state. The code itself is well-commented, so I won't be reproducing the API here. Run `pydoc variable_server` (in the containing directory) for that.
# Release Your Resources!
@ -384,4 +388,4 @@ class Variable(__builtin__.object)
| should not directly change any part of this class.
```
[Continue to Software Requirements](software_requirements_specification/SRS)
[Continue to Trick Ops](TrickOps)

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Miscellaneous Trick Tools](Miscellaneous-Trick-Tools) → Trick Ops |
|------------------------------------------------------------------|
# Table of Contents
* [Requirements](#Requirements)
* [Features](#Features)
@ -9,10 +12,11 @@
* [Other Useful Examples](#other-useful-examples)
* [The TrickOps Design](#regarding-the-design-why-do-i-have-to-write-my-own-script)
* [Tips & Best Practices](#tips--best-practices)
* [MonteCarloGenerationHelper](#montecarlogenerationhelper---trickops-helper-class-for-montecarlogeneratesm-users)
# TrickOps
TrickOps is shorthand for "Trick Operations", and is a `python3` framework that provides an easy-to-use interface for common testing and workflow actions that Trick simulation developers and users often run repeatedly. Good software developer workflows typically have a script or set of scripts that the developer can run to answer the question "have I broken anything?". The purpose of TrickOps is to provide the logic central to managing these tests while allowing each project to define how and what they wish to test. Don't reinvent the wheel, use TrickOps!
TrickOps is shorthand for "Trick Operations". TrickOps is a `python3` framework that provides an easy-to-use interface for common testing and workflow actions that Trick simulation developers and users often run repeatedly. Good software developer workflows typically have a script or set of scripts that the developer can run to answer the question "have I broken anything?". The purpose of TrickOps is to provide the logic central to managing these tests while allowing each project to define how and what they wish to test. Don't reinvent the wheel, use TrickOps!
TrickOps is *not* a GUI, it's a set of python modules that you can `import` that let you build a testing framework for your Trick-based project with just a few lines of python code.
@ -51,39 +55,42 @@ Simple and readable, this config file is parsed by `PyYAML` and adheres to all n
```yaml
globals:
env: <-- optional literal string executed before all tests, e.g. env setup
parallel_safety: <-- <loose|strict> strict won't allow multiple input files per RUN dir
env: <-- optional literal string executed before all tests, ex: ". env.sh"
SIM_abc: <-- required unique name for sim of interest, must start with SIM
path: <-- required SIM path relative to project top level
description: <-- optional description for this sim
labels: <-- optional list of labels for this sim, can be used to get sims
- model_x by label within the framework, or for any other project-defined
- verification purpose
build_command: <-- optional literal cmd executed for SIM_build, defaults to trick-CP
build_args: <-- optional literal args passed to trick-CP during sim build
binary: <-- optional name of sim binary, defaults to S_main_{cpu}.exe
size: <-- optional estimated size of successful build output file in bytes
phase: <-- optional phase to be used for ordering builds if needed
parallel_safety: <-- <loose|strict> strict won't allow multiple input files per RUN dir.
Defaults to "loose" if not specified
runs: <-- optional dict of runs to be executed for this sim, where the
RUN_1/input.py --foo: dict keys are the literal arguments passed to the sim binary
RUN_2/input.py: and the dict values are other run-specific optional dictionaries
... described as follows ...
RUN_[10-20]/input.py: described in indented sections below. Zero-padded integer ranges
can specify a set of runs with continuous numbering using
[<starting integer>-<ending integer>] notation
returns: <int> <---- optional exit code of this run upon completion (0-255). Defaults
to 0
compare: <---- optional list of <path> vs. <path> comparison strings to be
- a vs. b compared after this run is complete. This is extensible in that
- d vs. e all non-list values are ignored and assumed to be used to define
- ... an alternate comparison method in derived classes
- a vs. b compared after this run is complete. Zero-padded integer ranges
- d vs. e are supported as long as they match the pattern in the parent run.
- ... All non-list values are ignored and assumed to be used to define
- ... an alternate comparison method in a class extending this one
analyze: <-- optional arbitrary string to execute as job in bash shell from
project top level, for project-specific post-run analysis
valgrind: <-- optional dict describing how to execute runs within valgrind
flags: <-- string of all flags passed to valgrind for all runs
runs: <-- list of literal arguments passed to the sim binary through
- RUN_1... valgrind
phase: <-- optional phase to be used for ordering runs if needed
valgrind: <-- optional string of flags passed to valgrind for this run.
If missing or empty, this run will not use valgrind
non_sim_extension_example:
will: be ignored by TrickWorkflow parsing for derived classes to implement as they wish
```
Almost everything in this file is optional, but there must be at least one top-level key that starts with `SIM` and it must contain a valid `path: <path/to/SIM...>` with respect to the top level directory of your project. Here, `SIM_abc` represents "any sim" and the name is up to the user, but it *must* begin with `SIM` since `TrickWorkflow` purposefully ignores any top-level key not beginning with `SIM` in order to allow for extensibility of the YAML file for non-sim tests specific to a project.
Almost everything in this file is optional, but there must be at least one top-level key that starts with `SIM` and it must contain a valid `path: <path/to/SIM...>` with respect to the top level directory of your project. Here, `SIM_abc` represents "any sim" and the name is up to the user, but it *must* begin with `SIM` since `TrickWorkflow` purposefully ignores any top-level key not beginning with `SIM` and any key found under the `SIM` key not matching any named parameter above. This design allows for extensibility of the YAML file for non-sim tests specific to a project.
There is *no limit* to number of `SIM`s, `runs:`, `compare:` lists, `valgrind` `runs:` list, etc. This file is intended to contain every Sim and and every sim's run, and every run's comparison and so on that your project cares about. Remember, this file represents the *pool* of tests, not necessarily what *must* be tested every time your scripts which use it run.
@ -97,19 +104,21 @@ cd trick/share/trick/trickops/
```
When running, you should see output that looks like this:
![ExampleWorkflow In Action](trickops_example.png)
![ExampleWorkflow In Action](images/trickops_example.png)
When running, you'll notice that tests occur in two phases. First, sims build in parallel up to three at a time. Then when all builds complete, sims run in parallel up to three at a time. Progress bars show how far along each build and sim run is at any given time. The terminal window will accept scroll wheel and arrow input to view current builds/runs that are longer than the terminal height.
When running this example script, you'll notice that tests occur in two phases. First, sims build in parallel up to three at a time. Then when all builds complete, sims run in parallel up to three at a time. Progress bars show how far along each build and sim run is at any given time. The terminal window will accept scroll wheel and arrow input to view current builds/runs that are longer than the terminal height. Before the script finishes, it reports a summary of what was done, providing a list of which sims and runs were successful and which were not.
Looking inside the script, the code at top of the script creates a yaml file containing a large portion of the sims and runs that ship with trick and writes it to `/tmp/config.yml`. This config file will be input to the framework. At the bottom of the script is where the magic happens, this is where the TrickOps modules are used:
Looking inside the script, the code at top of the script creates a yaml file containing a large portion of the sims and runs that ship with trick and writes it to `/tmp/config.yml`. This config file is then used as input to the `TrickWorkflow` framework. At the bottom of the script is where the magic happens, this is where the TrickOps modules are used:
```python
from TrickWorkflow import *
class ExampleWorkflow(TrickWorkflow):
def __init__( self, quiet, trick_top_level='/tmp/trick'):
# Real projects already have trick somewhere, but for this test, just clone it
# Real projects already have trick somewhere, but for this example, just clone & build it
if not os.path.exists(trick_top_level):
os.system('cd %s && git clone https://github.com/nasa/trick' % (os.path.dirname(trick_top_level)))
if not os.path.exists(os.path.join(trick_top_level, 'lib64/libtrick.a')):
os.system('cd %s && ./configure && make' % (trick_top_level))
# Base Class initialize, this creates internal management structures
TrickWorkflow.__init__(self, project_top_level=trick_top_level, log_dir='/tmp/',
trick_dir=trick_top_level, config_file="/tmp/config.yml", cpus=3, quiet=quiet)
@ -131,9 +140,11 @@ Let's look at a few key parts of the example script. Here, we create a new class
from TrickWorkflow import *
class ExampleWorkflow(TrickWorkflow):
def __init__( self, quiet, trick_top_level='/tmp/trick'):
# Real projects already have trick somewhere, but for this test, just clone it
# Real projects already have trick somewhere, but for this example, just clone & build it
if not os.path.exists(trick_top_level):
os.system('cd %s && git clone https://github.com/nasa/trick' % (os.path.dirname(trick_top_level)))
if not os.path.exists(os.path.join(trick_top_level, 'lib64/libtrick.a')):
os.system('cd %s && ./configure && make' % (trick_top_level))
```
Our new class `ExampleWorkflow.py` can be initialized however we wish as long as it provides the necessary arguments to its base class initializer. In this example, `__init__` takes two parameters: `trick_top_level` which defaults to `/tmp/trick`, and `quiet` which will be `False` unless `quiet` is found in the command-line args to this script. The magic happens on the very next line where we call the base-class `TrickWorkflow` initializer which accepts four required parameters:
@ -145,15 +156,15 @@ The required parameters are described as follows:
* `project_top_level` is the absolute path to the highest-level directory of your project. The "top level" is up to the user to define, but usually this is the top level of your repository and at minimum must be a directory from which all sims, runs, and other files used in your testing are recursively reachable.
* `log_dir` is a path to a user-chosen directory where all logging for all tests will go. This path will be created for you if it doesn't already exist.
* `trick_dir` is an absolute path to the top level directory for the instance of trick used for your project. For projects that use trick as a `git` `submodule`, this is usually `<project_top_level>/trick`
* `config_file` is the path to a YAML config file describing the sims, runs, etc. for your project. It's recommended this file be tracked in your SCM tool but that is not required. More information on the syntax expected in this file in the **The YAML File** section below.
* `config_file` is the path to a YAML config file describing the sims, runs, etc. for your project. It's recommended this file be tracked in your SCM tool but that is not required. More information on the syntax expected in this file in the **The YAML File** section above.
The optional parameters are described as follows:
* `cpus` tells the framework how many CPUs to use on sim builds. This translates directly to `MAKEFLAGS` and is separate from the maximum number of simultaneous sim builds.
* `quiet` tells the framework to suppress progress bars and other verbose output. It's a good idea to use `quiet=True` if your scripts are going to be run in a continuous integration (CI) testing framework such as GitHub Actions, GitLab CI, or Jenkins, because it suppresses all `curses` logic during job execution which itself expects `stdin` to exist.
When `TrickWorkflow` initializes, it reads the `config_file` and verifies the information given matches the expected convention. If a non-fatal error is encountered, a message detailing the error is printed to `stdout` and the internal timestamped log file under `log_dir`. A fatal error will `raise RuntimeError`.
When `TrickWorkflow` initializes, it reads the `config_file` and verifies the information given matches the expected convention. If a non-fatal error is encountered, a message detailing the error is printed to `stdout` and the internal timestamped log file under `log_dir`. A fatal error will `raise RuntimeError`. Classes which inherit from `TrickWorkflow` may also access `self.parsing_errors` and `self.config_errors` which are lists of errors encountered from parsing the YAML file and errors encountered from processing the YAML file respectively.
Moving on to the next important lines of code in our `ExampleWorkflow.py` script. The `def run(self):` line declares a function whose return code on run is passed back to the calling shell via `sys.exit()`. This is where we use the functions given to us by inherting from `TrickWorkflow`:
Moving on to the next few important lines of code in our `ExampleWorkflow.py` script. The `def run(self):` line declares a function whose return code on run is passed back to the calling shell via `sys.exit()`. This is where we use the functions given to us by inheriting from `TrickWorkflow`:
```python
@ -173,7 +184,7 @@ The last three lines simply print a detailed report of what was executed and man
return (builds_status or runs_status or self.config_errors)
```
The `ExampleWorkflow.py` uses sims/runs provided by trick to exercise *some* of the functionality provided by TrickOps. This script does not have any comparisons, post-run analyses, or valgrind runs defined in the YAML file, so there is no execution of those tests in this example.
The `ExampleWorkflow.py` script uses sims/runs provided by trick to exercise *some* of the functionality provided by TrickOps. This script does not have any comparisons, post-run analyses, or valgrind runs defined in the YAML file, so there is no execution of those tests in this example.
## `compare:` - File vs. File Comparisons
@ -188,8 +199,8 @@ SIM_ball:
RUN_foo/input.py:
RUN_test/input.py:
compare:
- path/to/SIM_/ball/RUN_test/log_a.csv vs. regression/SIM_ball/log_a.csv
- path/to/SIM_/ball/RUN_test/log_b.trk vs. regression/SIM_ball/log_b.trk
- path/to/SIM_ball/RUN_test/log_a.csv vs. regression/SIM_ball/log_a.csv
- path/to/SIM_ball/RUN_test/log_b.trk vs. regression/SIM_ball/log_b.trk
```
In this example, `SIM_ball`'s run `RUN_foo/input.py` doesn't have any comparisons, but `RUN_test/input.py` contains two comparisons, each of which compares data generated by the execution of `RUN_test/input.py` to a stored off version of the file under the `regression/` directory relative to the top level of the project. The comparisons themselves can be executed in your python script via the `compare()` function in multiple ways. For example:
@ -233,10 +244,98 @@ if not failure:
If an error is encountered, like `koviz` or a given directory cannot be found, `None` is returned in the first index of the tuple, and the error information is returned in the second index of the tuple for `get_koviz_report_job()`. The `get_koviz_report_jobs()` function just wraps the singular call and returns a tuple of `( list_of_jobs, list_of_any_failures )`. Note that `koviz` accepts entire directories as input, not specific paths to files. Keep this in mind when you organize how regression data is stored and how logged data is generated by your runs.
## `analyze:` - Post-Run Analysis
The optional `analyze:` section of a `run:` is intended to be a catch-all for "post-run analysis". The string given will be transformed into a `Job()` instance that can be retrieved and executed via `execute_jobs()` just like any other test. All analyze jobs are assumed to return 0 on success, non-zero on failure. One example use case for this would be creating a `jupyter` notebook that contains an analysis of a particular run.
## Defining sets of runs using [integer-integer] range notation
The `yaml` file for your project can grow quite large if your sims have a lot of runs. This is especially the case for users of monte-carlo, which may generate hundreds or thousands of runs that you may want to execute as part of your TrickOps script. In order to support these use cases without requiring the user to specify all of these runs individually, TrickOps supports a zero-padded `[integer-integer]` range notation in the `run:` and `compare:` fields. Consider this example `yaml` file:
```yaml
SIM_many_runs:
path: sims/SIM_many_runs
runs:
RUN_[000-100]/monte_input.py:
returns: 0
compare:
sims/SIM_many_runs/RUN_[000-100]/log_common.csv vs. baseline/sims/SIM_many_runs/log_common.csv
sims/SIM_many_runs/RUN_[000-100]/log_verif.csv vs. baseline/sims/SIM_many_runs/RUN_[000-100]/log_verif.csv
```
In this example, `SIM_many_runs` has 101 runs. Instead of specifying each individual run (`RUN_000/`, `RUN_001`, etc), in the `yaml` file, the `[000-100]` notation is used to specify a set of runs. All sub-fields of the run apply to that same set. For example, the default value of `0` is used for `returns:`, which also applies to all 101 runs. The `compare:` subsection supports the same range notation, as long as the same range is used in the `run:` named field. Each of the 101 runs shown above has two comparisons. The first `compare:` line defines a common file to be compared against all 101 runs. The second `compare:` line defines run-specific comparisons using the same `[integer-integer]` sequence. Note that when using these range notations zero-padding must be consistent, the values (inclusive) must be non-negative, and the square bracket notation must be used with the format `[minimum-maximum]`.
## `phase:` - An optional mechanism to order builds, runs, and analyses
The `yaml` file supports an optional parameter `phase: <integer>` at the sim and run level which allows the user to easily order sim builds, runs, and/or analyses, to suit their specific project constraints. If not specified, all sims, runs, and analyses, have a `phase` value of `0` by default. Consider this example `yaml` file with three sims:
```yaml
SIM_car:
path: sims/SIM_car
SIM_monte:
path: sims/SIM_monte
runs:
RUN_nominal/input.py --monte-carlo: # Generates the runs below
phase: -1
MONTE_RUN_nominal/RUN_000/monte_input.py: # Generated run
MONTE_RUN_nominal/RUN_001/monte_input.py: # Generated run
MONTE_RUN_nominal/RUN_002/monte_input.py: # Generated run
MONTE_RUN_nominal/RUN_003/monte_input.py: # Generated run
MONTE_RUN_nominal/RUN_004/monte_input.py: # Generated run
  # A sim with constraints that make the build finicky, and we can't change the code
SIM_external:
path: sims/SIM_external
phase: -1
runs:
RUN_test/input.py:
returns: 0
```
Here we have three sims: `SIM_car`, `SIM_monte`, and `SIM_external`. `SIM_car` and `SIM_monte` have the default `phase` of `0` and `SIM_external` has been assigned `phase: -1` explicitly. If using non-zero phases, jobs can be optionally filtered by them when calling helper functions like `self.get_jobs(kind, phase)`. Some examples:
```python
build_jobs = self.get_jobs(kind='build') # Get all build jobs regardless of phase
build_jobs = self.get_jobs(kind='build', phase=0) # Get all build jobs with (default) phase 0
build_jobs = self.get_jobs(kind='build', phase=-1) # Get all build jobs with phase -1
build_jobs = self.get_jobs(kind='build', phase=[0, 1, 3]) # Get all build jobs with phase 0, 1, or 3
build_jobs = self.get_jobs(kind='build', phase=range(-10,11)) # Get all build jobs with phases between -10 and 10
```
This can be done for runs and analyses in the same manner:
```python
run_jobs = self.get_jobs(kind='run') # Get all run jobs regardless of phase
run_jobs = self.get_jobs(kind='run', phase=0) # Get all run jobs with (default) phase 0
# Get all run jobs with all phases less than zero
run_jobs = self.get_jobs(kind='run', phase=range(TrickWorkflow.allowed_phase_range['min'],0))
# Get all analysis jobs with all phases zero or greater
an_jobs = self.get_jobs(kind='analysis', phase=range(0, TrickWorkflow.allowed_phase_range['max']+1))
```
Note that since analysis jobs are directly tied to a single named run, they inherit the `phase` value of their run as specified in the `yaml` file. In other words, do not add a `phase:` section indented under any `analyze:` section in your `yaml` file.
It's worth emphasizing that the specification of a non-zero `phase` in the `yaml` file, by itself, does not affect the order in which actions are taken. **It is on the user of TrickOps to use this information to order jobs appropriately**. Here's an example in code of what that might look like for the example use-case described by the `yaml` file in this section:
```python
first_build_jobs = self.get_jobs(kind='build', phase=-1) # Get all build jobs with phase -1 (SIM_external)
second_build_jobs = self.get_jobs(kind='build', phase=0) # Get all build jobs with phase 0 (SIM_car & SIM_monte)
first_run_jobs = self.get_jobs(kind='run', phase=-1) # Get all run jobs with phase -1 (RUN_nominal/input.py --monte-carlo)
second_run_jobs = self.get_jobs(kind='run', phase=0) # Get all run jobs with phase 0 (All generated runs & RUN_test/input.py)
# SIM_external must build before SIM_car and SIM_monte, for project-specific reasons
builds_status1 = self.execute_jobs(first_build_jobs, max_concurrent=3, header='Executing 1st phase sim builds.')
# SIM_car and SIM_monte can build at the same time with no issue
builds_status2 = self.execute_jobs(second_build_jobs, max_concurrent=3, header='Executing 2nd phase sim builds.')
# SIM_monte's 'RUN_nominal/input.py --monte-carlo' generates runs
runs_status1 = self.execute_jobs(first_run_jobs, max_concurrent=3, header='Executing 1st phase sim runs.')
# SIM_monte's 'MONTE_RUN_nominal/RUN*/monte_input.py' are the generated runs, they must execute after the generation is complete
runs_status2 = self.execute_jobs(second_run_jobs, max_concurrent=3, header='Executing 2nd phase sim runs.')
```
Astute observers may have noticed that `SIM_external`'s `RUN_test/input.py` technically has no order dependencies and could execute in either the first or second run job set without issue.
A couple important points on the motivation for this capability:
* Run phasing was primarily developed to support testing monte-carlo and checkpoint sim scenarios, where output from a set of scenarios (like generated runs or dumped checkpoints) becomes the input to another set of sim scenarios.
* Sim phasing exists primarily to support testing scenarios where sims are poorly architectured or immutable, making them unable to be built independently.
## Where does the output of my tests go?
All output goes to a single directory `log_dir`, which is a required input to the `TrickWorkflow.__init__()` function. Sim builds, runs, comparisons, koviz reports etc. are all put in a single directory with unique names. This is purposeful for two reasons:
@ -284,11 +383,48 @@ This is purposeful -- handling every project-specific constraint is impossible.
* If your project requires an environment, it's usually a good idea to track a source-able environment file that users can execute in their shell. For example, if `myproject/.bashrc` contains your project environment, you should add `source .bashrc ;` to the `env:` section of `globals` in your YAML config file. This tells `TrickWorkflow` to add `source .bashrc ; ` before every `Job()`'s `command`.
* Make sure you execute your tests in an order that makes sense logically. The TrickOps framework will not automatically execute a sim build before a sim run for example, it's on the user to define the order in which tests run and which tests are important to them.
* Be cognizant of how many CPUs you've passed into `TrickWorkflow.__init__` and how many sims you build at once. Each sim build will use the `cpus` given to `TrickWorkflow.__init__`, so if you are building 3 sims at once each with 3 cpus you're technically requesting 9 cpus worth of build, so to speak.
* If `TrickWorkflow` encounters non-fatal errors while validating the content of the given YAML config file, it will set the internal member `self.config_erros` to be `True`. If you want your script to return non-zero on any non-fatal error, add this return code to your final script `sys.exit()`.
* If `TrickWorkflow` encounters non-fatal errors while verifying the content of the given YAML config file, it will add those errors to the internal `self.config_errors` list of strings. If you want your script to return non-zero on any non-fatal error, include `self.config_errors` in the criteria used for `sys.exit(<success_criteria>)`. Similar recommendation for `self.parsing_errors` which contains all errors found while parsing the YAML file.
* Treat the YAML file like your project owns it. You can store project-specific information and retrieve that information in your scripts by accessing the `self.config` dictionary. Anything not recognized by the internal validation of the YAML file is ignored, but that information is still provided to the user. For example, if you wanted to store a list of POCS in your YAML file so that your script could print a helpful message on error, simply add a new entry `project_pocs: email1, email2...` and then access that information via `self.config['project_pocs']` in your script.
## `MonteCarloGenerationHelper` - TrickOps Helper Class for `MonteCarloGenerate.sm` users
TrickOps provides the `MonteCarloGenerationHelper` python module as an interface between a sim using the `MonteCarloGenerate.sm` (MCG) sim module and a typical Trick-based workflow. This module allows MCG users to easily generate monte-carlo runs and execute them locally or alternatively through an HPC job scheduler like SLURM. Below is an example usage of the module. This example assumes:
1. The using script inherits from or otherwise leverages `TrickWorkflow`, giving it access to `self.execute_jobs()`
2. `SIM_A` is already built and configured with the `MonteCarloGenerate.sm` sim module
3. `RUN_mc/input.py` is configured to generate runs when executed, specifically that `monte_carlo.mc_master.generate_dispersions == monte_carlo.mc_master.active == True` in the input file.
```python
# Instantiate an MCG helper instance, providing the sim and input file for generation
mgh = MonteCarloGenerationHelper(sim_path="path/to/SIM_A", input_path="RUN_mc/input.py")
# Get the generation SingleRun() instance
gj = mgh.get_generation_job()
# Execute the generation Job to generate RUNS
ret = self.execute_jobs([gj])
if ret == 0: # Successful generation
# Get a SLURM sbatch array job for all generated runs found in monte_dir
# SLURM is an HPC (High-Performance-Computing) scheduling tool installed on
# many modern super-compute clusters that manages execution of a massive
# number of jobs. See the official documentation for more information
# Slurm: https://slurm.schedmd.com/documentation.html
sbj = mgh.get_sbatch_job(monte_dir="path/to/MONTE_RUN_mc")
# Execute the sbatch job, which queues all runs in SLURM for execution
# Use hpc_passthrough_args ='--wait' to block until all runs complete
ret = self.execute_jobs([sbj])
# Instead of using SLURM, generated runs can be executed locally through
# TrickOps calls on the host where this script runs. First get a list of
# run jobs
run_jobs = mgh.get_generated_run_jobs(monte_dir="path/to/MONTE_RUN_mc")
# Then execute all generated SingleRun instances, up to 10 at once
ret = self.execute_jobs(run_jobs, max_concurrent=10)
```
Note that the number of runs to-be-generated is configured somewhere in the `input.py` code and this module cannot robustly know that information for any particular use-case. This is why `monte_dir` is a required input to several functions - this directory is processed by the module to understand how many runs were generated.
## More Information
A lot of time was spent adding `python` docstrings to the `TrickWorkflow.py` and `WorkflowCommon.py` modules. This README does not cover all functionality, so please see the in-code documentation for more detailed information on the framework.
A lot of time was spent adding `python` docstrings to the modules in the `trickops/` directory and tests under the `trickops/tests/`. This README does not cover all functionality, so please see the in-code documentation and unit tests for more detailed information on the framework capabilities.
[Continue to Software Requirements](../software_requirements_specification/SRS)

View File

Before

Width:  |  Height:  |  Size: 269 KiB

After

Width:  |  Height:  |  Size: 269 KiB

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Running a Simulation](Running-a-Simulation) → Input File |
|------------------------------------------------------------------|
The primary interface between the simulation executable and the user is the runstream
input file. The Trick simulation input file syntax is Python. All Python syntax rules
@ -253,7 +255,7 @@ There are several ways to include files in Python.
```python
# One way is to use the execfile command
execfile("Modified_data/data_record.py")
exec(open("Modified_data/data_record.py").read())
# Another way is to make the included file a module and import it.
# Import search paths may be added using the sys.path.append command.
@ -447,6 +449,9 @@ For information on how Trick processes events during runtime, see [Event Process
# Add the event to the input processor's list of events (it will be processed at top of frame before scheduled jobs)
trick.add_event(<event name>)
# Tell trick whether to terminate the sim if an error occurs while parsing Python code. Defaults to False
trick.terminate_on_event_parse_error(<True|False>)
```
#### Advanced Event (Malfunction) Usage

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → Running a Simulation |
|------------------------------------------------------------------|
S_main_${TRICK_HOST_CPU}.exe is generated by the CP and is the simulation main executable program.
The runtime configuration of the executive and its associated support utilities may be manipulated through entries in the simulation input file. The input file is described in detail in Input_File.
@ -9,6 +12,7 @@ S_main_${TRICK_HOST_CPU}.exe [trick_version] [sie]
RUN_<name>/<input_file_name> [-d]
[-O <output_file_path>]
[-OO <output_file_path>]
[--read-only-sim]
[-u <user_defined_arguments>]
```
@ -18,6 +22,7 @@ S_main_${TRICK_HOST_CPU}.exe [trick_version] [sie]
- The '-d' argument is optional and, if specified, starts the simulation in an input file verification mode. In this mode the entire input file is read, echoed to standard out, and then the simulation exits without calling any jobs listed in the S_define file. This mode helps debug input file syntax errors.
- The '-O <output_file_path>' option allows the user to specify the directory to which simulation data log files will be written. If this option is omitted, the RUN_<name> directory is used.
- The '-OO <output_file_path>' option allows the user to specify the directory to which ALL simulation output files will be written. If this option is omitted, the RUN_<name> directory is used.
- The '--read-only-sim' flag can be used to redirect all files written at simulation runtime into the output directory.
- The '-u' option specifies that all remaining arguments are meant to be used by user supplied jobs. All arguments after the -u can be accessed internal to the simulation jobs by calling the get_cmnd_args() function of the executive as illustrated below. In a master/slave simulation, the master's -u args will be passed to the slave.
The following code example shows how a function can access the command line arguments during execution.

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Running a Simulation](Running-a-Simulation) → Runtime Output|
|------------------------------------------------------------------|
Executing the simulation main executable S_main_${TRICK_HOST_CPU}.exe generates a handful of log files that document a simulation run.
The log files are written to the RUN_<name> directory by default. The destination can be redirected by specifying the -O or -OO option for the runtime executive. Two of the log files are described below.

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Running a Simulation](../Running-a-Simulation) → [Runtime GUIs](Runtime-GUIs) → Malfunctions |
|------------------------------------------------------------------|
### Events/Malfunctions Trick View
Events/Malfunctions Trick View (hereafter referred to as MTV) is a graphical user interface that has two main functions:

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Running a Simulation](../Running-a-Simulation) → [Runtime GUIs](Runtime-GUIs) → Monte Monitor |
|------------------------------------------------------------------|
### Monte Monitor
Monte Monitor (hereafter referred to as MM) is a graphical user interface that allows users to view and modify the states

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Running a Simulation](../Running-a-Simulation) → Runtime GUIs |
|------------------------------------------------------------------|
Trick provides the following graphical user interfaces:
### Simulation Control Panel

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Running a Simulation](../Running-a-Simulation) → [Runtime GUIs](Runtime-GUIs) → Trick View |
|------------------------------------------------------------------|
Trick View (hereafter referred to as TV) is a graphical user interface that allows users to view and modify Trick-managed variables in a simulation while it is running. It also provides for the launching of integrated strip charts and can save and restore lists of variables and their associated strip charts.
#### Launching

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Checkpoints |
|------------------------------------------------------------------|
The Trick CheckPointWriter is a C++ Class that implements checkpointing.
## Checkpointing
@ -20,6 +23,12 @@ trick.checkpoint_cpu(<cpu_num>)
trick.checkpoint_safestore_set_enabled(True|False)
# Set the safestore checkpoint period. default 9x10e18
trick.checkpoint_safestore(<period>)
# Load a checkpoint
trick.load_checkpoint(<filename>)
# Load a checkpoint without restoring STLs
trick.load_checkpoint(<filename>, False)
```
[Continue to Memory Manager](memory_manager/MemoryManager)

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Command Line Arguments |
|------------------------------------------------------------------|
The Trick::CommandLineArguments class stores the command line arguments specified
by the user when starting the simulation. The class provides routines to

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Data Record |
|------------------------------------------------------------------|
Data Recording provides the capability to specify any number of data recording groups,
each with an unlimited number of parameter references, and with each group recording
@ -43,6 +45,7 @@ For example:
drg.add_variable("ball.obj.state.output.position[0]")
drg.add_variable("ball.obj.state.output.position[1]")
```
In this example `position` is an array of floating point numbers. **DO NOT ATTEMPT TO DATA RECORD C OR C++ STRINGS. THIS HAS BEEN OBSERVED TO CREATE MEMORY ISSUES AND TRICK DOES NOT CURRENTLY PROVIDE ERROR CHECKING FOR THIS UNSUPPORTED USE CASE**
An optional alias may also be specified in the method as <tt>drg.add_variable("<string_of_variable_name>" [, "<alias>"])</tt>.
If an alias is present as a second argument, the alias name will be used in the data recording file instead of the actual variable name.
@ -53,6 +56,8 @@ drg.add_variable("ball.obj.state.output.position[0]", "x_pos")
drg.add_variable("ball.obj.state.output.position[1]", "y_pos")
```
Only individual primitive types can be recorded. Arrays, strings/char *, structured objects, or STL types are not supported.
### Changing the Recording Rate
To change the recording rate call the <tt>set_cycle()</tt> method of the recording group.
@ -380,7 +385,7 @@ The following data-types are used in Trick versions >= 10, that is for, *vv* = "
### DRHDF5 Recording Format
HDF5 recording format is an industry conforming HDF5 formatted file. Files written in this format are named
log_<group_name>.hd5. The contents of this file type are readable by the Trick Data Products packages from
log_<group_name>.h5. The contents of this file type are readable by the Trick Data Products packages from
Trick 07 to the current version. The contents of the file are binary and is not included here. The HDF5 layout
of the file follows.
@ -415,4 +420,16 @@ GROUP "/" {
}
```
### Interaction with Checkpoints
Data recording groups are able to be checkpointed, reloaded, and restarted without any interaction by the user. When a checkpoint is loaded that includes data recording,
the data recording groups will be initiated and begin recording at the time in the checkpoint. For example, if a checkpoint was dumped when t=5, when the checkpoint is
loaded into another run, it will data record starting at t=5, no matter what time in the run it was loaded or whether the run was already data recording. Loading a checkpoint
will overwrite any data recording files that were being recorded before the load.
Loading a checkpoint with different data recording groups than the current run will overwrite the current data recording groups.
Refer to test/SIM_checkpoint_data_recording to see expected behavior in action. Overall, loading a checkpoint should completely overwrite any other data recording the sim is currently doing, and the new recording will start at the time in the checkpoint. If you come across different behavior, please open an issue.
[Continue to Checkpointing](Checkpoints)

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Debug Pause |
|------------------------------------------------------------------|
Debug Pause is a debugging feature provided by Trick. When turned on, Debug Pause will print the current simulation time and
the name of the current job, and pause the simulation *before* the job is executed. Debug Pause provides a way for the user to step

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Echo Jobs |
|------------------------------------------------------------------|
Echo Jobs is a handy debugging toggle provided by Trick. When turned on, Echo Jobs will print the current simulation time and
the name of the current job being executed. Echo Jobs is a convenient way to see the order of job execution and narrow down when

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Environment |
|------------------------------------------------------------------|
This class saves the Trick environment variables at compile-time. The variables
are saved inside the S_main executable. These variables are read-only after they
@ -7,4 +9,4 @@ are compiled into the sim.
const char * get_trick_env( char * variable_name )
```
[Continue to STL Checkpointing](STL-Checkpointing)
[Continue to STL Checkpointing](STL-Capabilities)

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Event Manager |
|------------------------------------------------------------------|
This page describes the methods available in the Trick::EventManager and Trick::EventProcessor class.
See [Input_File](/trick/documentation/running_a_simulation/Input-File) for detailed information on the syntax of the input processor file.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Executive Scheduler |
|------------------------------------------------------------------|
This scheduler or derivative of this class is required for Trick simulations to run.
@ -203,10 +205,26 @@ If there is a job tag specified for one or more jobs in the S_define file, you c
```
# Python code
trick.exec_set_sim_object_onoff(char * job_name , int on) ;
trick.exec_set_sim_object_onoff(char * sim_object_name , int on) ;
```
The exec_set_sim_object_onoff routine allows users to turn individual whole sim_objects on and off.
The exec_set_sim_object_onoff routine allows users to turn individual whole sim_objects on and off. If individual jobs were disabled before the sim object is disabled, they will retain their disabled status when the sim object is turned back on.
```
# Python code
trick.exec_get_sim_object_onoff(char * sim_object_name) ;
```
The exec_get_sim_object_onoff routine allows users to determine if the sim_object is currently on or off.
```
# Python code
trick.exec_set_sim_object_jobs_onoff(char * sim_object_name , int on) ;
```
The exec_set_sim_object_jobs_onoff allows users to turn all of the jobs in a sim_object on or off, but does not change the overall sim object's disabled status.
#### Job Cycle Time

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Frame Logging |
|------------------------------------------------------------------|
Trick provides a means to gather simulation performance data and view it using Data Products (see [Data Products](Data-Products)).
When the user turns on the Frame Logging feature, Trick will use its Data Recording mechanism to track the following:

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Input Processor |
|------------------------------------------------------------------|
This page describes the methods available in the Trick::InputProcessor class.
<b>See [Input File](../running_a_simulation/Input-File)</b> for detailed information on the syntax of the input processor file.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Integrator |
|------------------------------------------------------------------|
Trick provides a state integration capability described by the inputs below.
To use these options a developer must develop application code which interfaces the application states with

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → JIT Input Processor |
|------------------------------------------------------------------|
A JIT input file is a C++ input file that can be used along side a python input file, or even replace the python input file. The JIT input file includes "S_source.hh" that gives it access to the entire simulation. The code in a JIT input file is compiled into a shared library during simulation initialization. The simulation dynamically opens the shared library and runs a specific function called "run_me" that must be defined in the JIT input file. There are a couple of advantages to this type of input file.
1. Once compiled, a JIT input file is orders of magnitude faster to execute than a python equivalent. That is because no python is involved, everything is C++.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Master/Slave |
|------------------------------------------------------------------|
Master/Slave is a way to do distributed processing in Trick using multiple simulations.
Master/Slave synchronization synchronizes a master simulation to one or more slave

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Realtime Clock |
|------------------------------------------------------------------|
# Realtime-Clock
**Contents**
@ -194,6 +197,6 @@ double exec_get_sim_time(void) ;
defined in ```exec_proto.h```.
Continue to [Realtime Timer](Realtime-Timer)

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Realtime Injector |
|------------------------------------------------------------------|
The Real Time Injector (RTI) allows the user to set simulation variables
synchronously without impacting real-time performance. The RTI performs the injection

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Realtime Timer |
|------------------------------------------------------------------|
A realtime sleep timer is an optional class for Trick simulations.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Realtime |
|------------------------------------------------------------------|
Trick provides a real-time processing capability which is different from many real-time
simulation capabilities. The Trick executive is a "time based" executive and can run in

View File

@ -1,5 +0,0 @@
Trick checkpoints the following STL types: array, vector, list, deque, set, multiset, map, multimap, stack, queue, priority_queue, pair.
[Continue to Threads](Threads)

View File

@ -0,0 +1,113 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Using STLs in Trick Sims |
|------------------------------------------------------------------|
# Standard Template Libraries (STL) in Trick
STLs may be used in models. However, STL variables (currently) are not data recordable, visible in the variable server, nor directly accessible in the input file. Some STLs can be checkpointed: array, vector, list, deque, set, multiset, map, multimap, stack, queue, priority_queue, pair.
STL classes cannot be directly registered with the memory manager, but they can be processed by the checkpoint agent when nested inside normal C++ classes (including sim objects).
STL checkpoint restore may slow down the default data jobs of some sims. STL restore is on by default. To turn off STL restore:
If using memory manager through the C interface:
```
int TMM_set_stl_restore (int on_off);
```
If using the memory manager through the C++ interface, set the default or pass a parameter to your read_checkpoint function of choice:
```
int set_restore_stls_default (bool on);
int read_checkpoint( std::istream* in_s, bool do_restore_stls = restore_stls_default);
int read_checkpoint_from_string( const char* s, bool do_restore_stls = restore_stls_default );
int init_from_checkpoint( std::istream* in_s, bool do_restore_stls = restore_stls_default);
```
If using the checkpoint restart C interface:
```
int load_checkpoint_stls( const char * file_name, int with_stls ) ;
```
## What works:
To checkpoint an STL, it **must** be a member of a class or struct.
```
class AnyClass {
std::vector<int> vec;
}
```
Declare an instance of this class with the memory manager in the usual way:
```
AnyClass * my_class = tmm->declare_var("AnyClass my_alloc");
```
If it is in a class that is nested in a `sim_object`, it will be registered with the memory manager automatically.
You can nest an arbitrary amount of STLs, they will all be checkpointed as long as
the base is a member of a class that is registered with the memory manager. There
are some known [**limitations**](#limitations).
```
class AnyClass {
std::pair<std::vector<std::map.......>>
}
```
<a id=limitations></a>
## Limitations
The STL checkpointing feature can only handle simple types, pointers, and nested STL types.
### An STL type within a user defined type within an STL will fail to checkpoint.
For example: a user defined class with an STL in it:
```
class VectorWrapper {
int a;
std::vector<int> vec;
}
```
An outer class (which is registered with the memory manager, like a member of a sim_object or something that has been explicitly declared) that has an STL container of these objects:
```
class MyObject {
std::vector<VectorWrapper> vec_user_defined;
}
```
If MyObject is populated, it will be able to checkpoint and restore without throwing an error, and all the `VectorWrapper` objects will be present, but `vec` will not be restored (`a` will restore successfully). The contents of `vec` are never written out to the checkpoint file.
If `MyObject` instead has a vector of pointers to `VectorWrapper`, and each `VectorWrapper` is registered with the memory manager, `vec` will checkpoint and restore successfully.
```
class MyObject {
std::vector<VectorWrapper *> vec_user_defined_ptr;
}
```
### You cannot directly create or register an external stl with the memory manager, you will get a parser error.
STLs can't be registered directly with the memory manager, so there's no way for the checkpoint to know where the memory is.
```
class AnyClass {
std::vector<int> *anything;
}
class AnyClass {
std::pair<int,int>[5];
}
memorymanager->declare_var("std::vector<int> my_vector_allocation");
std::vector<int> my_vector;
memorymanager->declare_var_extern(&my_vector, "std::vector<int> my_vector_allocation");
```
[Continue to Threads](Threads)

View File

@ -1,4 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → Simulation Capabilities |
|------------------------------------------------------------------|
This section details the runtime capabilities of Trick.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Status Message System |
|------------------------------------------------------------------|
The Message Publisher publishes executive and/or model messages. A Message Subscriber gets the messages published by the Publisher.

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Threads |
|------------------------------------------------------------------|
# The ThreadBase Class
A trick sim is a multi-threaded process, and all of the threads that are created by Trick or Trick generated code inherit from the abstract class ThreadBase. ThreadBase classes have the capability to change the priority and cpu affinity of the thread. ThreadBase classes include trick_sys threads as well as user defined threads from the S_define:
@ -70,4 +73,5 @@ Set the priority for the thread. See the man page for sched(7) for more details.
```cpp
int set_priority(unsigned int req_priority)
```
[Continue to Simulation Utilities](/trick/documentation/simulation_utilities/Simulation-Utilities)
[Continue to Web Server](../web/Webserver)

View File

@ -1,5 +1,5 @@
| [Home](Home) → [Documentation Home](Documentation-Home) → Monte Carlo |
|--------------------------------------------------------------------------------------|
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Monte Carlo |
|------------------------------------------------------------------|
# Introduction
Monte Carlo is an advanced simulation capability provided by Trick that allows users to repeatedly run copies of a simulation with different input values. Users can vary the input space of a simulation via input file, random value generation, or by calculating values from previous Monte Carlo runs in a process called optimization.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../Documentation-Home) → [Simulation Capabilities](Simulation-Capabilities) → Variable Server |
|------------------------------------------------------------------|
When running a Trick simulation, unless specifically turned off, a server called the
"variable server" is always up and listening in a separate thread of execution. The
@ -61,6 +63,16 @@ trick.var_server_get_hostname()
trick.var_server_get_port()
```
Additional TCP or UDP sockets can be opened as well. Additional TCP sockets operate the same way as the original variable server socket. A UDP socket will only host 1 variable server session, and the responses will be sent to the latest address that sends commands to it.
Note that this is not necessary to allow multiple variable server clients - any number of clients can connect to the original variable server port.
```python
trick.var_server_create_udp_socket( const char * source_address, unsigned short port )
trick.var_server_create_tcp_socket( const char * source_address, unsigned short port )
```
### Commands
The variable server accepts commands in the form of strings. The variable server parses
@ -135,10 +147,10 @@ The frame refers to the software frame in the Executive. In freeze mode a diffe
multiplier and offset are used.
```python
trick.var_set_frame_multiplier(int mult)
trick.var_set_frame_multiple(int mult)
trick.var_set_frame_offset(int offset)
trick.var_set_freeze_frame_multiplier(int mult)
trick.var_set_freeze_frame_multiple(int mult)
trick.var_set_freeze_frame_offset(int offset)
```
@ -187,6 +199,22 @@ trick.var_send()
The var_send command forces the variable server to return the list of values to the
client immediately.
#### Sending variables only once and immediately
```python
trick.var_send_once( string var_name)
```
The var_send_once command forces the variable server to return the value of the given
variable to the client immediately.
```python
trick.var_send_once( string var_list, int num_vars)
```
var_send_once can also accept a comma separated list of variables. The number of variables
in this list must match num_vars, or it will not be processed.
#### Changing the Units
```python
@ -384,12 +412,17 @@ unprintable character) that occurs within the character string value will appear
escaped character, i.e. preceded by a backslash.
The 1st value returned in the list will always be a message indicator. The possible
values of the message indicator are:
- 0 returned variable value(s) from var_add or var_send
- 1 returned value from var_exists
- 2 returned value from send_sie_resource (special command used by Trick View)
- 3 returned value from send_event_data (special command used by Events/Malfunctions Trick View) or var_send_list_size
- 4 values redirected from stdio if var_set_send_stdio is enabled
values of the message indicator are listed in the table below.
| Name | Value | Meaning |
|-------------------|-------|---------|
| VS\_IP\_ERROR | -1 | Protocol Error|
| VS\_VAR\_LIST | 0 | A list of variable values. |
| VS\_VAR\_EXISTS | 1 | Response to var\_exists( variable_name )|
| VS\_SIE\_RESOURCE | 2 | Response to send_sie_resource|
| VS\_LIST\_SIZE | 3 | Response to var_send_list_size or send_event_data|
| VS\_STDIO | 4 | Values Redirected from stdio if var_set_send_stdio is enabled|
| VS\_SEND\_ONCE | 5 | Response to var\_send\_once|
If the variable units are also specified along with the variable name in a var_add or
var_units command, then that variable will also have its units specification returned following
@ -487,6 +520,8 @@ on your network sends its information to this address and port so there may be
messages with variable server information available here. Here is some
C code that reads all messages on the variable server channel.
Note that the multicast protocol is disabled by default in MacOS.
```c
#include <stdio.h>
#include <sys/types.h>

View File

@ -1,3 +1,7 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Simulation Capabilities](../Simulation-Capabilities) → [Memory Manager](MemoryManager) → Declaration String|
|------------------------------------------------------------------|
### Memory Manager Declaration
A declaration provides a data type description of a chunk of memory.

View File

@ -1,3 +1,5 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Simulation Capabilities](../Simulation-Capabilities) → [Memory Manager](MemoryManager) → Trick Type|
|------------------------------------------------------------------|
- **TRICK_TYPE** is an enumeration type that specifies data types.
Available types are provided in the following table.

View File

@ -1,3 +1,6 @@
| [Home](/trick) → [Documentation Home](../../Documentation-Home) → [Simulation Capabilities](../Simulation-Capabilities) → Memory Manager |
|------------------------------------------------------------------|
## Trick Memory Manager
The Memory Manager

Some files were not shown because too many files have changed in this diff Show More