# trick/test_sims.yml
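# Test sim specification consumed by the TrickOps workflow scripts. The key semantics
# sketched below are inferred from how the keys are used in this file; they are not an
# authoritative or exhaustive description of the TrickOps schema:
#   <sim name>:
#       path:       sim directory, relative to the top of the repository
#       build_args: extra arguments passed when building the sim (e.g. "-t" for test builds)
#       binary:     expected executable name; "{cpu}" appears to be a platform placeholder
#       labels:     optional free-form tags (e.g. retries_allowed)
#       runs:       map of run input files (plus any command-line arguments) to execute
#           <run input file>:
#               returns: expected exit code of the run
#               phase:   relative ordering hint; phase -1 runs execute before default-phase runs
#               compare: list of "<file> vs. <file>" pairs that must match after the run
#               analyze: command executed after the run to post-process or check its output
# Sims listed with only a path (no runs key) are built but not executed.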
# Compile only sims
SIM_alloc_test:
    path: test/SIM_alloc_test
SIM_anon_enum:
    path: test/SIM_anon_enum
SIM_default_member_initializer:
    path: test/SIM_default_member_initializer
SIM_delete_default_constructor:
    path: test/SIM_delete_default_constructor
SIM_demo_inputfile:
    path: test/SIM_demo_inputfile
SIM_exclusion_mechanisms:
    path: test/SIM_exclusion_mechanisms
SIM_isystem:
    path: test/SIM_isystem
SIM_measurement_units:
    path: test/SIM_measurement_units
SIM_parse_s_define:
    path: test/SIM_parse_s_define
SIM_target_specific_variables:
    path: test/SIM_target_specific_variables
SIM_test_abstract:
    path: test/SIM_test_abstract
SIM_test_inherit:
    path: test/SIM_test_inherit
SIM_test_ip2:
    path: test/SIM_test_ip2
SIM_threads_simple:
    path: test/SIM_threads_simple
SIM_trickcomm:
    path: test/SIM_trickcomm
SIM_satellite:
    path: trick_sims/SIM_satellite
# Normal case compile and run sims
SIM_demo_sdefine:
    path: test/SIM_demo_sdefine
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_exec_set_time_tic_value:
    path: test/SIM_exec_set_time_tic_value
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_python_namespace:
    path: test/SIM_python_namespace
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_rti:
    path: test/SIM_rti
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_test_dp:
    path: test/SIM_test_dp
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_test_icg_file_skipped:
    path: test/SIM_test_icg_file_skipped
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_test_io:
    path: test/SIM_test_io
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_test_ip:
    path: test/SIM_test_ip
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_test_sched:
    path: test/SIM_test_sched
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_test_templates:
    path: test/SIM_test_templates
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_threads:
    path: test/SIM_threads
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_trickified:
    path: test/SIM_trickified
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_ball_L1:
    path: trick_sims/Ball/SIM_ball_L1
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_ball_L2:
    path: trick_sims/Ball/SIM_ball_L2
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_ball_L3:
    path: trick_sims/Ball/SIM_ball_L3
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_cannon_aero:
    path: trick_sims/Cannon/SIM_cannon_aero
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_cannon_analytic:
    path: trick_sims/Cannon/SIM_cannon_analytic
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_cannon_eulercromer:
    path: trick_sims/Cannon/SIM_cannon_eulercromer
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_cannon_numeric:
    path: trick_sims/Cannon/SIM_cannon_numeric
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_cannon_jet:
    path: trick_sims/Cannon/SIM_cannon_jet
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_Ball++_L1:
    path: trick_sims/SIM_Ball++_L1
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_sun:
    path: trick_sims/SIM_sun
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
SIM_earlyterm:
    path: test/SIM_earlyterm
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/input.py:
            returns: 0
SIM_job_class_order:
    path: test/SIM_job_class_order
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/input.py:
            returns: 0
# Special cases
# setup.py dumps a checkpoint
# unit_test.py loads that checkpoint and verifies the data
SIM_stls:
    path: test/SIM_stls
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/setup.py:
            phase: -1
            returns: 0
        RUN_test/unit_test.py:
            returns: 0
SIM_test_dr:
    path: test/SIM_test_dr
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
            compare:
                - test/SIM_test_dr/RUN_test/log_DR_bitfieldsASCII.csv vs. test/SIM_test_dr/RUN_test/Ref_Logs/log_DR_bitfieldsASCII_Master.csv
                - test/SIM_test_dr/RUN_test/log_DR_typesASCII.csv vs. test/SIM_test_dr/RUN_test/Ref_Logs/log_DR_typesASCII_Master.csv
                - test/SIM_test_dr/RUN_test/log_DR_bitfieldsBINARY.trk vs. test/SIM_test_dr/RUN_test/Ref_Logs/log_DR_bitfieldsBINARY.trk
# All the dump.py runs dump a checkpoint
# All the unit_test.py runs load that checkpoint and then compare against expected logs
SIM_checkpoint_data_recording:
    path: test/SIM_checkpoint_data_recording
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
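        # Range-notation note (inferred from the "[min-max] range notation" comments in this
        # file, not from TrickOps documentation): a run key such as RUN_test[1-5]/dump.py is
        # expanded by TrickOps into one run per value in the range, i.e. RUN_test1/dump.py
        # through RUN_test5/dump.py, each inheriting the keys listed under it.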
        RUN_test[1-5]/dump.py:
            phase: -1
            returns: 0
        RUN_test6/dump.py:
            returns: 0
        RUN_test[7-8]/dump.py:
            phase: -1
            returns: 0
        # Note we could use the [1-5] notation here if RUN_test2 didn't stand out as not matching the pattern -Jordan 1/2023
        RUN_test1/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test1/ref_log_foo.csv vs. test/SIM_checkpoint_data_recording/RUN_test1/log_foo.csv
        RUN_test2/unit_test.py:
            returns: 0
            analyze: './test/SIM_checkpoint_data_recording/RUN_test2/check_log.sh'
        RUN_test3/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test3/ref_log_foo.csv vs. test/SIM_checkpoint_data_recording/RUN_test3/log_foo.csv
        RUN_test4/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test4/ref_log_foo.csv vs. test/SIM_checkpoint_data_recording/RUN_test4/log_foo.csv
        RUN_test5/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test5/ref_log_foo.csv vs. test/SIM_checkpoint_data_recording/RUN_test5/log_foo.csv
        RUN_test6/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test6/ref_log_foo2.csv vs. test/SIM_checkpoint_data_recording/RUN_test6/log_foo2.csv
        RUN_test7/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test7/ref_log_fooChange.csv vs. test/SIM_checkpoint_data_recording/RUN_test7/log_fooChange.csv
        RUN_test8/unit_test.py:
            returns: 0
            compare:
                - test/SIM_checkpoint_data_recording/RUN_test8/ref_log_fooChange2.csv vs. test/SIM_checkpoint_data_recording/RUN_test8/log_fooChange2.csv
SIM_events:
    path: test/SIM_events
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/unit_test.py:
            returns: 0
        RUN_test/unit_test_error1.py:
            returns: 255
        RUN_test/unit_test_error2.py:
            returns: 255
        RUN_test/unit_test_error3.py:
            returns: 255
SIM_test_output_dir:
    path: test/SIM_test_output_dir
    build_args: "-t"
    binary: "T_main_{cpu}_test.exe"
    runs:
        RUN_test/input.py -OO sim_output --read-only-sim:
            returns: 0
            analyze: 'python3 test/SIM_test_output_dir/ref_files/check_file_endings.py test/SIM_test_output_dir/ref_files/ref_compiletime_S_sie.resource test/SIM_test_output_dir/S_sie.resource && python3 test/SIM_test_output_dir/ref_files/check_file_endings.py test/SIM_test_output_dir/ref_files/ref_runtime_S_sie.resource test/SIM_test_output_dir/sim_output/S_sie.resource'
# The variable server client and SIM_amoeba sometimes fail to connect and need to be retried
# JMP (9/23/2023): We can't have these in our regular CI test suite if they are going to
# periodically fail due to environmental variations over which we have no control. Commenting
# these out.
# SIM_test_varserv:
#     path: test/SIM_test_varserv
#     build_args: "-t"
#     binary: "T_main_{cpu}_test.exe"
#     runs:
#         RUN_test/unit_test.py:
#             phase: 1
#             returns: 0
#         RUN_test/err1_test.py:
#             phase: 2
#             returns: 10
#         RUN_test/err2_test.py:
#             phase: 3
#             returns: 10
# SIM_amoeba:
#     path: trick_sims/Cannon/SIM_amoeba
#     build_args: "-t"
#     binary: "T_main_{cpu}_test.exe"
#     labels:
#         - retries_allowed
#     runs:
#         RUN_test/unit_test.py:
#             returns: 0
# TODO: all compares should be <test data> vs. <baseline data>, need to swap order!
SIM_mc_generation:
    path: test/SIM_mc_generation
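    # How to read the runs below (an inference from this file's structure and naming, not from
    # TrickOps or SIM_mc_generation documentation): each RUN_*/input*.py is a generation run
    # executed at phase -1; it produces a MONTE_RUN_* directory of generated RUN_*/monte_input*.py
    # files, which are then executed as ordinary runs and/or compared against the baselines
    # kept under test/SIM_mc_generation/verif_data.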
    runs:
        RUN_nominal/input_a.py:
            phase: -1
            compare:
                - test/SIM_mc_generation/MONTE_RUN_nominal/MonteCarlo_Meta_data_output vs. test/SIM_mc_generation/verif_data/MONTE_RUN_nominal/MonteCarlo_Meta_data_output
                - test/SIM_mc_generation/MONTE_RUN_nominal/monte_variables vs. test/SIM_mc_generation/verif_data/MONTE_RUN_nominal/monte_variables
        MONTE_RUN_nominal/RUN_[000-001]/monte_input_a.py:
            compare:
        RUN_random_normal_truncate_abs/input.py:
            phase: -1
            compare:
        MONTE_RUN_random_normal_truncate_abs/RUN_[0-9]/monte_input.py:
            compare:
        RUN_random_normal_truncate_rel/input.py:
            phase: -1
            compare:
        MONTE_RUN_random_normal_truncate_rel/RUN_[0-9]/monte_input.py:
            compare:
        RUN_random_normal_truncate_sd/input.py:
            compare:
            phase: -1
        MONTE_RUN_random_normal_truncate_sd/RUN_[0-9]/monte_input.py:
            compare:
        RUN_random_normal__untruncate/input.py:
            compare:
            phase: -1
        MONTE_RUN_random_normal__untruncate/RUN_[0-9]/monte_input.py:
            compare:
        RUN_random_normal_untruncated/input.py:
            compare:
            phase: -1
        MONTE_RUN_random_normal_untruncated/RUN_[0-9]/monte_input.py:
            compare:
        RUN_random_uniform/input.py:
            compare:
            phase: -1
        MONTE_RUN_random_uniform/RUN_[0-9]/monte_input.py:
            compare:
        RUN_ERROR_file_inconsistent_skip/input.py:
            phase: -1
        MONTE_RUN_ERROR_file_inconsistent_skip/RUN_0/monte_input.py:
        RUN_ERROR_invalid_call/input.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_ERROR_invalid_call/RUN_0/monte_input.py vs. test/SIM_mc_generation/verif_data/MONTE_RUN_ERROR_invalid_call/RUN_0/monte_input.py
            phase: -1
        MONTE_RUN_ERROR_invalid_call/RUN_0/monte_input.py:
        RUN_ERROR_invalid_name/input.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_ERROR_invalid_name/RUN_0/monte_input.py vs. test/SIM_mc_generation/verif_data/MONTE_RUN_ERROR_invalid_name/RUN_0/monte_input.py
            phase: -1
        MONTE_RUN_ERROR_invalid_name/RUN_0/monte_input.py:
        RUN_ERROR_invalid_sequence/input.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_ERROR_invalid_sequence/RUN_0/monte_input.py vs. test/SIM_mc_generation/verif_data/MONTE_RUN_ERROR_invalid_sequence/RUN_0/monte_input.py
            phase: -1
        MONTE_RUN_ERROR_invalid_sequence/RUN_0/monte_input.py:
        RUN_ERROR_invalid_sequencing/input.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_ERROR_invalid_sequencing/RUN_0/monte_input.py vs. test/SIM_mc_generation/verif_data/MONTE_RUN_ERROR_invalid_sequencing/RUN_0/monte_input.py
            phase: -1
        MONTE_RUN_ERROR_invalid_sequencing/RUN_0/monte_input.py:
        RUN_ERROR_out_of_domain_error/input.py:
            compare:
            phase: -1
        MONTE_RUN_ERROR_out_of_domain_error/RUN_0/monte_input.py:
        RUN_ERROR_random_value_truncation/input.py:
            phase: -1
        MONTE_RUN_ERROR_random_value_truncation/RUN_[0-1]/monte_input.py:
            compare:
        RUN_generate_meta_data_early/input.py:
            compare:
                - test/SIM_mc_generation/MonteCarlo_Meta_data_output vs. test/SIM_mc_generation/verif_data/MonteCarlo_Meta_data_output
            phase: -1
        RUN_file_sequential/input.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_file_sequential/monte_values_all_runs vs. test/SIM_mc_generation/verif_data/MONTE_RUN_file_sequential/monte_values_all_runs
            phase: -1
        MONTE_RUN_file_sequential/RUN_[0-9]/monte_input.py:
            compare:
                # Note that technically monte_input.py is an output of RUN_file_sequential, not these runs. The comparisons
                # are only placed here due to limitations of TrickOps [min-max] range notation. See the comment block in
                # TrickWorkflow.Run.multiply() for details. This workaround is also used in more runs for this sim. -Jordan 1/2023
                - test/SIM_mc_generation/MONTE_RUN_file_sequential/RUN_[0-9]/monte_input.py vs. test/SIM_mc_generation/verif_data/MONTE_RUN_file_sequential/RUN_[0-9]/monte_input.py
        RUN_file_skip/input.py:
            phase: -1
        MONTE_RUN_file_skip/RUN_[0-9]/monte_input.py:
            compare:
        RUN_file_skip2/input.py:
            phase: -1
        MONTE_RUN_file_skip2/RUN_[0-4]/monte_input.py:
        RUN_remove_variable/input_a.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_remove_variable/RUN_both_variables/monte_variables vs. test/SIM_mc_generation/verif_data/MONTE_RUN_remove_variable/RUN_both_variables/monte_variables
        RUN_remove_variable/input_b.py:
            compare:
                - test/SIM_mc_generation/MONTE_RUN_remove_variable/RUN_one_variable/monte_variables vs. test/SIM_mc_generation/verif_data/MONTE_RUN_remove_variable/RUN_one_variable/monte_variables
        RUN_WARN_config_error/input.py:
            compare:
            phase: -1
        MONTE_RUN_WARN_config_error/RUN_0/monte_input.py:
        RUN_WARN_invalid_name/input.py:
            compare:
            phase: -1
        MONTE_RUN_WARN_invalid_name/RUN_0/monte_input.py:
        RUN_WARN_overconstrained_config/input.py:
            compare:
            phase: -1
        MONTE_RUN_WARN_overconstrained_config/RUN_0/monte_input.py:
        RUN_WARN_no_string_values/input.py:
            compare:
            phase: -1
        FAIL_config_error/input.py:
            returns: 1
        FAIL_duplicate_variable/input.py:
            returns: 1
        FAIL_illegal_config/input.py:
            returns: 1
        FAIL_invalid_config/input.py:
            returns: 1
        FAIL_invalid_data_file/input.py:
            returns: 1
        FAIL_IO_error/input.py:
            returns: 1
        FAIL_malformed_data_file/input.py:
            returns: 1