[main]
testTopDir = /data/mash/cche/azp-agent-in-docker-cuda/azp-agent-avatargpu/regression-tests
webTopDir = /data/mash/cche/azp-agent-in-docker-cuda/azp-agent-avatargpu/regression-tests/web

sourceTree = C_Src

useCmake = 1
isSuperbuild = 1
numMakeJobs = 16

updateGitSubmodules = 1

plot_file_name = plotfile_prefix
check_file_name = checkpoint_prefix

# don't compress output plotfiles (this prevents them from being deleted automatically)
archive_output = 0

# delete plotfiles after compare
purge_output = 1

# suiteName is the name prepended to all output directories
suiteName = quokka

reportActiveTestsOnly = 1

default_branch = development

# email
sendEmailWhenFail = 0
emailTo = chongchong.he@anu.edu.au, chongchonghe@outlook.com
emailBody = quokka regression tests completed

# some research groups use slack for communication. If you set up a
# slack webhook (see the slack documentation for this), then you can
# have the suite directly post when the test begins and ends (and # of
# failures) to a slack channel. You need to put the webhook URL in a
# plaintext file somewhere that is readable by the suite (for security
# reasons, we don't put it in this inputs file, in case this is under
# version control).
# (an illustrative webhook-file example is given in the notes at the
# end of this file)
slack_post = 1
slack_webhookfile = /home/cche/quokka-azp-agents/.slack.webhook
slack_channel = "#development"
slack_username = "buildbot"

# MPIcommand should use the placeholders:
#   @host@ to indicate where to put the hostname to run on
#   @nprocs@ to indicate where to put the number of processors
#   @command@ to indicate where to put the command to run
#
# only tests with useMPI = 1 will run in parallel.
# nprocs is problem dependent and specified in the individual problem
# sections.
# (a sketch of how this template expands is given in the notes at the
# end of this file)
MPIcommand = mpirun -n @nprocs@ @command@
MPIhost =

[AMReX]
dir = /data/mash/cche/azp-agent-in-docker-cuda/azp-agent-avatargpu/regression-tests/amrex
branch = development

[source]
dir = /data/mash/cche/azp-agent-in-docker-cuda/azp-agent-avatargpu/regression-tests/quokka
branch = development

# individual problems follow

#[Sedov-GPU]
#buildDir = .
#target = test_hydro3d_blast
#inputFile = inputs/blast_unigrid_128_regression.in
#dim = 3
#cmakeSetupOpts = -DAMReX_GPU_BACKEND=CUDA
#restartTest = 0
#useMPI = 1
#numprocs = 1
#compileTest = 0
#doVis = 1
#visVar = gasDensity
#compareParticles = 1
#particleTypes = tracer_particles
#testSrcTree = .
#ignore_return_code = 1

[ShockCloud-GPU]
buildDir = .
target = shock_cloud
inputFile = inputs/ShockCloud_64_regression.in
dim = 3
cmakeSetupOpts = -DAMReX_GPU_BACKEND=CUDA
restartTest = 0
useMPI = 1
numprocs = 1
compileTest = 0
doVis = 1
visVar = temperature
compareParticles = 1
particleTypes = tracer_particles
testSrcTree = .
ignore_return_code = 1

#[RandomBlast-GPU]
#buildDir = .
#target = random_blast
#inputFile = inputs/RandomBlast_regression.in
#link1File = extern/grackle_data_files/input/CloudyData_UVB=HM2012.h5
#dim = 3
#cmakeSetupOpts = -DAMReX_GPU_BACKEND=CUDA
#restartTest = 0
#useMPI = 1
#numprocs = 1
#compileTest = 0
#doVis = 1
#visVar = temperature
#testSrcTree = .
#ignore_return_code = 0
#
#[RadhydroShell-GPU]
#buildDir = .
#target = test_radhydro3d_shell
#inputFile = inputs/radhydro_shell_regression.in
#link1File = extern/dust_shell/initial_conditions.txt
#dim = 3
#cmakeSetupOpts = -DAMReX_GPU_BACKEND=CUDA
#restartTest = 0
#useMPI = 1
#numprocs = 1
#compileTest = 0
#doVis = 1
#visVar = gasDensity
#testSrcTree = .
#ignore_return_code = 0
#
#[DiskGalaxy-GPU]
#buildDir = .
#target = disk_galaxy
#inputFile = inputs/DiskGalaxy.in
#link1File = extern/cooling/CloudyData_UVB=HM2012_resampled.h5
#link2File = extern/agora_data/vcirc.dat
#link3File = extern/agora_data/AgoraGalaxy_particles_LOW.txt
#dim = 3
#cmakeSetupOpts = -DAMReX_GPU_BACKEND=CUDA
#restartTest = 0
#useMPI = 1
#numprocs = 1
#compileTest = 0
#doVis = 1
#visVar = temperature
#compareParticles = 1
#particleTypes = tracer_particles CIC_particles StochasticStellarPop_particles
#testSrcTree = .
#ignore_return_code = 0
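
# ---------------------------------------------------------------------
# Illustrative notes (comments only; not read by the suite)
# ---------------------------------------------------------------------

# The slack_webhookfile referenced in [main] is expected to contain
# nothing but the webhook URL on a single line. A placeholder (not a
# real hook) would look like:
#
#   https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX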
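# How the MPIcommand template expands, sketched for the ShockCloud-GPU
# test above (the exact executable name is whatever the shock_cloud
# CMake target produces, so the name below is an assumption): with
# numprocs = 1, @nprocs@ -> 1 and @command@ becomes the test executable
# plus its inputs file, so the launched command is roughly
#
#   mpirun -n 1 ./shock_cloud inputs/ShockCloud_64_regression.in
#
# @host@ is unused here because MPIhost is left empty.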
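# Assuming this file drives the standard AMReX regression test driver
# (regtest.py from AMReX-Codes/regression_testing), a typical invocation
# would be (the inputs-file name here is hypothetical):
#
#   ./regtest.py quokka-tests.ini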