Commit 14c9e5f2 authored by Christoph Knote

Make batch config global

parent 2a65f0fe
Showing 1 addition and 432 deletions
*/config.bash
*/batch_preambles
**/.DS_Store
\ No newline at end of file
@@ -57,26 +57,6 @@ restartRootDir=${SCRATCH}/WRF/restart/
# remove run directory after run is finished?
removeRunDir=false
# --- MPI settings ---
mpiCommandPre="mpirun /usr/bin/time -v"
mpiCommandMain="mpirun -mca pml ucx -mca coll ^hcoll -x UCX_TLS=tcp,shm -x UCX_NET_DEVICES=eth5 /usr/bin/time -v"
mpiCommandReal=${mpiCommandPre}
# --- Batch system ---
# argument to submit a job in a held state
batchHoldArgument="--hold"
# command to release a held job
batchReleaseCommand="scontrol release"
# command to submit jobs to the queueing system
batchSubmitCommand=sbatch
# dependency argument for chaining runs upon submission
batchDepArgument="--dependency=afterok:__id__"
# sed command (used as "s/__command/\1/") to retrieve job run PID upon
# submission with $batchSubmitCommand
batchPidSedCommand="Submitted batch job \(.*\)"
# --- Chemistry ---
withChemistry=true
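For reference, a minimal sketch of how the batch settings above can be combined on a SLURM system; the file name main.job and the variables submitOutput/jobId are placeholders, not names taken from the repository:
# submit the job in a held state, extract its ID with the configured
# sed pattern, then release it
submitOutput=$($batchSubmitCommand $batchHoldArgument main.job)
jobId=$(echo "$submitOutput" | sed "s/$batchPidSedCommand/\1/")
$batchReleaseCommand $jobId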
#!/bin/bash -l
#SBATCH --partition=alcc1,epyc
#SBATCH -o __runDir__/__mainJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __mainJobName__
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=28
#SBATCH --mem-per-cpu=2000
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=12:00:00
#
# The variable $MACHINEFILE holds the name of the file into which the
# names of the acquired nodes/cores are written, e.g.:
#
MACHINEFILE=__runDir__/slurm.hosts
#
# Generate Machinefile for openmpi such that hosts are in the same
# order as if run via srun
#
srun hostname -s | sort -n > $MACHINEFILE
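The hostfile written above is only useful if the MPI launcher is pointed at it; a minimal illustrative Open MPI invocation, with wrf.exe standing in for whatever executable the run scripts actually launch:
mpirun -np $SLURM_NTASKS -hostfile $MACHINEFILE ./wrf.exe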
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__postJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __postJobName__
#SBATCH --ntasks=1
#SBATCH --mem=5G
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=02:00:00
#
# The variable $MACHINEFILE holds the name of the file into which the
# names of the acquired nodes/cores are written, e.g.:
#
MACHINEFILE=__runDir__/slurm.hosts
#
# Generate Machinefile for openmpi such that hosts are in the same
# order as if run via srun
#
srun hostname -s | sort -n > $MACHINEFILE
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__preJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __preJobName__
#SBATCH --ntasks=1
#SBATCH --mem=10G
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=03:00:00
#
# The variable $MACHINEFILE holds the name of the file into which the
# names of the acquired nodes/cores are written, e.g.:
#
MACHINEFILE=__runDir__/slurm.hosts
#
# Generate Machinefile for openmpi such that hosts are in the same
# order as if run via srun
#
srun hostname -s | sort -n > $MACHINEFILE
#!/bin/bash -l
#SBATCH --partition=alcc1,epyc
#SBATCH -o __runDir__/__spinupJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __spinupJobName__
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=28
#SBATCH --mem-per-cpu=2000
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=01:00:00
#
# The variable $MACHINEFILE holds the name of the file into which the
# names of the acquired nodes/cores are written, e.g.:
#
MACHINEFILE=__runDir__/slurm.hosts
#
# Generate Machinefile for openmpi such that hosts are in the same
# order as if run via srun
#
srun hostname -s | sort -n > $MACHINEFILE
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__stagingJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __stagingJobName__
#SBATCH --ntasks=1
#SBATCH --mem=5G
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=01:00:00
#
# The variable $MACHINEFILE holds the name of the file into which the
# names of the acquired nodes/cores are written, e.g.:
#
MACHINEFILE=__runDir__/slurm.hosts
#
# Generate Machinefile for openmpi such that hosts are in the same
# order as if run via srun
#
srun hostname -s | sort -n > $MACHINEFILE
#!/bin/bash
# ------------------------------------------------------------------------------
# WRFOTRON v 2.0b
# Christoph Knote (LMU Munich, Germany)
# 06/2016
# christoph.knote@lmu.de
# ------------------------------------------------------------------------------
# path to the WRFotron installation
chainDir=${HOME}/wrfotron
# --- Executable locations ---
# WPS installation directory
WPSDir=${WPS_SRC_PATH}
# WRF installation directory
WRFDir=${WRF_SRC_PATH}
# --- Input data settings ---
# path to geogrid input data
geogDir=${WRF_GEOG_PATH}
# meteo input
# Vtable for the chosen meteo input
metVtableFile=${WPS_SRC_PATH}/ungrib/Variable_Tables/Vtable.GFS
# time increment in hours
metInc=1
# full path to a met input file - you can use any "%<>" abbreviations known
# to the "date" command
metFilePattern="${WRF_GFS_METEO_PATH}/GF%Y%m%d%H"
# example:
# "/glade/p/rda/data/ds083.2/grib2/%Y/%Y.%m/fnl_%Y%m%d_%H_00.grib2"
# --- Pre/Postprocessing settings ---
# preparation script
preScriptPath=NONEXISTENT.bash
# postprocessing scripts (arbitrary)
postScriptPath=NONEXISTENT.bash
# postprocessing scripts (actions for each wrfout file)
postPerFileScriptPath=NONEXISTENT.bash
# --- Working directories ---
# where the WRF will be run (some fast, large disk like "scratch" or similar)
workDir=${SCRATCH}/WRF/work/
# where the unprocessed WRF output will be stored
stagingRootDir=${SCRATCH}/WRF/staging/
# where the WRF output will be stored
archiveRootDir=${SCRATCH}/archive/WRF/
# where the WRF restart files will be stored
restartRootDir=${SCRATCH}/WRF/restart/
# remove run directory after run is finished?
removeRunDir=false
# --- MPI settings ---
# mental note for GNU:
#mpirun -mca pml ucx -x UCX_TLS=rc,shm -N $SLURM_NTASKS_PER_NODE -hostfile $MACHINEFILE
mpiCommandPre="mpirun -mca pml ucx -x UCX_TLS=rc,shm -hostfile $MACHINEFILE /usr/bin/time -v"
mpiCommandMain="mpirun -mca pml ucx -x UCX_TLS=rc,shm -N $SLURM_NTASKS_PER_NODE -hostfile $MACHINEFILE /usr/bin/time -v"
mpiCommandReal=${mpiCommandPre}
# mental note for INTEL:
#mpiCommandPre="srun /usr/bin/time -v"
#mpiCommandMain="srun /usr/bin/time -v"
#mpiCommandReal="srun /usr/bin/time -v"
# --- Batch system ---
# argument to submit a job in a held state
batchHoldArgument="--hold"
# command to release a held job
batchReleaseCommand="scontrol release"
# command to submit jobs to the queueing system
batchSubmitCommand=sbatch
# dependency argument for chaining runs upon submission
batchDepArgument="--dependency=afterany:__id__"
# sed command (used as "s/__command/\1/") to retrieve job run PID upon
# submission with $batchSubmitCommand
batchPidSedCommand="Submitted batch job \(.*\)"
# --- Chemistry ---
withChemistry=true
# WRF-Chem installation directory
WRFChemDir=${WRF_CHEM_SRC_PATH}
# megan_bio_emiss installation directory
WRFMEGANdir=${WRF_CHEM_MEGAN_BIO_EMISS_PATH}
# mozbc installation directory
WRFMOZARTdir=${WRF_CHEM_MOZBC_PATH}
# wesley/exocoldens installation directory
WRFmztoolsdir=${WRF_CHEM_WES_COLDENS_PATH}
# anthro_emiss installation directory
WRFanthrodir=${WRF_CHEM_ANTHRO_EMIS_PATH}
# fire_emis installation directory
WRFfiredir=${WRF_CHEM_FIRE_EMIS_PATH}
# path to MEGAN input data
MEGANdir=/alcc/gpfs2/home/mbees/data/emissions/biogenic/MEGAN
# use anthro_emiss or predefined files?
emissUseAnthroEmiss=false
# raw emission input - the files you read in with anthro_emiss
emissDir=/alcc/gpfs2/home/mbees/data/emissions/anthropogenic/EDGAR-HTAP/MOZART_MOSAIC
# emission conversion script for anthro_emis - must match emissions in emissDir
emissInpFile=emis_edgarhtap_mozmos.inp
# year the emissions are valid for (for offset calculation)
emissYear=2010
# FINN fires
fireFilePattern="/alcc/gpfs2/home/mbees/data/emissions/fires/FINN/GLOB_MOZ4_%Y%j.txt"
fireInpFile=finn_fires.inp
# boundary condition input
chembcFilePattern="/alcc/gpfs2/home/mbees/data/chembc/WACCM/WACCM%Y%m%d"
chembcInpFile=waccm.inp
# TUV photolysis option 4 data file
TUVDataPath="/alcc/gpfs2/home/mbees/data/tuv/TUV.phot.bz2"
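The *FilePattern settings above are ordinary strftime patterns; assuming GNU date, a pattern can be checked on the command line as sketched below (the timestamp 2016-06-01 00 UTC is chosen arbitrarily):
date -u -d "2016-06-01 00:00" +"$metFilePattern"     # -> ${WRF_GFS_METEO_PATH}/GF2016060100
date -u -d "2016-06-01 00:00" +"$fireFilePattern"    # -> .../FINN/GLOB_MOZ4_2016153.txt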
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__mainJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __mainJobName__
#SBATCH --nodes=3
#SBATCH --ntasks-per-node=28
#SBATCH --mem-per-cpu=2000
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=18:00:00
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__spinupJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __spinupJobName__
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=28
#SBATCH --mem-per-cpu=2000
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=01:00:00
#!/bin/bash
# ------------------------------------------------------------------------------
# WRFOTRON v 2.0b
# Christoph Knote (LMU Munich, Germany)
# 06/2016
# christoph.knote@lmu.de
# ------------------------------------------------------------------------------
# path to the WRFotron installation
chainDir=${HOME}/wrfotron
# --- Executable locations ---
# WPS installation directory
WPSDir=${WPS_SRC_PATH}
# WRF installation directory
WRFDir=${WRF_SRC_PATH}
# --- Input data settings ---
# path to geogrid input data
geogDir=${WRF_GEOG_PATH}
# meteo input
# Vtable for the chosen meteo input
metVtableFile=${WPS_SRC_PATH}/ungrib/Variable_Tables/Vtable.GFS
# time increment in hours
metInc=1
# full path to a met input file - you can use any "%<>" abbreviations known
# to the "date" command
metFilePattern="${WRF_GFS_METEO_PATH}/GF%Y%m%d%H"
# example:
# "/glade/p/rda/data/ds083.2/grib2/%Y/%Y.%m/fnl_%Y%m%d_%H_00.grib2"
# --- Pre/Postprocessing settings ---
# preparation script
preScriptPath=NONEXISTENT.bash
# postprocessing scripts (arbitrary)
postScriptPath=NONEXISTENT.bash
# postprocessing scripts (actions for each wrfout file)
postPerFileScriptPath=NONEXISTENT.bash
# --- Working directories ---
# where the WRF will be run (some fast, large disk like "scratch" or similar)
workDir=${SCRATCH}/WRF/work/
# where the unprocessed WRF output will be stored
stagingRootDir=${SCRATCH}/WRF/staging/
# where the WRF output will be stored
archiveRootDir=${SCRATCH}/archive/WRF/
# where the WRF restart files will be stored
restartRootDir=${SCRATCH}/WRF/restart/
# remove run directory after run is finished?
removeRunDir=false
# --- MPI settings ---
mpiCommandPre="mpirun /usr/bin/time -v"
mpiCommandMain="mpirun /usr/bin/time -v"
mpiCommandReal=${mpiCommandPre}
# --- Batch system ---
# argument to submit a job in a held state
batchHoldArgument="--hold"
# command to release a held job
batchReleaseCommand="scontrol release"
# command to submit jobs to the queueing system
batchSubmitCommand=sbatch
# dependency argument for chaining runs upon submission
batchDepArgument="--dependency=afterok:__id__"
# sed command (used as "s/__command/\1/") to retrieve job run PID upon
# submission with $batchSubmitCommand
batchPidSedCommand="Submitted batch job \(.*\)"
# --- Chemistry ---
withChemistry=true
# WRF-Chem installation directory
WRFChemDir=${WRF_CHEM_SRC_PATH}
# megan_bio_emiss installation directory
WRFMEGANdir=${WRF_CHEM_MEGAN_BIO_EMISS_PATH}
# mozbc installation directory
WRFMOZARTdir=${WRF_CHEM_MOZBC_PATH}
# wesley/exocoldens installation directory
WRFmztoolsdir=${WRF_CHEM_WES_COLDENS_PATH}
# anthro_emiss installation directory
WRFanthrodir=${WRF_CHEM_ANTHRO_EMIS_PATH}
# fire_emis installation directory
WRFfiredir=${WRF_CHEM_FIRE_EMIS_PATH}
# path to MEGAN input data
MEGANdir=/alcc/gpfs2/home/mbees/data/emissions/biogenic/MEGAN
# use anthro_emiss or predefined files?
emissUseAnthroEmiss=false
# raw emission input - the files you read in with anthro_emiss
emissDir=/alcc/gpfs2/home/mbees/data/emissions/anthropogenic/EDGARv5/MOZART_MOSAIC
# emission conversion script for anthro_emis - must match emissions in emissDir
emissInpFile=emis_edgarv5_mozmos.inp
# year the emissions are valid for (for offset calculation)
emissYear=2015
# FINN fires
fireFilePattern="/alcc/gpfs2/home/mbees/data/emissions/fires/FINN/GLOB_MOZ4_%Y%j.txt"
fireInpFile=finn_fires.inp
# boundary condition input
chembcFilePattern="/alcc/gpfs2/home/mbees/data/chembc/WACCM/WACCM%Y%m%d"
chembcInpFile=waccm.inp
# TUV photolysis option 4 data file
TUVDataPath="/alcc/gpfs2/home/mbees/data/tuv/TUV.phot.bz2"
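The __id__ placeholder in batchDepArgument above is replaced with the ID of the job a run should wait for; a minimal sketch using bash parameter substitution, where previousId and next.job are placeholders rather than names from the repository:
depArg=${batchDepArgument/__id__/$previousId}
$batchSubmitCommand $depArg next.job    # e.g. sbatch --dependency=afterok:12345 next.job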
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__postJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __postJobName__
#SBATCH --ntasks=1
#SBATCH --mem=5G
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=02:00:00
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__preJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __preJobName__
#SBATCH --ntasks=1
#SBATCH --mem=10G
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=03:00:00
#!/bin/bash -l
#SBATCH --partition=alcc1
#SBATCH -o __runDir__/__stagingJobName__.%j.%N.out
#SBATCH -D __runDir__
#SBATCH -J __stagingJobName__
#SBATCH --ntasks=1
#SBATCH --mem=5G
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=christoph.knote@med.uni-augsburg.de
#SBATCH --time=01:00:00
File moved
File moved
File moved
File moved
File moved