Commit 921ffcc1 authored by Christophe Hourdin

split header files into header + module

parent 8cbf003c
#!/bin/bash
# argument: jobid
# if the job is running, the job number is in the SLURM_JOBID shell variable
verbose=false
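# Note: ${SLURM_JOBID+x} expands to "x" only when SLURM_JOBID is set, so the -z test
# below is true when the variable is unset, i.e. when we are not inside a Slurm job.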
if [ -z ${SLURM_JOBID+x} ]; then
running=false
else
running=true
fi
printf "\nrunning = ${running}\n"
printf "verbose = ${verbose}\n"
printf "\n pour avoir des infos sur un job en cours d'execution : \n"
printf " ccc_mpp -u ${USER}\n"
printf " avec 'ccc_mpp -h' la liste des options disponibles\n"
printf "\n pour avoir des infos sur un job pendant ou après : \n"
printf " ccc_macct JOBID\n"
printf "\n pour avoir des infos l'occupation et composition de la machine : \n"
printf " ccc_mpinfo\n"
if [ "$#" == "1" ]; then
JOBID2=$1
printf "\nJOBID = ${JOBID2}\n"
echo ""
echo "> ccc_mpp -n -u ${USER}"
ccc_mpp -n -u ${USER} # -n : print results without colors to avoid formatting problems in the output file
echo ""
echo "> ccc_macct ${JOBID2}"
ccc_macct ${JOBID2}
echo ""
echo ""
else
printf "\n\n\n Passez le jobid en argument!! \n\n\n\n"
fi
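# Example usage (hypothetical jobid; the script name depends on the machine file,
# e.g. IRENE.cpu_info.sh):
#   ./IRENE.cpu_info.sh 1234567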
IRENE.cpu_info.sh
#!/bin/bash
############################# from IRENE-AMD.header #############################
#===============================================================================
# IRENE-AMD.header to submit a MPI batch job
#===============================================================================
#======= Job name & ascii output
#MSUB -r <exp> # request name
#MSUB -o <exp_lst>.jobid_%I.txt # output file name
#MSUB -e <exp_lst>.jobid_%I.txt # error output file name
#MSUB -j oe
#-------------------------------------------------------------------------------
# cpu : AMD Rome epyc
# Cores/Node : 128 (2x64)
# RAM/Node : 256 GB (2GB/core)
#-------------------------------------------------------------------------------
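# Example (hypothetical sizing, not from the original header):
#   256 MPI tasks => 256/128 = 2 full Rome nodes, ~2 GB of memory per task by default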
#MSUB -q rome
#MSUB -n <mpi_task> # number of MPI tasks
@@ -33,52 +27,14 @@
#MSUB -A gen1140
#MSUB -m work,scratch,store
#======= Reserve CPU Time
#------- Test Queue
#SUB -Q test # Queue test with 1800 s max for time
#MSUB -T <max_cpu_time>
#------ Production Queue for Intel
# can11sen2cp : (parent) LLm0=224, MMm0=288, N=50 (children) LLm0=200, MMm0=300, N=50
# ~ 07h40mn for agrif/pisces | pdt=400 | 96 cores/ 4 nodes
# ~ 05h20mn for agrif/pisces | pdt=600 | 96 cores/ 4 nodes
#======= Various
# #MSUB -M mem # required amount of memory per core in MB
# #MSUB -E extra # extra parameters to pass directly to the underlying resource mgr
# #MSUB -K # only allocates resources. If a program is defined it will be executed only once.
# It would contain ccc_mprun calls to launch parallel commands using the allocated resources.
# #MSUB -e ' options ' # additional parameters to pass to the mpirun command
# #MSUB -d ddt # launches the application in debug mode using DDT
#===============================================================================
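# For illustration (assumption, not stated in this file): once the <...> placeholders
# are filled in, a header like this is typically submitted with the TGCC wrapper
# ccc_msub, e.g.:
#   ccc_msub my_job.sh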
umask 022
#set -u
set +x # (because set -x by default on Irene)
echo ""
echo "date_chris : `date "+%Y%m%d-%H:%M:%S"`"
module purge
module load flavor/buildmpi/openmpi/4.0
module load intel/20.0.0
#module load intel/19.0.5.281
module load mpi/openmpi/4.0.3
module load flavor/buildcompiler/intel/20
#module load flavor/buildcompiler/intel/19
module load flavor/hdf5/parallel
module load netcdf-fortran/4.4.4
module load netcdf-c/4.6.0
module load hdf5/1.8.20
#!/bin/bash
# argument: jobid
#===============================================================================
# Job monitoring commands (common part for IRENE & IRENE-AMD)
#===============================================================================
# > machine.info       # processor info
# > ccc_mpp -u ${USER} # info on my currently running jobs
# > ccc_macct JOBID    # info on my jobs during or after execution
# > ccc_mpinfo         # info on machine load and composition
#
# --------------CPUS------------ -------------NODES------------
# PARTITION STATUS TOTAL DOWN USED FREE TOTAL DOWN USED FREE MpC CpN SpN CpS TpC
# --------- ------ ------ ------ ------ ------ ------ ------ ------ ------ ----- --- --- --- ---
# skylake up 79056 96 78650 310 1647 2 1642 3 3750 48 2 24 1
# if the job is running, the job number is in the SLURM_JOBID shell variable
verbose=false
if [ -z ${SLURM_JOBID+x} ]; then
running=false
else
@@ -18,14 +33,14 @@
printf "\nrunning = ${running}\n"
printf "verbose = ${verbose}\n"
printf "\n pour avoir des infos sur un job en cours d'execution : \n"
printf "\n infos sur mes jobs en cours d'execution : \n"
printf " ccc_mpp -u ${USER}\n"
printf " avec 'ccc_mpp -h' la liste des options disponibles\n"
printf "\n pour avoir des infos sur un job pendant ou après : \n"
printf "\n infos sur mes jobs pendant ou après : \n"
printf " ccc_macct JOBID\n"
printf "\n pour avoir des infos l'occupation et composition de la machine : \n"
printf "\n infos sur occupation et composition de la machine : \n"
printf " ccc_mpinfo\n"
......
#!/bin/bash
############################# from IRENE.header #############################
#===============================================================================
# IRENE.header to submit a MPI batch job
#===============================================================================
#======= Job name & ascii output
#MSUB -r <exp> # request name
#MSUB -o <exp_lst>.jobid_%I.txt # output file name
#MSUB -e <exp_lst>.jobid_%I.txt # error output file name
#MSUB -j oe
#======= IreneSKL (skylake)
### CPUs: 2x24-cores Intel Skylake@2.7GHz (AVX512) Cores/Node: 48
### RAM/Core: 3.75GB => RAM/Node: 180GB
### >ccc_mpinfo
### --------------CPUS------------ -------------NODES------------
### PARTITION STATUS TOTAL DOWN USED FREE TOTAL DOWN USED FREE MpC CpN SpN CpS TpC
### --------- ------ ------ ------ ------ ------ ------ ------ ------ ------ ----- --- --- --- ---
### skylake up 79056 96 78650 310 1647 2 1642 3 3750 48 2 24 1
#-------------------------------------------------------------------------------
# cpu : Intel Skylake
# Cores/Node : 48 (2x24)
# RAM/Node : 180 GB (3.75GB/core)
#-------------------------------------------------------------------------------
#MSUB -q skylake
#MSUB -n <mpi_task> # number of MPI tasks
#MSUB -c <cpu_per_task> # number of threads per MPI task (for OPENMP or to reserve more memory)
# #MSUB -N 4 # number of nodes to use (better to use -c option to reserve more memory)
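# Example (hypothetical sizing, not from the original header): 48 MPI tasks with -c 2
#   => 96 reserved cores = 2 Skylake nodes, i.e. ~7.5 GB of memory per MPI task
#      instead of the default 3.75 GB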
@@ -31,65 +27,14 @@
#MSUB -A gen1140
#MSUB -m work,scratch,store
#======= Reserve CPU Time
#------- Test Queue
#SUB -Q test # Queue test with 1800 s max for time
#MSUB -T <max_cpu_time>
#------ Production Queue for Intel
# can11sen2cp : (parent) LLm0=224, MMm0=288, N=50 (children) LLm0=200, MMm0=300, N=50
# ~ 07h40mn for agrif/pisces | pdt=400 | 96 cores/ 4 nodes
# ~ 05h20mn for agrif/pisces | pdt=600 | 96 cores/ 4 nodes
#======= Various
# #MSUB -M mem # required amount of memory per core in MB
# #MSUB -E extra # extra parameters to pass directly to the underlying resource mgr
# #MSUB -K # only allocates resources. If a program is defined it will be executed only once.
# It would contain ccc_mprun calls to launch parallel commands using the allocated resources.
# #MSUB -e ' options ' # additional parameters to pass to the mpirun command
# #MSUB -d ddt # launches the application in debug mode using DDT
#===============================================================================
umask 022
#set -u
set +x # (because set -x by default on Irene)
echo ""
echo "date_chris : `date "+%Y%m%d-%H:%M:%S"`"
module purge >/dev/null 2>&1
# From Seb 02/08/2021 see $HOME/loadintel.sh & $HOME/loadgcc.sh
module load intel/19.0.5.281
module load mpi/openmpi/4.0.2
module load flavor/hdf5/parallel
module load flavor/buildmpi/openmpi/4.0
module load netcdf-c/4.6.0
module load netcdf-fortran/4.4.4
module load hdf5/1.8.20
module load nco/4.9.1
module load cdo/1.9.5
module load ncview/2.1.7
module load boost/1.69.0
module load blitz/0.10
module load fftw3/mkl/19.0.5.281 # for paraver
module load c++/gnu/7.3.0
module load c/gnu/7.3.0
#module purge
#module load flavor/buildcompiler/intel/19
#module load mpi/openmpi/2.0.4
# from XIOS
#module load flavor/hdf5/parallel
#module load netcdf-fortran/4.4.4
#module load netcdf-c/4.6.0
#module load hdf5/1.8.20
#!/bin/bash
# argument: jobid
#===============================================================================
# Job monitoring commands (common part for IRENE & IRENE-AMD)
#===============================================================================
verbose=false
if [ -z ${SLURM_JOB_ID+x} ]; then
running=false
else
......
@@ -5,97 +5,51 @@
#===============================================================================
# JEANZAY.header to submit a MPI batch job
#===============================================================================
#-------------------------------------------------------------------------------
# Job name & ascii output
#-------------------------------------------------------------------------------
#SBATCH --job-name=<exp> # request name
#SBATCH --output=<exp_lst>.jobid_%j.txt # output file name
#SBATCH --error=<exp_lst>.jobid_%j.txt # error output file name
#-------------------------------------------------------------------------------
# Processors (scalar or CPU partition) : 1 node = 40 cores & 40x4 = 160 GB
#-------------------------------------------------------------------------------
# http://www.idris.fr/jean-zay/cpu/jean-zay-cpu-hw.html
#
# cpu : Intel Cascade Lake 6248 (2 processors per node, 20 cores each at 2.5 GHz)
# Cores/Node : 40 (each core can run 2 hyperthreaded processes, i.e. 80 threads/node)
# RAM/Node : 160 GB (4GB/core)
#
# "scontrol show job $JOBID" gives all information about the job (memory, cores, nodes, time...)
#-------------------------------------------------------------------------------
# any job requesting more than one node runs in exclusive mode
#SBATCH --ntasks=<mpi_task> # total number of MPI tasks to use
# #SBATCH --ntasks-per-node=40 # number of MPI tasks per node
#SBATCH --cpus-per-task=<cpu_per_task> # --cpus-per-task=1 (default)
#SBATCH --hint=nomultithread # 1 MPI task per core # for Slurm, "multithread" = hyperthreading.
# to get more memory than what is reserved through the core count (4 GB / core),
# reserve N nodes (N * 160 GB) with:
#   ntasks-per-node = ntasks / N
# (see the sizing sketch below)
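# Example (hypothetical sizing, not from the original header): 80 MPI tasks needing
# ~8 GB each, i.e. twice the default 4 GB/core, so reserve twice as many cores:
#   N = 4 nodes => 4 * 160 GB = 640 GB for 80 tasks
# #SBATCH --nodes=4
# #SBATCH --ntasks-per-node=20 # 80 / 4 : half-filled nodes, ~8 GB per task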
#-------------------------------------------------------------------------------
# Slurm CPU partitions & cpu time
#-------------------------------------------------------------------------------
# http://www.idris.fr/jean-zay/cpu/jean-zay-cpu-exec_partition_slurm.html
# #SBATCH --partition=prepost # pre/post nodes : cpu time not charged / time default=02:00:00 / time limit < 20:00:00
# #SBATCH --partition=visu # visualization node : cpu time not charged / time default=00:10:00 / time limit < 01:00:00
# #SBATCH --partition=archive # cpu time not charged / time default=02:00:00 / time limit < 20:00:00
# #SBATCH --partition=cpu_p1 # time limit = HH:MM:SS ≤ 100:00:00 (default)
#SBATCH --partition=cpu_p1
# for the cpu_p1 partition : QoS (Quality of Service) choice
#                              time limit | resource limit per job
# #SBATCH --qos=qos_cpu-t3   # 20h  | 512 nodes = 20480 cores (default)
# #SBATCH --qos=qos_cpu-t4   # 100h | 4 nodes = 160 cores
# #SBATCH --qos=qos_cpu-dev  # 2h   | 128 nodes = 5120 cores
#SBATCH --qos=<qos>
#SBATCH --time=<max_cpu_time> # max cpu time
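# Example (hypothetical choice): a 30-minute debug run fits the 2 h qos_cpu-dev limit:
# #SBATCH --qos=qos_cpu-dev
# #SBATCH --time=00:30:00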
#-------------------------------------------------------------------------------
# Account
#-------------------------------------------------------------------------------
#SBATCH --account eee@cpu
#===============================================================================
umask 022
set -u
echo ""
echo "date_chris : `date "+%Y%m%d-%H:%M:%S"`"
#source $I_MPI_ROOT/intel64/bin/mpivars.sh release_mt
# from XIOS_X64_JEANZAY_trunk_r2219-2021_09_06/arch/arch-X64_JEANZAY.env
# module purge
# module load gcc/9.1.0
# module load intel-all
# module load gcc/9.1.0
# module load hdf5/1.10.5-mpi
# module load netcdf/4.7.2-mpi
# module load netcdf-fortran/4.5.2-mpi
# from XIOS_X64_JEANZAY_v2.5_r2152-2021_06_10/arch/arch-X64_JEANZAY.env
# module purge
# module load intel-compilers/19.0.4 intel-mkl/19.0.4 intel-mpi/19.0.4
# module load hdf5/1.10.5-mpi
# module load netcdf/4.7.2-mpi
# module load netcdf-fortran/4.5.2-mpi
module purge
module load intel-compilers/19.0.4
module load intel-mpi/19.0.4
module load netcdf-fortran/4.5.2-mpi
module load netcdf/4.7.2-mpi
module load hdf5/1.10.5-mpi
module load ncview
@@ -16,6 +16,7 @@
fi
. ./namelist_exp.sh
. ./${COMPUTER}.env
. ./${COMPUTER}.header
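# New with the header/module split: source the machine-specific module file and keep a
# record of the loaded modules next to the build ("module list -t" prints one per line).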
. ./${COMPUTER}.module; module list -t > ${COMPDIR}/croco/module_list_${COMPUTER}.txt
if [ -d ${COMPDIR}/croco ] ; then
......
@@ -101,13 +101,15 @@
export USE_XIOS=true
# Time CPU run
if [ ${COMPUTER} == "JEANZAY" ] ; then
export time_cpu_run="01:00:00"
export MAX_CPU_TIME="00:30:00"; export QOS="qos_cpu-dev" # 2h | 128 nodes = 5120 cores
# export MAX_CPU_TIME="03:00:00"; export QOS="qos_cpu-t3" # 20h | 512 nodes = 20480 cores (default)
# export MAX_CPU_TIME="40:00:00"; export QOS="qos_cpu-t4" # 100h | 4 nodes = 160 cores
elif [ ${COMPUTER} == "IRENE" ] ; then
export time_cpu_run="1800" # for Queue test max 1800 s
# export time_cpu_run="7200" # 2 heures
export MAX_CPU_TIME="1800" # for Queue test max 1800 s
# export MAX_CPU_TIME="7200" # 2 heures
elif [ ${COMPUTER} == "IRENE-AMD" ] ; then
export time_cpu_run="1800" # for Queue test max 1800 s
# export time_cpu_run="36000" # 10 heures
export MAX_CPU_TIME="1800" # for Queue test max 1800 s
# export MAX_CPU_TIME="36000" # 10 heures
fi
# files to save in ascii jobdir after running
......
@@ -91,9 +91,15 @@
sed -e "s/<exp>/${ROOT_NAME_1}/g" \
-e "s/<exp_lst>/${listing_root_name}/" \
-e "s/<mpi_task>/$(( ${NPROC_X} * ${NPROC_Y} * ${CPU_PER_TASK} + ${NXIOS2} ))/" \
-e "s/<cpu_per_task>/${CPU_PER_TASK}/" \
-e "s/<max_cpu_time>/${time_cpu_run}/" \
./${COMPUTER}.header > HEADER_tmp
cat HEADER_tmp ./common_definitions.sh ./namelist_exp.sh ./namelist_period.sh ./${COMPUTER}.env job.base.sh > ${JOBDIR_ROOT}/${jobname}
-e "s/<max_cpu_time>/${MAX_CPU_TIME}/" \
./${COMPUTER}.header > HEADER_tmp1
if [ ${COMPUTER} == "JEANZAY" ] ; then
    sed -e "s/<qos>/${QOS}/" \
        HEADER_tmp1 > HEADER_tmp
else
    # only the JEANZAY header has a <qos> placeholder; keep the header unchanged
    mv HEADER_tmp1 HEADER_tmp
fi
cat HEADER_tmp ./${COMPUTER}.module ./common_definitions.sh ./namelist_exp.sh ./namelist_period.sh ./${COMPUTER}.env job.base.sh > ${JOBDIR_ROOT}/${jobname}
\rm -f HEADER_tmp HEADER_tmp1
chmod 755 ${JOBDIR_ROOT}/${jobname}
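# For illustration (hypothetical values, not from the namelists): with NPROC_X=8,
# NPROC_Y=12, CPU_PER_TASK=1 and NXIOS2=4, the <mpi_task> placeholder becomes
# 8*12*1 + 4 = 100, so the JEANZAY line "#SBATCH --ntasks=<mpi_task>" is written out
# as "#SBATCH --ntasks=100" in the generated job file.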
......