...

Code Block
languagebash
#!/bin/bash

#SBATCH --account=<account>          # Your account
#SBATCH --partition=smp
#SBATCH --time=0:10:00
#SBATCH --ntasks=1

# run 100 tasks, but only run 10 at a time
#SBATCH --array=1-100%10
#SBATCH --output=result_%A_%a.out    # gives result_<jobID>_<taskID>.out

echo "SLURM_JOBID:         $SLURM_JOBID"
echo "SLURM_ARRAY_TASK_ID: $SLURM_ARRAY_TASK_ID"
echo "SLURM_ARRAY_JOB_ID:  $SLURM_ARRAY_JOB_ID"

# Here we "translate" the $SLURM_ARRAY_TASK_ID (which takes values from 1-100)
# into an input file that we want to analyze.
# Suppose 'input_files.txt' is a text file that has 100 lines, each containing
# the respective input file.

INPUT_LIST=input_files.txt

# Read the (SLURM_ARRAY_TASK_ID)th input file
INPUT_FILE=$(sed -n "${SLURM_ARRAY_TASK_ID}p" < ${INPUT_LIST})

srun my_executable $INPUT_FILE

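If you do not already have such a list, one simple way to create it is to expand a shell pattern into the file; the directory and file pattern below are only placeholders.

Code Block
languagebash
# Hypothetical example: collect all matching data files into the list
# (replace the path and the pattern with your own)
ls /path/to/my_data/*.dat > input_files.txt

# The list should have as many lines as the --array range covers (here: 100)
wc -l input_files.txt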

Noteinfo


How you “translate” your task ID into the srun command line is up to you. You could, for example, also keep several different scripts and use the task ID to select which one to execute.
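
As a sketch of that idea, the task ID could select one of several scripts, either by name or via a case statement; the script names here are purely hypothetical.

Code Block
languagebash
# Hypothetical: one script per task, named step_1.sh ... step_100.sh
SCRIPT="step_${SLURM_ARRAY_TASK_ID}.sh"
srun bash "$SCRIPT"

# ... or branch explicitly on the task ID
case "$SLURM_ARRAY_TASK_ID" in
  1) srun ./preprocess.sh ;;
  2) srun ./analyze.sh ;;
  *) srun ./postprocess.sh "$SLURM_ARRAY_TASK_ID" ;;
esac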

...


MPI

Code Block
languagebash
titlefull node
#!/bin/bash

#SBATCH --time 0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --tasks-per-node 128
#SBATCH --cpus-per-task 1
#SBATCH --job-name=mpi
#SBATCH --output=out_%x.%j

# disable hyperthreading
#SBATCH --hint=nomultithread

module purge
module load    xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0    intel-oneapi-mpi
# module load    xthi/1.0-openmpi4.1.3-gcc8.5.0   openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
##  e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

srun  xthi | sort -g -k 4

Code Block
languagebash
titlepartially filled node
#!/bin/bash

#SBATCH --time 0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --tasks-per-node 31
#SBATCH --cpus-per-task 1
#SBATCH --job-name=mpi_partial_node
#SBATCH --output=out_%x.%j

# disable hyperthreading
#SBATCH --hint=nomultithread

module purge
module load    xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0    intel-oneapi-mpi
# module load    xthi/1.0-openmpi4.1.3-gcc8.5.0   openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
##  e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

# The --cpu-bind=rank_ldom distributes the tasks via the node's cores
# respecting the node's NUMA domains
srun --cpu-bind=rank_ldom xthi | sort -g -k 4

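To see the NUMA domains that --cpu-bind=rank_ldom distributes over, you can inspect a node with standard Linux tools; nothing below is specific to this system.

Code Block
languagebash
# Number of NUMA domains and the cores belonging to each
lscpu | grep -i numa

# More detail, if numactl is available on the node
numactl --hardware
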
OpenMP

Code Block
languagebash
#!/bin/bash

#SBATCH --time 0:10:00
#SBATCH -p smp
#SBATCH --tasks-per-node 1
#SBATCH --cpus-per-task 64
#SBATCH --job-name=openMP
#SBATCH --output=out_%x.%j

# disable hyperthreading
#SBATCH --hint=nomultithread

module purge
module load    xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0   intel-oneapi-mpi
# module load    xthi/1.0-openmpi4.1.3-gcc8.5.0   openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
##  e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# This binds each thread to one core
export OMP_PROC_BIND=TRUE

# OpenMP and srun, both need to know the number of CPUs per task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK

srun xthi | sort -g -k 4

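To double-check which settings the OpenMP runtime actually picked up, the standard OMP_DISPLAY_ENV environment variable makes it print its internal control variables at program start; this is generic OpenMP and not specific to xthi.

Code Block
languagebash
# Ask the OpenMP runtime to print OMP_NUM_THREADS, OMP_PROC_BIND, etc. at startup
# (use OMP_DISPLAY_ENV=VERBOSE to also show vendor-specific variables)
export OMP_DISPLAY_ENV=TRUE

srun xthi | sort -g -k 4
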
Hybrid (MPI+OpenMP)

Code Block
languagebash
#!/bin/bash

#SBATCH --time 0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --tasks-per-node 8
#SBATCH --cpus-per-task 16
#SBATCH --job-name=hybrid
#SBATCH --output=out_%x.%j

# disable hyperthreading
#SBATCH --hint=nomultithread

module purge
module load    xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0   intel-oneapi-mpi
# module load    xthi/1.0-openmpi4.1.3-gcc8.5.0   openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
##  e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# This binds each thread to one core
export OMP_PROC_BIND=TRUE

# OpenMP and srun, both need to know the number of CPUs per task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK

srun xthi | sort -g -k 4

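The 8 tasks x 16 threads per node above use 128 cores per node, which matches a full node on this system if we go by the full-node MPI example (128 tasks per node). Other splits are possible as long as tasks-per-node x cpus-per-task does not exceed the cores per node; the values below are only an illustration.

Code Block
languagebash
# Illustrative alternative: fewer, larger MPI tasks per node
# 4 tasks x 32 threads = 128 cores, i.e., still a fully used node
#SBATCH --tasks-per-node 4
#SBATCH --cpus-per-task 32
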
...