...
**Code Block — pure MPI, fully populated nodes (128 tasks per node):**
#!/bin/bash
# Pure-MPI job: 2 nodes, 128 MPI ranks per node, 1 CPU per rank.
#SBATCH --time 0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --tasks-per-node 128
#SBATCH --cpus-per-task 1
#SBATCH --job-name=mpi
#SBATCH --output=out_%x.%j
# disable hyperthreading
# (fix: this directive was duplicated in the original script; it only needs
# to appear once)
#SBATCH --hint=nomultithread

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

# Launch one xthi process per rank; sort the report numerically by column 4
# (the CPU id) for readability.
srun xthi | sort -g -k 4
**Code Block — pure MPI, half-populated nodes (31 tasks per node, ranks spread across both sockets):**
#!/bin/bash
# Pure-MPI job on half-populated nodes: 2 nodes, 31 MPI ranks per node.
#SBATCH --time 0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --tasks-per-node 31
#SBATCH --job-name=mpi_half_node
#SBATCH --output=out_%x.%j
# disable hyperthreading
# (fix: this directive was duplicated in the original script; it only needs
# to appear once)
#SBATCH --hint=nomultithread

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

# --cpu-bind=rank_ldom distributes ranks round-robin over the NUMA domains
# (locality domains), so a half-filled node still uses both sockets.
# Sort the xthi report numerically by column 4 (the CPU id).
srun --cpu-bind=rank_ldom xthi | sort -g -k 4
Hybrid (MPI+OpenMP)
**Code Block — hybrid MPI+OpenMP (8 tasks per node, 16 OpenMP threads per task):**
#!/bin/bash
# Hybrid MPI+OpenMP job: 2 nodes, 8 MPI ranks per node, 16 threads per rank.
#SBATCH --time 0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --tasks-per-node 8
#SBATCH --cpus-per-task 16
#SBATCH --job-name=hybrid
#SBATCH --output=out_%x.%j
# disable hyperthreading
#SBATCH --hint=nomultithread

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# Propagate the per-task CPU count to both the OpenMP runtime and srun,
# so threads and the launcher agree on how many cores each rank owns.
export OMP_NUM_THREADS="$SLURM_CPUS_PER_TASK"
export SRUN_CPUS_PER_TASK="$SLURM_CPUS_PER_TASK"

# Run xthi and order its output numerically by the CPU-id column.
srun xthi | sort -g -k 4
...