...
**Code Block**
#!/bin/bash
# Pure-MPI batch job: 2 nodes x 128 MPI tasks, one task per physical core.
# NOTE: Slurm directives must not have a space before '=' ("--time =0:10:00"
# is rejected by sbatch), fixed below. '--ntasks-per-node' is the documented
# spelling of the deprecated '--tasks-per-node' alias.
#SBATCH --account=<account>      # Your account
#SBATCH --time=0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --ntasks-per-node=128
#SBATCH --cpus-per-task=1
#SBATCH --hint=nomultithread     # disable hyperthreading
#SBATCH --job-name=mpi
#SBATCH --output=out_%x.%j

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

# xthi prints each rank's CPU binding; sort numerically by the rank column
srun xthi | sort -g -k 4
...
**Code Block**
#!/bin/bash
# MPI job that does NOT fill whole nodes (31 tasks per node); explicit
# binding via --cpu-bind=rank_ldom keeps tasks balanced over NUMA domains.
# NOTE: Slurm directives must not have a space before '=' ("--time =0:10:00"
# is rejected by sbatch), fixed below.
#SBATCH --account=<account>      # Your account
#SBATCH --time=0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --ntasks-per-node=31
#SBATCH --hint=nomultithread     # disable hyperthreading
#SBATCH --job-name=mpi_partial_node
#SBATCH --output=out_%x.%j

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

# The --cpu-bind=rank_ldom distributes the tasks via the node's cores
# respecting the node's NUMA domains
srun --cpu-bind=rank_ldom xthi | sort -g -k 4
...
**Code Block**
#!/bin/bash
# Pure-OpenMP batch job: a single task with 64 threads on the smp partition.
# NOTE: Slurm directives must not have a space before '=' ("--time =0:10:00"
# is rejected by sbatch), fixed below.
#SBATCH --account=<account>      # Your account
#SBATCH --time=0:10:00
#SBATCH -p smp
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=64
#SBATCH --hint=nomultithread     # disable hyperthreading
#SBATCH --job-name=openMP
#SBATCH --output=out_%x.%j

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited
# export OMP_STACKSIZE=128M

# This binds each thread to one core
export OMP_PROC_BIND=TRUE

# OpenMP and srun, both need to know the number of CPUs per task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK

srun xthi | sort -g -k 4
...
**Code Block**
#!/bin/bash
# Hybrid MPI+OpenMP batch job: 2 nodes x 8 MPI tasks x 16 OpenMP threads.
# NOTE: Slurm directives must not have a space before '=' ("--time =0:10:00"
# is rejected by sbatch), fixed below.
#SBATCH --account=<account>      # Your account
#SBATCH --time=0:10:00
#SBATCH -p mpp
#SBATCH -N 2
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=16
#SBATCH --hint=nomultithread     # disable hyperthreading
#SBATCH --job-name=hybrid
#SBATCH --output=out_%x.%j

module purge
module load xthi/1.0-intel-oneapi-mpi2021.6.0-oneapi2022.1.0 intel-oneapi-mpi
# module load xthi/1.0-openmpi4.1.3-gcc8.5.0 openmpi/4.1.3

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited
# export OMP_STACKSIZE=128M

# This binds each thread to one core
export OMP_PROC_BIND=TRUE

# OpenMP and srun, both need to know the number of CPUs per task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export SRUN_CPUS_PER_TASK=$SLURM_CPUS_PER_TASK

srun xthi | sort -g -k 4
...
**Code Block**
#!/bin/bash
# GPU batch job: one task with 2 A100 GPUs on the gpu partition.
# NOTE: Slurm directives must not have a space before '=' ("--time =0:10:00"
# is rejected by sbatch), fixed below.
#SBATCH --account=<account>      # Your account
#SBATCH --time=0:10:00
#SBATCH -p gpu
#SBATCH --ntasks=1
#SBATCH --gpus=a100:2            # allocate 2 (out of 4) A100 GPUs; to get 2 (out of 2) A40 GPUs use --gpus=a40:2
#SBATCH --hint=nomultithread     # disable hyperthreading
#SBATCH --job-name=gpu
#SBATCH --output=out_%x.%j

## Uncomment the following line to enlarge the stacksize if needed,
## e.g., if your code crashes with a spurious segmentation fault.
# ulimit -s unlimited

# To be on the safe side, we emphasize that it is pure MPI, no OpenMP threads
export OMP_NUM_THREADS=1

srun your_code_that_runs_on_GPUs
...