Slurm submission script
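
The submission script below requests GPUs on a single node, prints the Slurm GPU-related environment variables, starts a background nvidia-smi logger, and then launches the PyTorch script multi_gpu.py. Replace the <...> placeholders with values appropriate for your project, partition, and conda environment.
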
#!/bin/bash

#SBATCH --job-name=pyt-multi_gpu
#SBATCH --account=<project-name>
#SBATCH --time=5:00
#SBATCH --nodes=1
#SBATCH --cpus-per-task=2
#SBATCH --mail-type=ALL
#SBATCH --mail-user=<email-address>
#SBATCH --output=pyt_multi_gpu_%A.out
#SBATCH --partition=<gpu-partition>
#SBATCH --gres=gpu:<num-of-gpus>

# Limit each process to a single OpenMP thread so the workers do not
# oversubscribe the CPUs requested with --cpus-per-task.
export OMP_NUM_THREADS=1
# Uncomment for NCCL-related debug logging.
# export NCCL_DEBUG=INFO

echo "SLURM_JOB_ID:" $SLURM_JOB_ID
echo "SLURM_JOB_NODELIST:" $SLURM_JOB_NODELIST
echo "SLURM_GPUS:" $SLURM_GPUS
echo "SLURM_GPUS_ON_NODE:" $SLURM_GPUS_ON_NODE
echo "SLURM_JOB_GPUS:" $SLURM_JOB_GPUS
echo "CUDA_VISIBLE_DEVICES:" $CUDA_VISIBLE_DEVICES

# Environment variable used within the Python code (MASTER_PORT is the
# standard torch.distributed rendezvous port). Derive a job-specific port
# from the job ID so that different jobs sharing the same node or GPU
# device do not collide on the same port.
export MASTER_PORT=$(expr 10000 + $(echo -n $SLURM_JOBID | tail -c 4))
echo "MASTER_PORT="$MASTER_PORT

# List GPU devices allocated.
nvidia-smi -L

module purge
module load miniconda3/<version>
conda activate <path-to-conda-environment>

# Monitor GPU utilization and memory once per second (-l 1).
# Run nvidia-smi in the background so it does not block the rest of the job.
nvidia-smi \
--query-gpu=timestamp,count,gpu_name,gpu_uuid,utilization.gpu,utilization.memory,memory.total,memory.reserved,memory.used,memory.free,temperature.gpu,temperature.memory \
--format=csv -l 1 \
> gpu_usage.csv &
echo "Writing nvidia-smi to: gpu_usage.csv"

# Launch the multi-GPU PyTorch training script (its two positional
# arguments are defined by multi_gpu.py).
python multi_gpu.py 50 10

echo "Done."