#############################
# --container-save EXAMPLE  #
#############################
# Import an image from Docker Hub into a local squashfs file.
pull_container_to_file_docker_hub.sh ubuntu:18.04

# 'file' is not installed in the stock ubuntu:18.04 image.
srun --container-image ~/ubuntu+18.04.sqsh which file

# Install it inside the container and persist the modified filesystem
# back into the same .sqsh file with --container-save.
srun --container-image ~/ubuntu+18.04.sqsh --container-save ~/ubuntu+18.04.sqsh \
  sh -c 'apt-get update && apt-get install -y file'

# The saved image now contains 'file'.
srun --container-image ~/ubuntu+18.04.sqsh which file

# ------------------------------------------------------------------------------

##############################
# --container-mounts EXAMPLE #
##############################
# Mount a directory (format is host_path:container_path).
srun --container-image ~/ubuntu+18.04.sqsh --container-mounts /home/"$USER"/data01:/data ls /data

# Mount a single file.
cat /etc/os-release
srun --container-image ~/ubuntu+18.04.sqsh --container-mounts /etc/os-release:/host/os-release cat /host/os-release
# The container's own /etc/os-release is unaffected by the mount above.
srun --container-image ~/ubuntu+18.04.sqsh --container-mounts /etc/os-release:/host/os-release cat /etc/os-release

# If the source and destination are identical, you can use the short form
# with a single path:
srun --container-image ubuntu --container-mounts /mnt ls /mnt

# ------------------------------------------------------------------------------

#####################################
# CUDA container from NGC - EXAMPLE #
#####################################
# NOTE(review): enroot usually rewrites ':' to '+' when importing, so the
# saved file may actually be ~/cuda+11.3.0-base-ubuntu18.04.sqsh — verify.
pull_container_to_file.sh nvcr.io/nvidia/cuda:11.3.0-base-ubuntu18.04
srun --gpus=2 --container-image ~/cuda:11.3.0-base-ubuntu18.04.sqsh nvidia-smi
# The output shows only 2 GPUs inside the container, because we allocated
# 2 GPUs from the cluster.

# Batch-job variant — contents of slurm_cuda.job (shown commented so this
# cheat sheet stays safe to source):
cat slurm_cuda.job
#   #!/bin/bash
#   #SBATCH --gpus=1
#   srun --container-image ~/cuda:11.3.0-base-ubuntu18.04.sqsh nvidia-smi

#####################################
# MIG Alloc - EXAMPLE               #
#####################################
# Allocate a 1g.5gb MIG slice.
srun -p interactive-mig --gres=gpu:1g.5gb nvidia-smi

# Allocate a 2g.10gb MIG slice.
srun -p interactive-mig --gres=gpu:2g.10gb nvidia-smi

# Don't care which slice — any single MIG device.
srun -p interactive-mig --gpus=1 nvidia-smi