Added a RunOnGPU option, gpuArray conversion of the wavefunction, and a SLURM batch script to allow running simulations on GPUs in the cluster.

Karthik 2024-06-19 19:45:22 +02:00
parent a9e24bf295
commit cafe7eeb86
5 changed files with 49 additions and 4 deletions

View File

@@ -25,6 +25,7 @@ OptionsStruct.NumberOfTimeSteps = 2E6; % in s
OptionsStruct.EnergyTolerance = 5E-10;
OptionsStruct.JobNumber = 1;
OptionsStruct.RunOnGPU = true;
OptionsStruct.SaveData = true;
OptionsStruct.SaveDirectory = './Data';
options = Helper.convertstruct2cell(OptionsStruct);
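For context, a minimal usage sketch of how the new flag could be consumed; the constructor call and the GPU-availability check below are assumptions (the commit itself hard-codes RunOnGPU = true), not part of this diff:
% Sketch (assumed usage, not in this commit): only request the GPU path
% when a device is actually visible to MATLAB, then pass the options on
% as name-value pairs to the DipolarGas constructor shown further down.
OptionsStruct.RunOnGPU = gpuDeviceCount > 0;
options = Helper.convertstruct2cell(OptionsStruct);
sim     = DipolarGas(options{:});   % parsed by the inputParser below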

View File

@@ -20,9 +20,8 @@ classdef DipolarGas < handle & matlab.mixin.Copyable
SimulationParameters;
%Flags
JobNumber;
RunOnGPU;
DebugMode;
DoSave;
SaveDirectory;
@@ -64,6 +63,8 @@ classdef DipolarGas < handle & matlab.mixin.Copyable
@(x) assert(isnumeric(x) && isscalar(x) && (x > 0)));
addParameter(p, 'JobNumber', 1,...
@(x) assert(isnumeric(x) && isscalar(x) && (x > 0)));
addParameter(p, 'RunOnGPU', false,...
@islogical);
addParameter(p, 'DebugMode', false,...
@islogical);
addParameter(p, 'SaveData', false,...
@@ -88,6 +89,7 @@ classdef DipolarGas < handle & matlab.mixin.Copyable
this.MinimumTimeStepSize = p.Results.MinimumTimeStepSize;
this.JobNumber = p.Results.JobNumber;
this.RunOnGPU = p.Results.RunOnGPU;
this.DebugMode = p.Results.DebugMode;
this.DoSave = p.Results.SaveData;
this.SaveDirectory = p.Results.SaveDirectory;
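A small defensive check could follow the assignments above so that CPU-only nodes do not fail when the flag is set; this guard is a sketch and is not part of the diff:
% Sketch (not in this commit): degrade gracefully if RunOnGPU was requested
% on a node without a visible GPU device.
if this.RunOnGPU && gpuDeviceCount == 0
    warning('DipolarGas:NoGPU', ...
        'RunOnGPU requested but no GPU device found; continuing on the CPU.');
    this.RunOnGPU = false;
end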

View File

@@ -16,4 +16,8 @@ function [psi,V,VDk] = initialize(this,Params,Transf,TransfRad)
% == Setting up the initial wavefunction == %
psi = this.setupWavefunction(Params,Transf);
if this.RunOnGPU
psi = gpuArray(psi);
end
end
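The complementary step, gathering GPU data back to host memory before saving, is not shown in this diff; a sketch of what it could look like (the file name and save call are assumptions):
% Sketch (assumed, not part of this commit): gpuArray data must be gathered
% back to host memory before results are written to SaveDirectory.
if this.RunOnGPU
    psi = gather(psi);   % copy from the device to a regular MATLAB array
end
save(fullfile(this.SaveDirectory, 'psi_final.mat'), 'psi');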

View File

@@ -6,9 +6,9 @@
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=10
#SBATCH --mem=8G
#SBATCH --mem=24G
# Estimated wallclock time for job
#SBATCH --time=02:00:00
#SBATCH --time=15:00:00
#SBATCH --job-name=simulation
#SBATCH --error=simulation.err
#SBATCH --output=simulation.out

View File

@@ -0,0 +1,38 @@
#!/bin/bash
########### Begin SLURM header ###########
#Partition
#SBATCH --partition=single
# Request number of nodes and GPU for job
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=10
#SBATCH --gres=gpu:4
#SBATCH --mem=24G
# Estimated wallclock time for job
#SBATCH --time=15:00:00
#SBATCH --job-name=simulation
#SBATCH --error=simulation.err
#SBATCH --output=simulation.out
########### End SLURM header ##########
echo "Working Directory: $PWD"
echo "Running on host $HOSTNAME"
echo "Job id: $SLURM_JOB_ID"
echo "Job name: $SLURM_JOB_NAME"
echo "Number of nodes allocated to job: $SLURM_JOB_NUM_NODES"
echo "Number of GPUs allocated to job: $SLURM_GPUS"
# Load module
module load math/matlab/R2023a
echo "Directory is $(pwd)"
echo "Initiating Job..."
# Start the MATLAB program
matlab -nodisplay -nosplash -r "Scripts.run_on_cluster"
# Completion notice
echo "Job terminated successfully"
exit
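Since --gres=gpu:4 exposes several devices to the single MATLAB process, the entry point could pin the computation to one of them; the device-selection logic below is a sketch and an assumption, not part of this commit:
% Sketch (assumed): select one of the allocated GPUs inside Scripts.run_on_cluster.
idx = str2double(getenv('SLURM_LOCALID'));   % task-local index; may be unset
if isnan(idx)
    idx = 0;
end
gpuDevice(mod(idx, gpuDeviceCount) + 1);     % MATLAB GPU device indices are 1-based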