JOB DETAILS FOR JOB 501803

captured Mon Feb 2 16:24:01 SAST 2026

MaxRAM=3G
MaxVMSize=0G
AveCPUFreq=3.22M
AveDiskRead=327237767
AveDiskWrite=6420794

mrdham001(1221948932 Hamidreza Moradi) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7069 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:00:41 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-02-02T16:23:20 EligibleTime=2026-02-02T16:23:20
   AccrueTime=2026-02-02T16:23:20
   StartTime=2026-02-02T16:23:20 EndTime=2026-02-04T16:23:20 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-02-02T16:23:20 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3954041
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu014
   BatchHost=srvrocgpu014
   NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   AllocTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:1
     Nodes=srvrocgpu014 CPU_IDs=25 Mem=10000 GRES=gpu:l40s:1(IDX:1)
   MinCPUsNode=1 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/mrdham001/run_FloeModel.sh
   WorkDir=/home/mrdham001
   StdErr=/home/mrdham001/slurm-501803.out
   StdIn=/dev/null
   StdOut=/home/mrdham001/slurm-501803.out
   Power=
   TresPerNode=gres/gpu:l40s:1
   

UCT HPC cluster
JOB DETAILS FOR JOB 501803

captured Mon Feb 2 16:24:34 SAST 2026

mrdham001(1221948932 Hamidreza Moradi) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7069 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=OUT_OF_MEMORY Reason=OutOfMemory Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:125
   DerivedExitCode=0:0
   RunTime=00:00:45 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-02-02T16:23:20 EligibleTime=2026-02-02T16:23:20
   AccrueTime=2026-02-02T16:23:20
   StartTime=2026-02-02T16:23:20 EndTime=2026-02-02T16:24:05 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-02-02T16:23:20 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3954041
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu014
   BatchHost=srvrocgpu014
   NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   AllocTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:1
     Nodes=srvrocgpu014 CPU_IDs=25 Mem=10000 GRES=
   MinCPUsNode=1 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/mrdham001/run_FloeModel.sh
   WorkDir=/home/mrdham001
   StdErr=/home/mrdham001/slurm-501803.out
   StdIn=/dev/null
   StdOut=/home/mrdham001/slurm-501803.out
   Power=
   TresPerNode=gres/gpu:l40s:1
   

UCT HPC cluster
JOB DETAILS FOR JOB 501803

captured Mon Feb 2 16:25:01 SAST 2026

mrdham001(1221948932 Hamidreza Moradi) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7069 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=OUT_OF_MEMORY Reason=OutOfMemory Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:125
   DerivedExitCode=0:0
   RunTime=00:00:45 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-02-02T16:23:20 EligibleTime=2026-02-02T16:23:20
   AccrueTime=2026-02-02T16:23:20
   StartTime=2026-02-02T16:23:20 EndTime=2026-02-02T16:24:05 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-02-02T16:23:20 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3954041
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu014
   BatchHost=srvrocgpu014
   NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   AllocTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:1
     Nodes=srvrocgpu014 CPU_IDs=25 Mem=10000 GRES=
   MinCPUsNode=1 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/mrdham001/run_FloeModel.sh
   WorkDir=/home/mrdham001
   StdErr=/home/mrdham001/slurm-501803.out
   StdIn=/dev/null
   StdOut=/home/mrdham001/slurm-501803.out
   Power=
   TresPerNode=gres/gpu:l40s:1
   

UCT HPC cluster
JOB DETAILS FOR JOB 501803

captured Mon Feb 2 16:25:35 SAST 2026

mrdham001(1221948932 Hamidreza Moradi) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7069 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=OUT_OF_MEMORY Reason=OutOfMemory Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:125
   DerivedExitCode=0:0
   RunTime=00:00:45 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-02-02T16:23:20 EligibleTime=2026-02-02T16:23:20
   AccrueTime=2026-02-02T16:23:20
   StartTime=2026-02-02T16:23:20 EndTime=2026-02-02T16:24:05 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-02-02T16:23:20 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3954041
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu014
   BatchHost=srvrocgpu014
   NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   AllocTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:1
     Nodes=srvrocgpu014 CPU_IDs=25 Mem=10000 GRES=
   MinCPUsNode=1 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/mrdham001/run_FloeModel.sh
   WorkDir=/home/mrdham001
   StdErr=/home/mrdham001/slurm-501803.out
   StdIn=/dev/null
   StdOut=/home/mrdham001/slurm-501803.out
   Power=
   TresPerNode=gres/gpu:l40s:1
   

UCT HPC cluster
JOB DETAILS FOR JOB 501803

captured Mon Feb 2 16:26:01 SAST 2026

mrdham001(1221948932 Hamidreza Moradi) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7069 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=OUT_OF_MEMORY Reason=OutOfMemory Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:125
   DerivedExitCode=0:0
   RunTime=00:00:45 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-02-02T16:23:20 EligibleTime=2026-02-02T16:23:20
   AccrueTime=2026-02-02T16:23:20
   StartTime=2026-02-02T16:23:20 EndTime=2026-02-02T16:24:05 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-02-02T16:23:20 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3954041
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu014
   BatchHost=srvrocgpu014
   NumNodes=1 NumCPUs=1 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   AllocTRES=cpu=1,mem=10000M,node=1,billing=1,gres/gpu=1,gres/gpu:l40s=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:1
     Nodes=srvrocgpu014 CPU_IDs=25 Mem=10000 GRES=
   MinCPUsNode=1 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/mrdham001/run_FloeModel.sh
   WorkDir=/home/mrdham001
   StdErr=/home/mrdham001/slurm-501803.out
   StdIn=/dev/null
   StdOut=/home/mrdham001/slurm-501803.out
   Power=
   TresPerNode=gres/gpu:l40s:1