JOB DETAILS FOR JOB 70009

captured Sat Feb 8 04:24:32 SAST 2025

MaxRAM=2.4G
MaxVMSize=0G
AveCPUFreq=18K
AveDiskRead=417142199
AveDiskWrite=1188970641

JobId=70009 JobName=T5Train
   UserId=01452389(465061273 Zola Mahlaza) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8141 Nice=0 Account=a100free QOS=a100free
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=10:43:19 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-02-07T17:41:13 EligibleTime=2025-02-07T17:41:13
   AccrueTime=2025-02-07T17:41:13
   StartTime=2025-02-07T17:41:13 EndTime=2025-02-09T17:41:13 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-02-07T17:41:13 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001.uct.ac.za:2748026
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=2 NumTasks=2 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   AllocTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=1,29 Mem=18284 GRES=gpu:ampere:2(IDX:1-2)
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/01452389/Phon cond model/T5Based-model/HPCScript.sh
   WorkDir=/home/01452389/Phon cond model/T5Based-model
   StdErr=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   StdIn=/dev/null
   StdOut=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   Power=
   TresPerNode=gres/gpu:ampere:2
   

UCT HPC cluster
JOB DETAILS FOR JOB 70009

captured Sat Feb 8 04:25:01 SAST 2025

JobId=70009 JobName=T5Train
   UserId=01452389(465061273 Zola Mahlaza) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8141 Nice=0 Account=a100free QOS=a100free
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=10:43:29 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-02-07T17:41:13 EligibleTime=2025-02-07T17:41:13
   AccrueTime=2025-02-07T17:41:13
   StartTime=2025-02-07T17:41:13 EndTime=2025-02-08T04:24:42 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-02-07T17:41:13 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001.uct.ac.za:2748026
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=2 NumTasks=2 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   AllocTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=1,29 Mem=18284 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/01452389/Phon cond model/T5Based-model/HPCScript.sh
   WorkDir=/home/01452389/Phon cond model/T5Based-model
   StdErr=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   StdIn=/dev/null
   StdOut=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   Power=
   TresPerNode=gres/gpu:ampere:2
   

UCT HPC cluster
JOB DETAILS FOR JOB 70009

captured Sat Feb 8 04:25:32 SAST 2025

JobId=70009 JobName=T5Train
   UserId=01452389(465061273 Zola Mahlaza) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8141 Nice=0 Account=a100free QOS=a100free
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=10:43:29 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-02-07T17:41:13 EligibleTime=2025-02-07T17:41:13
   AccrueTime=2025-02-07T17:41:13
   StartTime=2025-02-07T17:41:13 EndTime=2025-02-08T04:24:42 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-02-07T17:41:13 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001.uct.ac.za:2748026
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=2 NumTasks=2 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   AllocTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=1,29 Mem=18284 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/01452389/Phon cond model/T5Based-model/HPCScript.sh
   WorkDir=/home/01452389/Phon cond model/T5Based-model
   StdErr=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   StdIn=/dev/null
   StdOut=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   Power=
   TresPerNode=gres/gpu:ampere:2
   

UCT HPC cluster
JOB DETAILS FOR JOB 70009

captured Sat Feb 8 04:26:01 SAST 2025

JobId=70009 JobName=T5Train
   UserId=01452389(465061273 Zola Mahlaza) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8141 Nice=0 Account=a100free QOS=a100free
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=10:43:29 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-02-07T17:41:13 EligibleTime=2025-02-07T17:41:13
   AccrueTime=2025-02-07T17:41:13
   StartTime=2025-02-07T17:41:13 EndTime=2025-02-08T04:24:42 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-02-07T17:41:13 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001.uct.ac.za:2748026
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=2 NumTasks=2 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   AllocTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=1,29 Mem=18284 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/01452389/Phon cond model/T5Based-model/HPCScript.sh
   WorkDir=/home/01452389/Phon cond model/T5Based-model
   StdErr=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   StdIn=/dev/null
   StdOut=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   Power=
   TresPerNode=gres/gpu:ampere:2
   

UCT HPC cluster
JOB DETAILS FOR JOB 70009

captured Sat Feb 8 04:26:32 SAST 2025

JobId=70009 JobName=T5Train
   UserId=01452389(465061273 Zola Mahlaza) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8141 Nice=0 Account=a100free QOS=a100free
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=10:43:29 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-02-07T17:41:13 EligibleTime=2025-02-07T17:41:13
   AccrueTime=2025-02-07T17:41:13
   StartTime=2025-02-07T17:41:13 EndTime=2025-02-08T04:24:42 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-02-07T17:41:13 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001.uct.ac.za:2748026
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=2 NumTasks=2 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   AllocTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=1,29 Mem=18284 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/01452389/Phon cond model/T5Based-model/HPCScript.sh
   WorkDir=/home/01452389/Phon cond model/T5Based-model
   StdErr=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   StdIn=/dev/null
   StdOut=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   Power=
   TresPerNode=gres/gpu:ampere:2
   

UCT HPC cluster
JOB DETAILS FOR JOB 70009

captured Sat Feb 8 04:27:01 SAST 2025

JobId=70009 JobName=T5Train
   UserId=01452389(465061273 Zola Mahlaza) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8141 Nice=0 Account=a100free QOS=a100free
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=1 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=10:43:29 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-02-07T17:41:13 EligibleTime=2025-02-07T17:41:13
   AccrueTime=2025-02-07T17:41:13
   StartTime=2025-02-07T17:41:13 EndTime=2025-02-08T04:24:42 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-02-07T17:41:13 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001.uct.ac.za:2748026
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=2 NumTasks=2 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   AllocTRES=cpu=2,mem=18284M,node=1,billing=5,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=1,29 Mem=18284 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/01452389/Phon cond model/T5Based-model/HPCScript.sh
   WorkDir=/home/01452389/Phon cond model/T5Based-model
   StdErr=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   StdIn=/dev/null
   StdOut=/home/01452389/Phon cond model/T5Based-model/slurm-70009.out
   Power=
   TresPerNode=gres/gpu:ampere:2