JOB DETAILS FOR JOB 774949

captured Fri Apr 24 21:49:35 SAST 2026

MaxRAM=4.9G
MaxVMSize=0G
AveCPUFreq=2K
AveDiskRead=7995402949
AveDiskWrite=2041730387

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7702 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=13:02:21 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-04-24T08:47:04 EligibleTime=2026-04-24T08:47:04
   AccrueTime=2026-04-24T08:47:06
   StartTime=2026-04-24T08:47:14 EndTime=2026-04-26T08:47:14 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-04-24T08:47:14 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:3178478
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu009
   BatchHost=srvrocgpu009
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1
   AllocTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1,gres/gpu:amperemk=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:amperemk:1
     Nodes=srvrocgpu009 CPU_IDs=0-1,3-8 Mem=73136 GRES=gpu:amperemk:1(IDX:0)
   MinCPUsNode=8 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_hpo.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-774949.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-774949.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 774949

captured Fri Apr 24 21:50:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7702 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=CANCELLED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:15
   DerivedExitCode=0:0
   RunTime=13:02:41 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-04-24T08:47:04 EligibleTime=2026-04-24T08:47:04
   AccrueTime=2026-04-24T08:47:06
   StartTime=2026-04-24T08:47:14 EndTime=2026-04-24T21:49:55 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-04-24T08:47:14 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:3178478
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu009
   BatchHost=srvrocgpu009
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1
   AllocTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1,gres/gpu:amperemk=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:amperemk:1
     Nodes=srvrocgpu009 CPU_IDs=0-1,3-8 Mem=73136 GRES=
   MinCPUsNode=8 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_hpo.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-774949.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-774949.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 774949

captured Fri Apr 24 21:50:35 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7702 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=CANCELLED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:15
   DerivedExitCode=0:0
   RunTime=13:02:41 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-04-24T08:47:04 EligibleTime=2026-04-24T08:47:04
   AccrueTime=2026-04-24T08:47:06
   StartTime=2026-04-24T08:47:14 EndTime=2026-04-24T21:49:55 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-04-24T08:47:14 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:3178478
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu009
   BatchHost=srvrocgpu009
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1
   AllocTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1,gres/gpu:amperemk=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:amperemk:1
     Nodes=srvrocgpu009 CPU_IDs=0-1,3-8 Mem=73136 GRES=
   MinCPUsNode=8 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_hpo.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-774949.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-774949.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 774949

captured Fri Apr 24 21:51:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7702 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=CANCELLED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:15
   DerivedExitCode=0:0
   RunTime=13:02:41 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-04-24T08:47:04 EligibleTime=2026-04-24T08:47:04
   AccrueTime=2026-04-24T08:47:06
   StartTime=2026-04-24T08:47:14 EndTime=2026-04-24T21:49:55 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-04-24T08:47:14 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:3178478
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu009
   BatchHost=srvrocgpu009
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1
   AllocTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1,gres/gpu:amperemk=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:amperemk:1
     Nodes=srvrocgpu009 CPU_IDs=0-1,3-8 Mem=73136 GRES=
   MinCPUsNode=8 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_hpo.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-774949.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-774949.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 774949

captured Fri Apr 24 21:51:35 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7702 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=CANCELLED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:15
   DerivedExitCode=0:0
   RunTime=13:02:41 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-04-24T08:47:04 EligibleTime=2026-04-24T08:47:04
   AccrueTime=2026-04-24T08:47:06
   StartTime=2026-04-24T08:47:14 EndTime=2026-04-24T21:49:55 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-04-24T08:47:14 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:3178478
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu009
   BatchHost=srvrocgpu009
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1
   AllocTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1,gres/gpu:amperemk=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:amperemk:1
     Nodes=srvrocgpu009 CPU_IDs=0-1,3-8 Mem=73136 GRES=
   MinCPUsNode=8 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_hpo.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-774949.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-774949.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 774949

captured Fri Apr 24 21:52:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7702 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=CANCELLED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:15
   DerivedExitCode=0:0
   RunTime=13:02:41 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2026-04-24T08:47:04 EligibleTime=2026-04-24T08:47:04
   AccrueTime=2026-04-24T08:47:06
   StartTime=2026-04-24T08:47:14 EndTime=2026-04-24T21:49:55 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-04-24T08:47:14 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:3178478
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu009
   BatchHost=srvrocgpu009
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1
   AllocTRES=cpu=8,mem=73136M,node=1,billing=16,gres/gpu=1,gres/gpu:amperemk=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:amperemk:1
     Nodes=srvrocgpu009 CPU_IDs=0-1,3-8 Mem=73136 GRES=
   MinCPUsNode=8 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_hpo.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-774949.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-774949.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL