JOB DETAILS FOR JOB 798141

captured Sat May 2 20:39:01 SAST 2026

MaxRAM=3.1G
MaxVMSize=0G
AveCPUFreq=2.80M
AveDiskRead=1781217971
AveDiskWrite=263465744

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7733 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:14:28 TimeLimit=08:00:00 TimeMin=N/A
   SubmitTime=2026-05-02T20:18:56 EligibleTime=2026-05-02T20:18:56
   AccrueTime=2026-05-02T20:18:56
   StartTime=2026-05-02T20:24:33 EndTime=2026-05-03T04:24:33 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-05-02T20:24:33 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3422690
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu016
   BatchHost=srvrocgpu016
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   AllocTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:2
     Nodes=srvrocgpu016 CPU_IDs=0-7 Mem=80000 GRES=gpu:l40s:2(IDX:0-1)
   MinCPUsNode=8 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_evaluation.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-798141.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-798141.out
   Power=
   TresPerNode=gres/gpu:l40s:2
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster

JOB DETAILS FOR JOB 798141

captured Sat May 2 20:39:38 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7733 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:15:01 TimeLimit=08:00:00 TimeMin=N/A
   SubmitTime=2026-05-02T20:18:56 EligibleTime=2026-05-02T20:18:56
   AccrueTime=2026-05-02T20:18:56
   StartTime=2026-05-02T20:24:33 EndTime=2026-05-02T20:39:34 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-05-02T20:24:33 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3422690
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu016
   BatchHost=srvrocgpu016
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   AllocTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:2
     Nodes=srvrocgpu016 CPU_IDs=0-7 Mem=80000 GRES=
   MinCPUsNode=8 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_evaluation.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-798141.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-798141.out
   Power=
   TresPerNode=gres/gpu:l40s:2
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster

JOB DETAILS FOR JOB 798141

captured Sat May 2 20:40:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7733 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:15:01 TimeLimit=08:00:00 TimeMin=N/A
   SubmitTime=2026-05-02T20:18:56 EligibleTime=2026-05-02T20:18:56
   AccrueTime=2026-05-02T20:18:56
   StartTime=2026-05-02T20:24:33 EndTime=2026-05-02T20:39:34 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-05-02T20:24:33 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3422690
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu016
   BatchHost=srvrocgpu016
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   AllocTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:2
     Nodes=srvrocgpu016 CPU_IDs=0-7 Mem=80000 GRES=
   MinCPUsNode=8 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_evaluation.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-798141.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-798141.out
   Power=
   TresPerNode=gres/gpu:l40s:2
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster

JOB DETAILS FOR JOB 798141

captured Sat May 2 20:40:39 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7733 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:15:01 TimeLimit=08:00:00 TimeMin=N/A
   SubmitTime=2026-05-02T20:18:56 EligibleTime=2026-05-02T20:18:56
   AccrueTime=2026-05-02T20:18:56
   StartTime=2026-05-02T20:24:33 EndTime=2026-05-02T20:39:34 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-05-02T20:24:33 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3422690
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu016
   BatchHost=srvrocgpu016
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   AllocTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:2
     Nodes=srvrocgpu016 CPU_IDs=0-7 Mem=80000 GRES=
   MinCPUsNode=8 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_evaluation.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-798141.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-798141.out
   Power=
   TresPerNode=gres/gpu:l40s:2
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL
   

UCT HPC cluster

JOB DETAILS FOR JOB 798141

captured Sat May 2 20:41:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7733 Nice=0 Account=l40sfree QOS=l40sfree
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:15:01 TimeLimit=08:00:00 TimeMin=N/A
   SubmitTime=2026-05-02T20:18:56 EligibleTime=2026-05-02T20:18:56
   AccrueTime=2026-05-02T20:18:56
   StartTime=2026-05-02T20:24:33 EndTime=2026-05-02T20:39:34 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-05-02T20:24:33 Scheduler=Main
   Partition=l40s AllocNode:Sid=srvrochpc001:3422690
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu016
   BatchHost=srvrocgpu016
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=8 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   AllocTRES=cpu=8,mem=80000M,node=1,billing=15,gres/gpu=2,gres/gpu:l40s=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:l40s:2
     Nodes=srvrocgpu016 CPU_IDs=0-7 Mem=80000 GRES=
   MinCPUsNode=8 MinMemoryCPU=10000M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_evaluation.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-798141.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-798141.out
   Power=
   TresPerNode=gres/gpu:l40s:2
   TresPerTask=cpu:8
   MailUser=lmbanr001 MailType=END,FAIL