JOB DETAILS FOR JOB 412272

captured Tue Dec 30 08:51:32 SAST 2025

MaxRAM=6.6G
MaxVMSize=0G
AveCPUFreq=244K
AveDiskRead=3531873254
AveDiskWrite=2559830435

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8185 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=01:13:33 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-12-30T06:41:56 EligibleTime=2025-12-30T06:41:56
   AccrueTime=2025-12-30T06:41:59
   StartTime=2025-12-30T07:37:59 EndTime=2026-01-01T07:37:59 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-12-30T07:37:59 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:2006587
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=9142M,node=1,billing=3,gres/gpu=2
   AllocTRES=cpu=8,mem=73136M,node=1,billing=17,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=0-3,28-31 Mem=73136 GRES=gpu:ampere:2(IDX:0-1)
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_finetune.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-412272.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-412272.out
   Power=
   CpusPerTres=gres/gpu:4
   TresPerNode=gres/gpu:2
   MailUser=LMBANR001@myuct.ac.za MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 412272

captured Tue Dec 30 08:52:01 SAST 2025

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8185 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=01:13:38 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-12-30T06:41:56 EligibleTime=2025-12-30T06:41:56
   AccrueTime=2025-12-30T06:41:59
   StartTime=2025-12-30T07:37:59 EndTime=2025-12-30T08:51:37 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-12-30T07:37:59 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:2006587
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=9142M,node=1,billing=3,gres/gpu=2
   AllocTRES=cpu=8,mem=73136M,node=1,billing=17,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=0-3,28-31 Mem=73136 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_finetune.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-412272.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-412272.out
   Power=
   CpusPerTres=gres/gpu:4
   TresPerNode=gres/gpu:2
   MailUser=LMBANR001@myuct.ac.za MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 412272

captured Tue Dec 30 08:52:32 SAST 2025

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8185 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=01:13:38 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-12-30T06:41:56 EligibleTime=2025-12-30T06:41:56
   AccrueTime=2025-12-30T06:41:59
   StartTime=2025-12-30T07:37:59 EndTime=2025-12-30T08:51:37 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-12-30T07:37:59 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:2006587
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=9142M,node=1,billing=3,gres/gpu=2
   AllocTRES=cpu=8,mem=73136M,node=1,billing=17,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=0-3,28-31 Mem=73136 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_finetune.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-412272.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-412272.out
   Power=
   CpusPerTres=gres/gpu:4
   TresPerNode=gres/gpu:2
   MailUser=LMBANR001@myuct.ac.za MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 412272

captured Tue Dec 30 08:53:01 SAST 2025

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8185 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=01:13:38 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-12-30T06:41:56 EligibleTime=2025-12-30T06:41:56
   AccrueTime=2025-12-30T06:41:59
   StartTime=2025-12-30T07:37:59 EndTime=2025-12-30T08:51:37 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-12-30T07:37:59 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:2006587
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=9142M,node=1,billing=3,gres/gpu=2
   AllocTRES=cpu=8,mem=73136M,node=1,billing=17,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=0-3,28-31 Mem=73136 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_finetune.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-412272.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-412272.out
   Power=
   CpusPerTres=gres/gpu:4
   TresPerNode=gres/gpu:2
   MailUser=LMBANR001@myuct.ac.za MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 412272

captured Tue Dec 30 08:53:32 SAST 2025

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8185 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=01:13:38 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-12-30T06:41:56 EligibleTime=2025-12-30T06:41:56
   AccrueTime=2025-12-30T06:41:59
   StartTime=2025-12-30T07:37:59 EndTime=2025-12-30T08:51:37 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-12-30T07:37:59 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:2006587
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=9142M,node=1,billing=3,gres/gpu=2
   AllocTRES=cpu=8,mem=73136M,node=1,billing=17,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=0-3,28-31 Mem=73136 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_finetune.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-412272.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-412272.out
   Power=
   CpusPerTres=gres/gpu:4
   TresPerNode=gres/gpu:2
   MailUser=LMBANR001@myuct.ac.za MailType=END,FAIL
   

UCT HPC cluster
JOB DETAILS FOR JOB 412272

captured Tue Dec 30 08:54:01 SAST 2025

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=8185 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=COMPLETED Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=01:13:38 TimeLimit=2-00:00:00 TimeMin=N/A
   SubmitTime=2025-12-30T06:41:56 EligibleTime=2025-12-30T06:41:56
   AccrueTime=2025-12-30T06:41:59
   StartTime=2025-12-30T07:37:59 EndTime=2025-12-30T08:51:37 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2025-12-30T07:37:59 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:2006587
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=8 NumTasks=1 CPUs/Task=1 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=1,mem=9142M,node=1,billing=3,gres/gpu=2
   AllocTRES=cpu=8,mem=73136M,node=1,billing=17,gres/gpu=2,gres/gpu:ampere=2
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:2
     Nodes=srvrocgpu010 CPU_IDs=0-3,28-31 Mem=73136 GRES=
   MinCPUsNode=1 MinMemoryCPU=9142M MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/launch_finetune.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/slurm-412272.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/slurm-412272.out
   Power=
   CpusPerTres=gres/gpu:4
   TresPerNode=gres/gpu:2
   MailUser=LMBANR001@myuct.ac.za MailType=END,FAIL