JOB DETAILS FOR JOB 444707

captured Wed Jan 14 20:38:01 SAST 2026

MaxRAM=0G
MaxVMSize=0G
AveCPUFreq=
AveDiskRead=
AveDiskWrite=

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7519 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=RUNNING Reason=None Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=0:0
   DerivedExitCode=0:0
   RunTime=00:03:19 TimeLimit=00:30:00 TimeMin=N/A
   SubmitTime=2026-01-14T20:34:42 EligibleTime=2026-01-14T20:34:42
   AccrueTime=2026-01-14T20:34:42
   StartTime=2026-01-14T20:34:42 EndTime=2026-01-14T21:04:42 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-01-14T20:34:42 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:1449571
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=4 NumTasks=1 CPUs/Task=4 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1
   AllocTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1,gres/gpu:ampere=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:1
     Nodes=srvrocgpu010 CPU_IDs=4-5,32-33 Mem=16384 GRES=gpu:ampere:1(IDX:2)
   MinCPUsNode=4 MinMemoryNode=16G MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/install_cuda_with_pytorch27.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:4
   

UCT HPC cluster
JOB DETAILS FOR JOB 444707

captured Wed Jan 14 20:38:33 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7519 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=FAILED Reason=NonZeroExitCode Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=1:0
   DerivedExitCode=0:0
   RunTime=00:03:20 TimeLimit=00:30:00 TimeMin=N/A
   SubmitTime=2026-01-14T20:34:42 EligibleTime=2026-01-14T20:34:42
   AccrueTime=2026-01-14T20:34:42
   StartTime=2026-01-14T20:34:42 EndTime=2026-01-14T20:38:02 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-01-14T20:34:42 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:1449571
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=4 NumTasks=1 CPUs/Task=4 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1
   AllocTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1,gres/gpu:ampere=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:1
     Nodes=srvrocgpu010 CPU_IDs=4-5,32-33 Mem=16384 GRES=
   MinCPUsNode=4 MinMemoryNode=16G MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/install_cuda_with_pytorch27.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:4
   

UCT HPC cluster
JOB DETAILS FOR JOB 444707

captured Wed Jan 14 20:39:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7519 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=FAILED Reason=NonZeroExitCode Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=1:0
   DerivedExitCode=0:0
   RunTime=00:03:20 TimeLimit=00:30:00 TimeMin=N/A
   SubmitTime=2026-01-14T20:34:42 EligibleTime=2026-01-14T20:34:42
   AccrueTime=2026-01-14T20:34:42
   StartTime=2026-01-14T20:34:42 EndTime=2026-01-14T20:38:02 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-01-14T20:34:42 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:1449571
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=4 NumTasks=1 CPUs/Task=4 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1
   AllocTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1,gres/gpu:ampere=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:1
     Nodes=srvrocgpu010 CPU_IDs=4-5,32-33 Mem=16384 GRES=
   MinCPUsNode=4 MinMemoryNode=16G MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/install_cuda_with_pytorch27.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:4
   

UCT HPC cluster
JOB DETAILS FOR JOB 444707

captured Wed Jan 14 20:39:33 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7519 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=FAILED Reason=NonZeroExitCode Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=1:0
   DerivedExitCode=0:0
   RunTime=00:03:20 TimeLimit=00:30:00 TimeMin=N/A
   SubmitTime=2026-01-14T20:34:42 EligibleTime=2026-01-14T20:34:42
   AccrueTime=2026-01-14T20:34:42
   StartTime=2026-01-14T20:34:42 EndTime=2026-01-14T20:38:02 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-01-14T20:34:42 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:1449571
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=4 NumTasks=1 CPUs/Task=4 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1
   AllocTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1,gres/gpu:ampere=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:1
     Nodes=srvrocgpu010 CPU_IDs=4-5,32-33 Mem=16384 GRES=
   MinCPUsNode=4 MinMemoryNode=16G MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/install_cuda_with_pytorch27.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:4
   

UCT HPC cluster
JOB DETAILS FOR JOB 444707

captured Wed Jan 14 20:40:01 SAST 2026

lmbanr001(733329384 Anri Lombard) GroupId=eresearch_hpc_users(1221947160) MCS_label=N/A
   Priority=7519 Nice=0 Account=nlpgroup QOS=nlpgroup
   JobState=FAILED Reason=NonZeroExitCode Dependency=(null)
   Requeue=0 Restarts=0 BatchFlag=1 Reboot=0 ExitCode=1:0
   DerivedExitCode=0:0
   RunTime=00:03:20 TimeLimit=00:30:00 TimeMin=N/A
   SubmitTime=2026-01-14T20:34:42 EligibleTime=2026-01-14T20:34:42
   AccrueTime=2026-01-14T20:34:42
   StartTime=2026-01-14T20:34:42 EndTime=2026-01-14T20:38:02 Deadline=N/A
   SuspendTime=None SecsPreSuspend=0 LastSchedEval=2026-01-14T20:34:42 Scheduler=Main
   Partition=a100 AllocNode:Sid=srvrochpc001:1449571
   ReqNodeList=(null) ExcNodeList=(null)
   NodeList=srvrocgpu010
   BatchHost=srvrocgpu010
   NumNodes=1 NumCPUs=4 NumTasks=1 CPUs/Task=4 ReqB:S:C:T=0:0:*:*
   ReqTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1
   AllocTRES=cpu=4,mem=16G,node=1,billing=6,gres/gpu=1,gres/gpu:ampere=1
   Socks/Node=* NtasksPerN:B:S:C=0:0:*:* CoreSpec=*
   JOB_GRES=gpu:ampere:1
     Nodes=srvrocgpu010 CPU_IDs=4-5,32-33 Mem=16384 GRES=
   MinCPUsNode=4 MinMemoryNode=16G MinTmpDiskNode=0
   Features=(null) DelayBoot=00:00:00
   OverSubscribe=OK Contiguous=0 Licenses=(null) Network=(null)
   Command=/home/lmbanr001/masters/sallm/scripts/install_cuda_with_pytorch27.sh
   WorkDir=/home/lmbanr001/masters/sallm
   StdErr=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   StdIn=/dev/null
   StdOut=/home/lmbanr001/masters/sallm/logs/cuda-pt27-444707.out
   Power=
   TresPerNode=gres/gpu:1
   TresPerTask=cpu:4