Bash · 32 lines · 1.5 KiB
#!/bin/bash

ProcID="$1"
ClusterID="$2"
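# For context (a sketch, not part of this script's repository): HTCondor
# fills these in through the submit file, which would contain something like
#   executable = run_singularity.sh   # hypothetical name for this script
#   arguments  = $(ProcId) $(ClusterId)
#   queue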

# More or less generic PI LHCb HTCondor script to run something within Singularity.
#

# Where we should work
working_dir=/home/lhcb/kopecna/B2KstarMuMu/code/ewp-Bplus2Kstmumu-AngAna/FCNCfitter/condor

# What to start inside Singularity (the current directory will be the current
# directory inside Singularity as well)
to_run="/home/lhcb/kopecna/B2KstarMuMu/code/ewp-Bplus2Kstmumu-AngAna/FCNCfitter/condor/job.sh"
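# The real job.sh is not shown here; a minimal sketch of what it could look
# like (the fitter executable name below is an assumption):
#   #!/bin/bash
#   echo "Running process $1 of cluster $2"
#   ./fitter --job "$1"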

# Singularity image to use (can point to CERN CVMFS...).
# It can also be built by yourself, like:
# OS_IMAGE=/work/zhelezov/singularity/CentOS7.simg
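# A sketch of how such an image could be built yourself (assumes a machine
# with Singularity installed and access to Docker Hub):
#   singularity build /work/zhelezov/singularity/CentOS7.simg docker://centos:centos7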
# Taken from the "generic" CERN one, unfortunately without graphics libraries:
#OS_IMAGE=/cvmfs/unpacked.cern.ch/registry.hub.docker.com/library/centos\:centos7
# This one seems to work for the purpose at the moment... Not perfect, I know.
OS_IMAGE=/cvmfs/unpacked.cern.ch/registry.hub.docker.com/cmssw/cc7\:amd64

# We will work here. The local Condor pool has no "shared filesystem" (yet;
# unclear if it should/will), so jobs start in a dedicated local directory
# on the worker nodes.
cd "$working_dir" || exit 1
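# If the /auto mounts were ever unavailable, HTCondor's own file transfer
# could be used instead (hypothetical submit-file lines):
#   should_transfer_files = YES
#   when_to_transfer_output = ON_EXIT
#   transfer_input_files = job.sh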

# This defines where Singularity is found
export PATH=/work/software/singularity/latest/$(/work/software/os_version)/bin:$PATH

# Just for info: under which OS we are running
echo "Batch execution OS: $(/work/software/os_version)"

# In case CVMFS is not available for whatever reason, wait for the image to appear
while [ ! -e "$OS_IMAGE" ]; do
    echo "Waiting for image..." >&2
    /bin/sleep 300
done
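# A bounded variant of the wait above (just a sketch): give up after ~2 hours
# instead of looping forever.
#   for _ in $(seq 1 24); do
#       [ -e "$OS_IMAGE" ] && break
#       /bin/sleep 300
#   done
#   [ -e "$OS_IMAGE" ] || { echo "Image never appeared, giving up" >&2; exit 1; }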

# This starts Singularity with the image and the final script
exec singularity exec --bind /cvmfs,/auto,/auto/home:/home,/auto/work:/work "$OS_IMAGE" "$to_run" "$@"
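
# Example usage (file names are hypothetical): test interactively with
#   ./run_singularity.sh 0 12345
# or submit through HTCondor with
#   condor_submit job.submit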