$ kinit alpays@stanford.edu #this will get you 24-hour password-free access
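
#To verify the ticket was issued and see when it expires (klist is part of any standard Kerberos install):

$ klist #to list cached Kerberos tickets and their expiry times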

$ ssh -Y alpays@login.sherlock.stanford.edu #for accessing login nodes on Sherlock2 (-Y enables trusted X11 forwarding for GUIs such as Relion)

$ ssh -Y alpays@sherlock.stanford.edu #for accessing login nodes on Sherlock1 (retired)

#Use the Data Transfer Node (DTN) on Sherlock2 to transfer large datasets

#Log in to the host computer and obtain a kinit ticket there for alpays@stanford.edu

$ ssh alpays@ocio-gpu01.slac.stanford.edu

$ kinit alpays@stanford.edu

$ scp -r /gpfs/slac/cryo/fs1/exp/directory alpays@dtn.sherlock.stanford.edu:/oak/stanford/groups/yiorgo/lana/Alpay/Project_directory
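
#If a large transfer is interrupted, rsync can resume where scp would start over (same hosts and paths as above; the flags shown are one common choice):

$ rsync -avP /gpfs/slac/cryo/fs1/exp/directory alpays@dtn.sherlock.stanford.edu:/oak/stanford/groups/yiorgo/lana/Alpay/Project_directory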

$ cd /oak/stanford/groups/yiorgo/lana/Alpay #change directory to the high-capacity Oak group storage area

$ ml biology relion #to load the Relion module environment on Sherlock2

$ relion #to start Relion

$ sbatch scriptname.bash #to submit jobs
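
#A minimal sketch of what scriptname.bash might contain (the job name, resource requests, and the Relion command line are illustrative placeholders; adjust them for your job):

#!/bin/bash
#SBATCH --job-name=relion_refine
#SBATCH --partition=yiorgo
#SBATCH --nodes=1
#SBATCH --ntasks=5
#SBATCH --gres=gpu:2
#SBATCH --mem=200G
#SBATCH --time=12:00:00

ml biology relion
srun relion_refine_mpi --i particles.star --o Refine3D/job001 #placeholder Relion command and arguments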

$ squeue -u alpays #to check your jobs

$ squeue -p yiorgo #to check jobs on partition yiorgo (or try normal, gpu, owners, bigmem, etc. for other partitions)

$ sinfo -p yiorgo -e -o "%.10R %.8D %.10m %.5c %7z %8G %110f" #to get info from partition yiorgo (512GB Ram, 20x CPU - E5 2640v4 2.40GHz, 4x GPU - TITAN-Xp 12GB CC:6.1)

$ salloc -p yiorgo -n 5 --nodes=1 --gres=gpu:2 --mem=200G --time=12:00:00 --exclude=sh-114-09,sh-114-10 #to request an interactive allocation
# --nodelist=sh-113-10,sh-113-11 to request only these specific nodes instead

$ echo $SLURM_NODELIST #to see which node(s) were allocated

$ ssh sh-113-10 #(or sh-113-11) to log in to the allocated node
$ srun -n 4 -c 8 ./my_favorite_command #to launch 4 tasks with 8 CPUs per task inside the allocation
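
#When done, release the allocation so the nodes free up:

$ exit #to leave the interactive allocation (exit the ssh session first if you logged into the node)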

$ scontrol show job <job-id> #to show the full details of a job
$ sstat --format=AveCPU,AvePages,AveRSS,AveVMSize,JobID -j <job-id> --allsteps #to check resource usage of a running job
$ sacct -j <jobid> --format=JobID,JobName,MaxRSS,Elapsed #to check resource usage of a completed job
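
#Many SLURM installations also ship the seff utility (from SLURM contribs; assumed to be installed here) for a quick one-line efficiency summary:

$ seff <jobid> #to summarize CPU and memory efficiency of a completed job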

Storage Limits: 15 GB user $HOME and 1 TB PI_HOME
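
#Sherlock provides a site-specific helper to check your usage against these limits (command name per the Sherlock docs; treat it as an assumption if it is missing on your login node):

$ sh_quota #to report filesystem usage and quotas for your account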

https://slurm.schedmd.com/salloc.html

https://slurm.schedmd.com/sinfo.html

https://slurm.schedmd.com/scontrol.html

https://ubccr.freshdesk.com/support/solutions/articles/5000688140-submitting-a-slurm-job-script

Convenient SLURM Commands