Amber
Amber (Assisted Model Building with Energy Refinement) is a family of force fields and molecular simulation software. Amber sander and Amber pmemd MPI (academic version) are available on duhpc. Amber 24 with pmemd.cuda achieves 622 ns/day on the V100 GPU — the fastest MD engine available on the cluster.
Template for Amber CPU Job (amber_cpu.sh):
#!/bin/bash
#============================================================
# Amber pmemd.MPI CPU Job Template - duhpc Cluster
# Use for: energy minimization, heating, equilibration
# Usage: sbatch amber_cpu.sh
#============================================================
#SBATCH --job-name=amber_cpu # Job name (change this)
#SBATCH --partition=compute # CPU compute partition
#SBATCH --nodes=1
#SBATCH --ntasks=8 # MPI tasks (max 48 for one node)
#SBATCH --cpus-per-task=1
#SBATCH --mem=16G
#SBATCH --time=3-00:00:00 # Max time (days-hours:min:sec)
#SBATCH --output=%x_%j.out
#SBATCH --error=%x_%j.err
#SBATCH --mail-type=BEGIN,END,FAIL
#SBATCH --mail-user=your@email.com # Change to your email
#------------------------------------------------------------
# USER SETTINGS - Edit these for your simulation
#------------------------------------------------------------
# Set RUN_*=yes/no to choose which steps to run.
# STEP 1 - Energy Minimization
RUN_MIN=yes
INPUT_MIN="min.in"
INPCRD_MIN="system.inpcrd"
OUTPUT_MIN="min"
# STEP 2 - Heating NVT
RUN_HEAT=yes
INPUT_HEAT="heat.in"
INPCRD_HEAT="min.rst"   # restart written by STEP 1
OUTPUT_HEAT="heat"
# STEP 3 - Equilibration NPT
RUN_EQUIL=yes
INPUT_EQUIL="equil.in"
INPCRD_EQUIL="heat.rst" # restart written by STEP 2
OUTPUT_EQUIL="equil"
# Topology file (used for all steps)
PRMTOP="system.prmtop"
#------------------------------------------------------------
echo "============================================="
echo "Amber pmemd.MPI CPU Job - duhpc Cluster"
echo "Job ID : $SLURM_JOBID"
echo "User : $USER"
echo "Node : $SLURMD_NODENAME"
echo "MPI tasks: $SLURM_NTASKS"
echo "Start : $(date)"
echo "============================================="
# ── Load Amber 24 ─────────────────────────────────────────
source /scratch/apps/amber/amber24/amber.sh
export LD_LIBRARY_PATH=/opt/ohpc/pub/libs/gnu8/openblas/0.3.7/lib:\
/opt/ohpc/pub/compiler/gcc/8.3.0/lib64:\
/opt/ohpc/pub/mpi/openmpi3-gnu8/3.1.4/lib:\
$LD_LIBRARY_PATH
MPI_RUN=/opt/ohpc/pub/mpi/openmpi3-gnu8/3.1.4/bin/mpirun
echo "Amber : pmemd.MPI"
echo "MPI : $("$MPI_RUN" --version 2>&1 | head -1)"
echo ""
# ── Helpers ───────────────────────────────────────────────
# die MSG... : print an error to stderr and abort the job.
die() { echo "ERROR: $*" >&2; exit 1; }
# run_step LABEL INPUT INPCRD PREFIX [EXTRA pmemd ARGS...]
#   Verifies that the input, topology and coordinate files
#   exist, runs pmemd.MPI with the standard -i/-p/-c/-o/-r
#   arguments plus any extras (-x, -ref), aborts on failure,
#   and reports the wall time for the step.
run_step() {
  local label=$1 input=$2 inpcrd=$3 prefix=$4
  shift 4
  echo "--- $label ---"
  local f
  for f in "$input" "$PRMTOP" "$inpcrd"; do
    [ -f "$f" ] || die "Missing $f"
  done
  local start
  start=$(date +%s)
  "$MPI_RUN" -np "$SLURM_NTASKS" pmemd.MPI -O \
    -i "$input" \
    -p "$PRMTOP" \
    -c "$inpcrd" \
    -o "${prefix}.out" \
    -r "${prefix}.rst" \
    "$@" || die "$label failed!"
  echo "$label done in $(($(date +%s)-start)) seconds"
  echo ""
}
# ── STEP 1: Energy Minimization ───────────────────────────
if [ "$RUN_MIN" = "yes" ]; then
  run_step "STEP 1: Energy Minimization" \
    "$INPUT_MIN" "$INPCRD_MIN" "$OUTPUT_MIN" \
    -ref "$INPCRD_MIN"
fi
# ── STEP 2: Heating ───────────────────────────────────────
if [ "$RUN_HEAT" = "yes" ]; then
  run_step "STEP 2: Heating 0K to 300K" \
    "$INPUT_HEAT" "$INPCRD_HEAT" "$OUTPUT_HEAT" \
    -x "${OUTPUT_HEAT}.nc" -ref "$INPCRD_HEAT"
fi
# ── STEP 3: Equilibration ─────────────────────────────────
if [ "$RUN_EQUIL" = "yes" ]; then
  run_step "STEP 3: Equilibration NPT" \
    "$INPUT_EQUIL" "$INPCRD_EQUIL" "$OUTPUT_EQUIL" \
    -x "${OUTPUT_EQUIL}.nc"
fi
echo "============================================="
echo "ALL STEPS COMPLETED SUCCESSFULLY"
echo "Next step: submit amber_gpu.sh for production MD"
echo "End time : $(date)"
echo "============================================="
Template for Amber GPU Job (amber_gpu.sh):
#!/bin/bash
#============================================================
# Amber pmemd.cuda GPU Job Template - duhpc Cluster
# Copy this file, edit the USER SETTINGS section, and submit
# Usage: sbatch amber_gpu.sh
#============================================================
#SBATCH --job-name=amber_gpu # Job name (change this)
#SBATCH --partition=gpu # GPU partition - DO NOT CHANGE
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --gres=gpu:1 # Request 1 GPU - DO NOT CHANGE
#SBATCH --mem=16G
#SBATCH --time=2-00:00:00 # Max time (days-hours:min:sec)
#SBATCH --output=%x_%j.out # Output file (%x=jobname, %j=jobid)
#SBATCH --error=%x_%j.err
#SBATCH --mail-type=BEGIN,END,FAIL
#SBATCH --mail-user=your@email.com # Change to your email
#------------------------------------------------------------
# USER SETTINGS - Edit these for your simulation
#------------------------------------------------------------
INPUT="prod.in" # MD input file
PRMTOP="system.prmtop" # Topology file
INPCRD="equil.rst" # Input coordinates/restart file
OUTPUT="prod" # Output prefix (prod.out, prod.nc etc)
#------------------------------------------------------------
echo "============================================="
echo "Amber pmemd.cuda GPU Job - duhpc Cluster"
echo "Job ID : $SLURM_JOBID"
echo "User : $USER"
echo "Node : $SLURMD_NODENAME"
echo "CPUs : $SLURM_CPUS_PER_TASK"
echo "Start : $(date)"
echo "============================================="
# ── Load Amber 24 ─────────────────────────────────────────
# amber.sh puts pmemd.cuda on PATH; CUDA/BLAS/GCC runtime
# libraries are prepended so the GPU binary resolves.
source /scratch/apps/amber/amber24/amber.sh
export LD_LIBRARY_PATH=/usr/local/cuda-11.4/lib64:\
/opt/ohpc/pub/libs/gnu8/openblas/0.3.7/lib:\
/opt/ohpc/pub/compiler/gcc/8.3.0/lib64:\
$LD_LIBRARY_PATH
# ── Verify environment ────────────────────────────────────
GPU_NAME=$(nvidia-smi --query-gpu=name --format=csv,noheader)
echo "Amber : $(pmemd.cuda --version 2>&1)"
echo "GPU : $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader)"
echo ""
# ── Check input files ─────────────────────────────────────
for f in "$INPUT" "$PRMTOP" "$INPCRD"; do
  if [ ! -f "$f" ]; then
    echo "ERROR: Missing input file: $f" >&2
    exit 1
  fi
done
echo "Input files: OK"
echo ""
# ── Run pmemd.cuda ────────────────────────────────────────
echo "--- Starting GPU MD ---"
START=$(date +%s)
pmemd.cuda -O \
  -i "$INPUT" \
  -p "$PRMTOP" \
  -c "$INPCRD" \
  -o "${OUTPUT}.out" \
  -r "${OUTPUT}.rst" \
  -x "${OUTPUT}.nc" \
  -inf "${OUTPUT}.mdinfo"
EXIT=$?
END=$(date +%s)
# ── Performance Report ────────────────────────────────────
echo ""
echo "============================================="
echo "PERFORMANCE REPORT"
echo "============================================="
if [ "$EXIT" -eq 0 ]; then
  echo "Status : SUCCESS"
  echo "GPU : $GPU_NAME"
  echo "Wall time: $((END-START)) seconds"
  echo ""
  # Throughput lines from the live mdinfo and the final mdout.
  grep "ns/day" "${OUTPUT}.mdinfo" 2>/dev/null
  grep "ns/day" "${OUTPUT}.out" 2>/dev/null | tail -3
else
  echo "Status : FAILED (exit code $EXIT)" >&2
  echo "Check ${OUTPUT}.out for details" >&2
fi
echo "End time : $(date)"
echo "============================================="
Getting Support
- Email Mr. Imran Ghani at ighani[at]ducc[dot]du[dot]ac[dot]in for any duhpc-related information.
