NAMD

Single node + CUDA

#PBS -N apoa1_test
#PBS -l nodes=c005:ppn=20
#PBS -l walltime=240:00:00
#PBS -q dx360k20
#PBS -k oe

# the default cuda module must point to a CUDA version newer than 9.0
module load namd/2.14-multicore
cd $PBS_O_WORKDIR

namd2 +p$PBS_NP +setcpuaffinity +isomalloc_sync apoa1.namd > apoa1.log
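
To submit, assuming the script above is saved as apoa1_single.pbs (an illustrative filename):

qsub apoa1_single.pbs   # returns the job ID
qstat -u $USER          # check the job state
tail -f apoa1.log       # follow NAMD output once the job is running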

Multi-node + CUDA

#PBS -N apoa1_test
#PBS -l nodes=3:ppn=20
#PBS -l walltime=240:00:00
#PBS -q dx360k20
#PBS -k oe

# the default cuda module must point to a CUDA version newer than 9.0
module load namd/2.14-verbs
cd $PBS_O_WORKDIR

rm -f nodelist
for node in $(sort -u "$PBS_NODEFILE")
do
  echo "host ${node}-ib0" >> nodelist
done
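
# With nodes=3:ppn=20, the generated nodelist should look like this
# (host names are illustrative):
#   host c001-ib0
#   host c002-ib0
#   host c003-ib0
# charmrun reads ./nodelist in the working directory by default,
# so no explicit ++nodelist flag is needed below.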

charmrun ++usehostname ++ppn $((PBS_NUM_PPN-1)) namd2 +p $((PBS_NP-PBS_NUM_NODES)) \
+setcpuaffinity +isomalloc_sync apoa1.namd > apoa1.log
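
For reference, the command above reserves one core per node for the Charm++ communication thread. With the nodes=3:ppn=20 request in this script, the values work out as follows (a sketch):

# PBS_NUM_NODES=3, PBS_NUM_PPN=20, PBS_NP=60
# ++ppn $((20-1))  -> 19 worker threads per node
# +p   $((60-3))   -> 57 worker PEs in total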

NAMD3 on Sugon

#!/bin/bash
#SBATCH --job-name=namd3
#SBATCH --output=out_%j_namd3.txt
#SBATCH --error=err_%j_namd3.txt
#SBATCH --nodes=1
#SBATCH --gres=gpu:2
#SBATCH --nodelist=gpu9
#SBATCH --partition=A40
#SBATCH --cpus-per-task=15

# optional: Slurm jobs start in the submission directory by default
cd $SLURM_SUBMIT_DIR

/public/apps/namd3/NAMD_3.0.1_Linux-x86_64-multicore-CUDA/namd3 \
    +p15 +pmepes 7 \
    +setcpuaffinity \
    +devices 0,1 \
    apoa1.namd > apoa1.log
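
Here +pmepes 7 dedicates 7 of the 15 PEs to PME work, leaving the rest for the remaining force computation. To submit and follow the run, assuming the script is saved as namd3.slurm (an illustrative filename):

sbatch namd3.slurm   # queue the job
squeue -u $USER      # check queue state
tail -f apoa1.log    # follow NAMD output once running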