Skip to content

Instantly share code, notes, and snippets.

View pyhope's full-sized avatar

Yihang Peng pyhope

  • Princeton University
  • Princeton, NJ
  • 02:46 (UTC -05:00)
View GitHub Profile
#!/bin/bash
# Build-environment setup for VASP 6.4.3 (CPU build) — excerpt; the
# compile steps continue beyond this preview.
set -e
# Clear any previously loaded modules so the toolchain is exactly the set
# loaded below: Intel oneAPI compilers + Intel MPI + MKL + parallel HDF5.
module purge
module load intel-oneapi/2024.2 intel-mpi/oneapi/2021.13 intel-mkl/2024.2 hdf5/oneapi-2024.2/intel-mpi/1.14.4
# NOTE(review): replace /path/to/your with the actual location of the tarball.
cp /path/to/your/vasp.6.4.3.tgz .
tar -zxvf vasp.6.4.3.tgz
cd vasp.6.4.3
#!/usr/bin/env bash
# watch_squeue_idle.sh
# Configuration header for a Slurm queue watcher — the polling/cancel logic
# is truncated from this preview.
set -euo pipefail
# Slurm user whose jobs are monitored.
USER_ID="yp0007"
# squeue --format string: job id, partition, job name, state, elapsed time,
# node count, and working directory (%Z).
SQUEUE_FMT="%.18i %.9P %.8j %.2t %.10M %.6D %Z"
# Idle thresholds — presumably seconds before warning / cancelling an idle
# job; TODO confirm units against the (truncated) polling loop.
WARN_AFTER=60
KILL_AFTER=600
@pyhope
pyhope / install_vasp_gpu_della9.sh
Created August 22, 2025 08:08
Install VASP 6.3 GPU version on Della9
#!/bin/bash
## Works as tested on Princeton Della9 Cluster on August 22, 2025.
# GPU build environment for VASP 6.3.2: NVIDIA HPC SDK compilers, CUDA
# toolkit, CUDA-aware Open MPI, and Intel MKL.
cd /path/to/vasp/6.3.2
module purge
module load nvhpc/24.11 cudatoolkit/12.6 openmpi/cuda-12.6/nvhpc-24.11/4.1.6 intel-mkl/2024.2
# Point the build at the CUDA bundled with the HPC SDK.
export NVHPC_CUDA_HOME=/opt/nvidia/hpc_sdk/Linux_x86_64/24.11
# Write makefile.include verbatim — quoted 'EOF' suppresses $-expansion.
# The heredoc body (and its EOF terminator) is truncated from this preview.
cat > makefile.include << 'EOF'
@pyhope
pyhope / bgw_install.sh
Created August 12, 2025 18:12
Compile BerkeleyGW-4.0 on Tiger
#!/bin/bash
## Works as tested on Princeton Tiger3 Cluster on August 12, 2025.
# Build setup for BerkeleyGW 4.0 — excerpt; the arch.mk heredoc and the
# compile steps continue beyond this preview.
# Abort on error so a failed cd cannot let later commands clobber files in
# the wrong directory (matches the sibling install scripts).
set -e
# Absolute placeholder path, consistent with the other install scripts
# (was the relative "path/to/...", which only works from one directory).
cd /path/to/your/BerkeleyGW-4.0 # Source code available at https://berkeleygw.org/download/
# Select the complex-number build flavor shipped as flavor_cplx.mk.
cp flavor_cplx.mk flavor.mk
# Write arch.mk verbatim — quoted 'EOF' suppresses $-expansion.
cat > arch.mk <<'EOF'
COMPFLAG = -DINTEL -DONE_API_WORKAROUND
PARAFLAG = -DMPI -DOMP
MATHFLAG = -DUNPACKED -DUSEFFTW3 -DUSESCALAPACK
@pyhope
pyhope / install_plumed2.8_lammps.sh
Created May 27, 2025 07:40 — forked from jinalee314/install_plumed2.8_lammps.sh
Install DeePMD-kit, PLUMED (v2.8.2), and LAMMPS (stable_2Aug2023_update3)
## Works for Della cluster at Princeton University as of 6/27/2024
# Installation notes for DeePMD-kit / PLUMED / LAMMPS — excerpt; meant to
# be run line by line rather than executed as a script.
# NOTE(review): della-vis1 is presumably used because login nodes restrict
# long-running builds — confirm with cluster policy.
ssh della-vis1 # use for installation
cd /directory-to-download-your-source-code-to/
## DeePMD-kit installation
git clone https://github.com/deepmodeling/deepmd-kit.git deepmd-kit-new # can choose folder name
cd deepmd-kit-new
@pyhope
pyhope / qe_install.sh
Created February 7, 2025 01:05
Installation scripts for quantum espresso on Della and Tiger3
# Installation scripts for quantum espresso
# Adapted from https://researchcomputing.princeton.edu/support/knowledge-base/quantum-espresso
# Della
# Out-of-source build: configure and compile from a separate build/ dir.
cd qe-7.x
mkdir build && cd build
module purge
module load openmpi/gcc/4.1.2
module load fftw/gcc/3.3.9
# Compiler optimization flags (consumed by the truncated configure step);
# -march=native ties the binary to the CPU family of the build host.
OPTFLAGS="-O3 -march=native -DNDEBUG"
@pyhope
pyhope / run.slurm
Last active January 27, 2023 19:55
An example slurm script for DeePMD-LAMMPS simulation
#!/bin/bash
# Slurm batch script for a DeePMD-LAMMPS simulation — excerpt; the module
# loads and the srun/launch line are truncated from this preview.
#SBATCH --job-name=example # create a short name for your job
#SBATCH --nodes=1 # node count
#SBATCH --ntasks=4 # total number of tasks across all nodes
#SBATCH --cpus-per-task=1 # cpu-cores per task (>1 if multi-threaded tasks)
# 4 tasks with 4 GPUs on one node — presumably one MPI task per GPU.
#SBATCH --gres=gpu:4 # number of gpus per node
#SBATCH --mem-per-cpu=16G # memory per cpu-core (4G is default)
#SBATCH --time=1-00:00:00 # total run time limit (D-HH:MM:SS)
#SBATCH --mail-type=begin # send email when job begins
#SBATCH --mail-type=end # send email when job ends
@pyhope
pyhope / install_vasp.sh
Last active November 8, 2024 02:51
install_vasp
## Works as tested on Princeton Della and Tiger Cluster on Jan 10, 2023.
## Please confirm the version you want to install and the computer cluster, and run the corresponding code blocks line by line in the terminal.
## When submitting tasks, please load all modules that were loaded during installation. An example slurm script can be found here: https://researchcomputing.princeton.edu/support/knowledge-base/vasp.
# VASP.6.x on Della
cd /path/to/vasp.6.x # This should be the directory of your VASP source code
# Start from the Intel + OpenMP template shipped with VASP.
cp arch/makefile.include.intel_omp ./makefile.include
# for Intel Parallel Studio's MKL use -mkl instead of -qmkl
# (the sed below rewrites the FCL link line in makefile.include accordingly)
sed -i 's/FCL += -qmkl/FCL += -mkl/g' makefile.include
module purge
## Works as tested on Princeton Della Cluster on April 16, 2023.
# Conda-based DeePMD-kit environment setup — excerpt; continues beyond
# this preview.
module load anaconda3/2021.5
conda create -n dpdev python=3.10 # Customize a name (e.g. dpdev, deepmd, ...) for this conda environment
conda activate dpdev
# Let the solver assume CUDA 11.6 is available when picking GPU builds.
export CONDA_OVERRIDE_CUDA=11.6
# install libtensorflow_cc gpu version (Note that only libtensorflow_cc in the deepmodeling channel enables GPU. The libtensorflow_cc library in the conda-forge channel can only be used on CPU.)
conda install -c deepmodeling libtensorflow_cc=*=cuda11*