Converted openmm-nn plugin to PyTorch
peastman committed Sep 27, 2019 · 0 parents · commit 81e5775
Showing 50 changed files with 15,380 additions and 0 deletions.
156 changes: 156 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,156 @@
#----------------------------------------------------
# OpenMM NeuralNetwork Plugin
#----------------------------------------------------

CMAKE_MINIMUM_REQUIRED(VERSION 3.5)

# We need to know where OpenMM is installed so we can access the headers and libraries.
SET(OPENMM_DIR "/usr/local/openmm" CACHE PATH "Where OpenMM is installed")
INCLUDE_DIRECTORIES("${OPENMM_DIR}/include")
LINK_DIRECTORIES("${OPENMM_DIR}/lib" "${OPENMM_DIR}/lib/plugins")

# We need to know where LibTorch is installed so we can access the headers and libraries.
SET(PYTORCH_DIR "" CACHE PATH "Where the PyTorch C++ API is installed")
SET(CMAKE_PREFIX_PATH "${PYTORCH_DIR}")
FIND_PACKAGE(Torch REQUIRED)
#LINK_DIRECTORIES("${TENSORFLOW_DIR}/lib")

# Specify the C++ version we are building for.
SET (CMAKE_CXX_STANDARD 11)

# Set flags for linking on mac
IF(APPLE)
    SET (CMAKE_INSTALL_NAME_DIR "@rpath")
    SET(EXTRA_COMPILE_FLAGS "-msse2 -stdlib=libc++")
ENDIF(APPLE)

# Select where to install
IF(${CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT})
    IF(WIN32)
        SET(CMAKE_INSTALL_PREFIX "$ENV{ProgramFiles}/OpenMM" CACHE PATH "Where to install the plugin" FORCE)
    ELSE(WIN32)
        SET(CMAKE_INSTALL_PREFIX "/usr/local/openmm" CACHE PATH "Where to install the plugin" FORCE)
    ENDIF(WIN32)
ENDIF(${CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT})

# Put all the tests and libraries in a single output directory.
IF(NOT EXECUTABLE_OUTPUT_PATH)
    SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}
        CACHE INTERNAL "Single output directory for building all executables.")
ENDIF()
IF(NOT LIBRARY_OUTPUT_PATH)
    SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}
        CACHE INTERNAL "Single output directory for building all libraries.")
ENDIF()
SET(${PROJECT_NAME}_EXECUTABLE_DIR ${EXECUTABLE_OUTPUT_PATH}/${CMAKE_CFG_INTDIR})
SET(${PROJECT_NAME}_LIBRARY_DIR ${LIBRARY_OUTPUT_PATH}/${CMAKE_CFG_INTDIR})

# The source is organized into subdirectories, but we handle them all from
# this CMakeLists file rather than letting CMake visit them as SUBDIRS.
SET(NN_PLUGIN_SOURCE_SUBDIRS openmmapi serialization)

# Set the library name
SET(NN_LIBRARY_NAME OpenMMNN)
SET(SHARED_NN_TARGET ${NN_LIBRARY_NAME})

# These are all the places to search for header files which are to be part of the API.
SET(API_INCLUDE_DIRS "openmmapi/include" "openmmapi/include/internal")

# Locate header files.
SET(API_INCLUDE_FILES)
FOREACH(dir ${API_INCLUDE_DIRS})
    FILE(GLOB fullpaths ${dir}/*.h)
    SET(API_INCLUDE_FILES ${API_INCLUDE_FILES} ${fullpaths})
ENDFOREACH(dir)

# Collect up source files
SET(SOURCE_FILES) # empty
SET(SOURCE_INCLUDE_FILES)
FOREACH(subdir ${NN_PLUGIN_SOURCE_SUBDIRS})
    FILE(GLOB src_files ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/src/*.cpp)
    FILE(GLOB incl_files ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/src/*.h)
    SET(SOURCE_FILES ${SOURCE_FILES} ${src_files}) #append
    SET(SOURCE_INCLUDE_FILES ${SOURCE_INCLUDE_FILES} ${incl_files})

    ## Make sure we find these locally before looking in OpenMM/include if
    ## OpenMM was previously installed there.
    INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/include)
ENDFOREACH(subdir)

# Create the library.

ADD_LIBRARY(${SHARED_NN_TARGET} SHARED ${SOURCE_FILES} ${SOURCE_INCLUDE_FILES} ${API_INCLUDE_FILES})
SET_TARGET_PROPERTIES(${SHARED_NN_TARGET}
    PROPERTIES COMPILE_FLAGS "-DNN_BUILDING_SHARED_LIBRARY ${EXTRA_COMPILE_FLAGS}"
    LINK_FLAGS "${EXTRA_COMPILE_FLAGS}")
TARGET_LINK_LIBRARIES(${SHARED_NN_TARGET} OpenMM)
TARGET_LINK_LIBRARIES(${SHARED_NN_TARGET} "${TORCH_LIBRARIES}")
INSTALL_TARGETS(/lib RUNTIME_DIRECTORY /lib ${SHARED_NN_TARGET})

# install headers
FILE(GLOB API_ONLY_INCLUDE_FILES "openmmapi/include/*.h")
INSTALL (FILES ${API_ONLY_INCLUDE_FILES} DESTINATION include)
FILE(GLOB API_ONLY_INCLUDE_FILES_INTERNAL "openmmapi/include/internal/*.h")
INSTALL (FILES ${API_ONLY_INCLUDE_FILES_INTERNAL} DESTINATION include/internal)

# Enable testing

ENABLE_TESTING()
ADD_SUBDIRECTORY(serialization/tests)

# Copy test files to the build directory.

file(GLOB_RECURSE TEST_FILES RELATIVE "${CMAKE_SOURCE_DIR}"
"${CMAKE_SOURCE_DIR}/tests/*.pt"
)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/tests)
set(COPIED_TEST_FILES)
foreach(TEST_FILE ${TEST_FILES})
    set(infile "${CMAKE_SOURCE_DIR}/${TEST_FILE}")
    set(outfile "${CMAKE_BINARY_DIR}/${TEST_FILE}")
    add_custom_command(
        OUTPUT "${outfile}"
        COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${infile}" "${outfile}"
        DEPENDS "${infile}"
        COMMENT "CMake-copying file ${infile} to ${outfile}")
    set(COPIED_TEST_FILES ${COPIED_TEST_FILES} "${outfile}")
endforeach()
add_custom_target(CopyTestFiles ALL DEPENDS ${COPIED_TEST_FILES})

# Build the implementations for different platforms

ADD_SUBDIRECTORY(platforms/reference)

SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}")
FIND_PACKAGE(OpenCL QUIET)
IF(OPENCL_FOUND)
    SET(NN_BUILD_OPENCL_LIB ON CACHE BOOL "Build implementation for OpenCL")
ELSE(OPENCL_FOUND)
    SET(NN_BUILD_OPENCL_LIB OFF CACHE BOOL "Build implementation for OpenCL")
ENDIF(OPENCL_FOUND)
IF(NN_BUILD_OPENCL_LIB)
    ADD_SUBDIRECTORY(platforms/opencl)
ENDIF(NN_BUILD_OPENCL_LIB)

FIND_PACKAGE(CUDA QUIET)
IF(CUDA_FOUND)
    SET(NN_BUILD_CUDA_LIB ON CACHE BOOL "Build implementation for CUDA")
ELSE(CUDA_FOUND)
    SET(NN_BUILD_CUDA_LIB OFF CACHE BOOL "Build implementation for CUDA")
ENDIF(CUDA_FOUND)
IF(NN_BUILD_CUDA_LIB)
    ADD_SUBDIRECTORY(platforms/cuda)
ENDIF(NN_BUILD_CUDA_LIB)

# Build the Python API

FIND_PROGRAM(PYTHON_EXECUTABLE python)
FIND_PROGRAM(SWIG_EXECUTABLE swig)
IF(PYTHON_EXECUTABLE AND SWIG_EXECUTABLE)
    SET(NN_BUILD_PYTHON_WRAPPERS ON CACHE BOOL "Build wrappers for Python")
ELSE(PYTHON_EXECUTABLE AND SWIG_EXECUTABLE)
    SET(NN_BUILD_PYTHON_WRAPPERS OFF CACHE BOOL "Build wrappers for Python")
ENDIF(PYTHON_EXECUTABLE AND SWIG_EXECUTABLE)
IF(NN_BUILD_PYTHON_WRAPPERS)
    ADD_SUBDIRECTORY(python)
ENDIF(NN_BUILD_PYTHON_WRAPPERS)
95 changes: 95 additions & 0 deletions FindOpenCL.cmake
@@ -0,0 +1,95 @@

### OPENCL_INCLUDE_DIR ###
# Try OPENCL_DIR variable before looking elsewhere
find_path(OPENCL_INCLUDE_DIR
    NAMES OpenCL/opencl.h CL/opencl.h
    PATHS $ENV{OPENCL_DIR}
    PATH_SUFFIXES "include"
    NO_DEFAULT_PATH
)
# Next look in environment variables set by OpenCL SDK installations
find_path(OPENCL_INCLUDE_DIR
    NAMES OpenCL/opencl.h CL/opencl.h
    PATHS
        $ENV{CUDA_PATH}
        $ENV{AMDAPPSDKROOT}
    PATH_SUFFIXES "include"
    NO_DEFAULT_PATH
)
# On Macs, look inside the platform SDK
if(DEFINED CMAKE_OSX_SYSROOT)
    find_path(OPENCL_INCLUDE_DIR
        NAMES opencl.h
        PATHS
            "${CMAKE_OSX_SYSROOT}/System/Library/Frameworks/OpenCL.framework/Headers"
        NO_DEFAULT_PATH
    )
endif(DEFINED CMAKE_OSX_SYSROOT)
# As a last resort, look in default system areas followed by other possible locations
find_path(OPENCL_INCLUDE_DIR
    NAMES OpenCL/opencl.h CL/opencl.h
    PATHS
        "C:/CUDA"
        "/usr/local/cuda"
        "/usr/local/streamsdk"
        "/usr"
    PATH_SUFFIXES "include"
)

### OPENCL_LIBRARY ###
if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux")
if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64")
set(path_suffixes "lib/x86_64")
else("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64")
set(path_suffixes "lib/x86")
endif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64")
elseif(MSVC)
if(CMAKE_CL_64)
set(path_suffixes "lib/x64" "lib/x86_64")
else(CMAKE_CL_64)
set(path_suffixes "lib/Win32" "lib/x86")
endif(CMAKE_CL_64)
else(MSVC)
set(path_suffixes "lib")
endif("${CMAKE_SYSTEM_NAME}" MATCHES "Linux")
# Try OPENCL_DIR variable before looking elsewhere
find_library(OPENCL_LIBRARY
    NAMES OpenCL
    PATHS
        $ENV{OPENCL_DIR}
        ${OPENCL_LIB_SEARCH_PATH}
    PATH_SUFFIXES ${path_suffixes}
    NO_DEFAULT_PATH
)
# Next look in environment variables set by OpenCL SDK installations
find_library(OPENCL_LIBRARY
    NAMES OpenCL
    PATHS
        $ENV{CUDA_PATH}
        $ENV{AMDAPPSDKROOT}
    PATH_SUFFIXES ${path_suffixes}
    NO_DEFAULT_PATH
)
# As a last resort, look in default system areas followed by other possible locations
find_library(OPENCL_LIBRARY
    NAMES OpenCL
    PATHS
        "C:/CUDA"
        "/usr/local/cuda"
        "/usr/local/streamsdk"
        "/usr"
    PATH_SUFFIXES ${path_suffixes} "lib"
)

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OPENCL DEFAULT_MSG OPENCL_LIBRARY OPENCL_INCLUDE_DIR)

if(OPENCL_FOUND)
    set(OPENCL_LIBRARIES ${OPENCL_LIBRARY})
    mark_as_advanced(CLEAR OPENCL_INCLUDE_DIR)
    mark_as_advanced(CLEAR OPENCL_LIBRARY)
else(OPENCL_FOUND)
    set(OPENCL_LIBRARIES)
    mark_as_advanced(OPENCL_INCLUDE_DIR)
    mark_as_advanced(OPENCL_LIBRARY)
endif(OPENCL_FOUND)
130 changes: 130 additions & 0 deletions README.md
@@ -0,0 +1,130 @@
OpenMM Neural Network Plugin
============================

This is a plugin for [OpenMM](http://openmm.org) that allows neural networks
to be used for defining forces. It is implemented with [PyTorch](https://pytorch.org/).
To use it, you create a PyTorch model that takes particle positions as input
and produces energy as output. This plugin uses the model to apply
forces to particles during a simulation.

Installation
============

At present this plugin must be compiled from source. It uses CMake as its build
system. Before compiling you must install LibTorch, which is the PyTorch C++ API,
by following the instructions at https://pytorch.org. You can then
follow these steps.

1. Create a directory in which to build the plugin.

2. Run the CMake GUI or ccmake, specifying your new directory as the build directory and the top
level directory of this project as the source directory.

3. Press "Configure". (Do not worry if it produces an error message about not being able to find PyTorch.)

4. Set OPENMM_DIR to point to the directory where OpenMM is installed. This is needed to locate
the OpenMM header files and libraries.

5. Set PYTORCH_DIR to point to the directory where you installed LibTorch.

6. Set CMAKE_INSTALL_PREFIX to the directory where the plugin should be installed. Usually,
this will be the same as OPENMM_DIR, so the plugin will be added to your OpenMM installation.

7. If you plan to build the OpenCL platform, make sure that OPENCL_INCLUDE_DIR and
OPENCL_LIBRARY are set correctly, and that NN_BUILD_OPENCL_LIB is selected.

8. If you plan to build the CUDA platform, make sure that CUDA_TOOLKIT_ROOT_DIR is set correctly
and that NN_BUILD_CUDA_LIB is selected.

9. Press "Configure" again if necessary, then press "Generate".

10. Use the build system you selected to build and install the plugin. For example, if you
selected Unix Makefiles, type `make install` to install the plugin, and `make PythonInstall` to
install the Python wrapper.

Usage
=====

The first step is to create a PyTorch model defining the calculation to
perform. It should take particle positions (in the form of an Nx3 Tensor) as
its input, and return the potential energy as its output. The model must then be
converted to a TorchScript module and saved to a file. Converting to TorchScript
can usually be done with a single call to `torch.jit.script()` or `torch.jit.trace()`,
although more complicated models can sometimes require extra steps. See the
[PyTorch documentation](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html)
for details. Here is an example of Python code that does this for a very
simple calculation (a harmonic force attracting every particle to the origin).

```python
import torch

class ForceModule(torch.nn.Module):
def forward(self, positions):
return torch.sum(positions**2)

module = torch.jit.script(ForceModule())
module.save('model.pt')
```
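
Converting with `torch.jit.trace()` instead looks much the same. The sketch below is only an illustration, not part of the original example; the 10-particle input shape is an arbitrary assumption, since tracing merely needs a representative tensor to record the operations performed by `ForceModule` above.

```python
import torch

# A sketch of the tracing alternative: run the module once on example input
# and record the operations it performs. The 10x3 shape is an arbitrary choice.
example_positions = torch.zeros(10, 3)
module = torch.jit.trace(ForceModule(), example_positions)
module.save('model.pt')
```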

To use the model in a simulation, create a `NeuralNetworkForce` object and add
it to your `System`. The constructor takes the path to the saved model as an
argument. For example,

```python
from openmmnn import *
f = NeuralNetworkForce('model.pt')
system.addForce(f)
```
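
For context, here is a minimal sketch of how the force might be exercised end to end. It is not taken from the plugin's own documentation: the three-particle system, masses, positions, and the `simtk.openmm`/`simtk.unit` imports are illustrative assumptions based on the standard OpenMM Python API.

```python
from simtk import openmm, unit
from openmmnn import NeuralNetworkForce

# Build a toy three-particle system (masses and positions are arbitrary).
system = openmm.System()
for _ in range(3):
    system.addParticle(1.0*unit.amu)
system.addForce(NeuralNetworkForce('model.pt'))

# Evaluate the energy and forces the model produces for one configuration.
integrator = openmm.VerletIntegrator(0.001*unit.picoseconds)
context = openmm.Context(system, integrator)
context.setPositions([(0, 0, 0), (0.1, 0, 0), (0, 0.1, 0)]*unit.nanometers)
state = context.getState(getEnergy=True, getForces=True)
print(state.getPotentialEnergy())
print(state.getForces())
```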

When defining the model to perform a calculation, you may want to apply
periodic boundary conditions. To do this, call `setUsesPeriodicBoundaryConditions(True)`
on the `NeuralNetworkForce`. The model is then expected to take a second input,
which contains the current periodic box vectors. You
can make use of them in whatever way you want for computing the force. For
example, the following code applies periodic boundary conditions to each
particle position to translate all of them into a single periodic cell.

```python
class ForceModule(torch.nn.Module):
def forward(self, positions, boxvectors):
boxsize = boxvectors.diag()
periodicPositions = positions - torch.floor(positions/boxsize)*boxsize
return torch.sum(periodicPositions**2)
```

Note that this code assumes a rectangular box. Applying periodic boundary
conditions with a triclinic box requires a slightly more complicated
calculation.
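
As a rough illustration, the sketch below wraps positions for a triclinic box, assuming the box vectors are supplied in OpenMM's reduced form (a along x, b in the xy plane, c arbitrary); the harmonic energy term is carried over from the example above.

```python
class TriclinicForceModule(torch.nn.Module):
    def forward(self, positions, boxvectors):
        # Wrap each position into the primary cell by subtracting whole
        # multiples of the box vectors, working from the last vector back.
        # Assumes reduced form: a=(ax,0,0), b=(bx,by,0), c=(cx,cy,cz).
        pos = positions
        pos = pos - torch.floor(pos[:, 2:3]/boxvectors[2, 2])*boxvectors[2]
        pos = pos - torch.floor(pos[:, 1:2]/boxvectors[1, 1])*boxvectors[1]
        pos = pos - torch.floor(pos[:, 0:1]/boxvectors[0, 0])*boxvectors[0]
        return torch.sum(pos**2)
```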

License
=======

This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.

Portions copyright (c) 2018 Stanford University and the Authors.

Authors: Peter Eastman

Contributors:

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
