[API2.0] add op for cudnn version query test=develop #26180

Merged (9 commits) on Aug 16, 2020
Changes from 6 commits
2 changes: 1 addition & 1 deletion paddle/fluid/platform/CMakeLists.txt
@@ -45,7 +45,7 @@ ENDIF()
cc_library(cpu_info SRCS cpu_info.cc DEPS ${CPU_INFO_DEPS})
cc_test(cpu_info_test SRCS cpu_info_test.cc DEPS cpu_info)

nv_library(gpu_info SRCS gpu_info.cc DEPS gflags glog enforce monitor)
nv_library(gpu_info SRCS gpu_info.cc DEPS gflags glog enforce monitor dynload_cuda)
willthefrog marked this conversation as resolved.

cc_library(place SRCS place.cc DEPS enforce boost)
cc_test(place_test SRCS place_test.cc DEPS place glog gflags)
13 changes: 9 additions & 4 deletions paddle/fluid/platform/gpu_info.cc
@@ -19,6 +19,7 @@ limitations under the License. */

#include "gflags/gflags.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/dynload/cudnn.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/lock_guard_ptr.h"
#include "paddle/fluid/platform/macros.h"
@@ -38,11 +39,11 @@ USE_GPU_MEM_STAT;
namespace paddle {
namespace platform {

/* Here is a very simple CUDA “pro tip”: cudaDeviceGetAttribute() is a much
faster way to query device properties. You can see details in
https://devblogs.nvidia.com/cuda-pro-tip-the-fast-way-to-query-device-properties/
*/
int CudnnVersion() {
  if (!dynload::HasCUDNN()) return -1;

  return dynload::cudnnGetVersion();
}
static int GetCUDADeviceCountImpl() {
  int driverVersion = 0;
  cudaError_t status = cudaDriverGetVersion(&driverVersion);
@@ -73,6 +74,10 @@ int GetCUDADeviceCount() {
  return dev_cnt;
}

/* Here is a very simple CUDA “pro tip”: cudaDeviceGetAttribute() is a much
faster way to query device properties. You can see details in
https://devblogs.nvidia.com/cuda-pro-tip-the-fast-way-to-query-device-properties/
*/

Contributor commented: Why is this comment here? It looks like cudaDeviceGetAttribute is not used here anyway.

Contributor Author replied: This comment is for GetCUDAComputeCapability; I moved it to the right position by the way.

int GetCUDAComputeCapability(int id) {
  PADDLE_ENFORCE_LT(id, GetCUDADeviceCount(),
                    platform::errors::InvalidArgument(
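As context for the “pro tip” comment above, here is a minimal standalone sketch (an illustration, not code from this PR) of querying the compute capability via cudaDeviceGetAttribute, which is what GetCUDAComputeCapability concerns:

// Hypothetical sketch: fetch only the compute capability instead of filling a
// full cudaDeviceProp struct, which is what the "pro tip" recommends.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  int major = 0, minor = 0;
  // cudaDeviceGetAttribute(int* value, cudaDeviceAttr attr, int device)
  cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, /*device=*/0);
  cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, /*device=*/0);
  std::printf("compute capability: %d.%d\n", major, minor);
  return 0;
}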
2 changes: 2 additions & 0 deletions paddle/fluid/platform/gpu_info.h
@@ -23,6 +23,8 @@ limitations under the License. */

namespace paddle {
namespace platform {
//! Get the version of cudnn
int CudnnVersion();

//! Get the total number of GPU devices in system.
int GetCUDADeviceCount();
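For illustration only, a hypothetical caller-side sketch (not part of this diff) of how the new CudnnVersion() declaration might be used to gate a cuDNN-dependent path; the helper name is made up:

// Hypothetical helper: decide whether a cuDNN 7.6+ code path may be taken.
#include "paddle/fluid/platform/gpu_info.h"

namespace paddle {
namespace platform {

bool HasCudnnAtLeast76() {
  const int version = CudnnVersion();  // -1 when cuDNN cannot be loaded
  // cudnnGetVersion() encodes version 7.6.x as 7600 + x.
  return version >= 7600;
}

}  // namespace platform
}  // namespace paddle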
4 changes: 4 additions & 0 deletions paddle/fluid/pybind/pybind.cc
@@ -341,6 +341,10 @@ PYBIND11_MODULE(core_noavx, m) {

m.def("set_num_threads", &platform::SetNumThreads);

#ifdef PADDLE_WITH_CUDA
m.def("cudnn_version", &platform::CudnnVersion);
#endif

m.def("from_dlpack", [](py::capsule *dltensor) {
DLManagedTensor *dmt = reinterpret_cast<DLManagedTensor *>(
PyCapsule_GetPointer(dltensor->ptr(), "dltensor"));
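For reference, a short Python sketch (not part of this diff) of how the new binding is reached from the core module; it assumes a CUDA build of Paddle, since cudnn_version is only registered under PADDLE_WITH_CUDA:

# Hypothetical usage of the raw binding added above.
from paddle.fluid import core

if core.is_compiled_with_cuda():
    raw = core.cudnn_version()  # e.g. 7605 for cuDNN 7.6.5, or -1 if cuDNN is missing
    print("raw cuDNN version:", raw)
else:
    print("CPU-only build: core.cudnn_version is not registered")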
2 changes: 2 additions & 0 deletions python/paddle/__init__.py
@@ -231,6 +231,8 @@
from .tensor.stat import std #DEFINE_ALIAS
from .tensor.stat import var #DEFINE_ALIAS
from .fluid.data import data
from .device import get_cudnn_version
# from .tensor.tensor import Tensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensor #DEFINE_ALIAS
# from .tensor.tensor import LoDTensorArray #DEFINE_ALIAS

49 changes: 42 additions & 7 deletions python/paddle/device.py
@@ -13,10 +13,45 @@
# limitations under the License.

# TODO: define the functions to manipulate devices
# __all__ = ['cpu_places',
# 'CPUPlace',
# 'cuda_pinned_places',
# 'cuda_places',
# 'CUDAPinnedPlace',
# 'CUDAPlace',
# 'is_compiled_with_cuda']
from paddle.fluid import core
__all__ = [
    'get_cudnn_version',
    # 'cpu_places',
    # 'CPUPlace',
    # 'cuda_pinned_places',
    # 'cuda_places',
    # 'CUDAPinnedPlace',
    # 'CUDAPlace',
    # 'is_compiled_with_cuda'
]

_cudnn_version = None


def get_cudnn_version():
    """
    This function returns the version of cuDNN. The return value is an int that
    represents the cuDNN version. For example, if it returns 7600, the cuDNN
    version is 7.6.

    Returns:
        int: An int value that represents the cuDNN version. If cuDNN is not
        installed, it returns None.

    Examples:
        .. code-block:: python

            import paddle

            cudnn_version = paddle.get_cudnn_version()

    """
    global _cudnn_version
    if not core.is_compiled_with_cuda():
        # the cudnn_version binding is only registered in CUDA builds
        return None
    if _cudnn_version is None:
        cudnn_version = core.cudnn_version()
        if cudnn_version < 0:
            return None
        else:
            # cache the result: the version never changes within a process
            _cudnn_version = cudnn_version
            return _cudnn_version
    else:
        return _cudnn_version

Contributor commented: The return value should be cached, as it will never change and this function may be called on each step for each OP that uses it in dygraph mode.

Contributor Author replied: Done
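Following the caching discussion above, a short usage sketch (assuming a CUDA build with cuDNN available):

import paddle

first = paddle.get_cudnn_version()   # first call queries core.cudnn_version()
second = paddle.get_cudnn_version()  # later calls return the cached value
assert first == second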
32 changes: 32 additions & 0 deletions python/paddle/fluid/tests/unittests/test_query_op.py
@@ -0,0 +1,32 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import paddle
from paddle.fluid import core


class TestCudnnVersion(unittest.TestCase):
    def test_no_cudnn(self):
        cudnn_version = paddle.get_cudnn_version()
        if not core.is_compiled_with_cuda():
            self.assertEqual((cudnn_version is None), True)
        else:
            self.assertEqual((isinstance(cudnn_version, int)), True)

Contributor commented: This can be merged into the test below, since you cannot test both cases in one go.

Contributor Author replied: Done


if __name__ == '__main__':
unittest.main()