Skip to content
This repository was archived by the owner on May 15, 2024. It is now read-only.

Commit ef96b22

Browse files
committed
update
1 parent ab11b34 commit ef96b22

File tree

1 file changed

+0
-14
lines changed

1 file changed

+0
-14
lines changed

utils/cuda_helper.py

-14
Original file line numberDiff line numberDiff line change
@@ -23,20 +23,6 @@ def gpuones(*size):
2323
with torch.cuda.device(device_num):
2424
return torch.cuda.FloatTensor(*size).fill_(1)
2525

26-
def check_mem(cuda_device):
    """Query total and used memory (in MiB) of one GPU via ``nvidia-smi``.

    Parameters
    ----------
    cuda_device : int or str
        Index of the GPU to query (row index in nvidia-smi's output).

    Returns
    -------
    tuple of (str, str)
        ``(total, used)`` memory in MiB as the raw comma-split strings
        reported by nvidia-smi (callers are expected to ``int()`` them;
        ``used`` may carry a leading space, which ``int()`` tolerates).

    Raises
    ------
    FileNotFoundError
        If ``/usr/bin/nvidia-smi`` does not exist.
    subprocess.CalledProcessError
        If nvidia-smi exits with a non-zero status.
    IndexError
        If ``cuda_device`` is out of range for the GPUs detected.
    """
    import subprocess  # local import: avoids touching the module's import block

    # Argument list + no shell instead of os.popen's shell string: no quoting
    # pitfalls, and a missing binary raises instead of yielding empty output.
    result = subprocess.run(
        ["/usr/bin/nvidia-smi",
         "--query-gpu=memory.total,memory.used",
         "--format=csv,nounits,noheader"],
        stdout=subprocess.PIPE, check=True, text=True,
    )
    devices_info = result.stdout.strip().split("\n")
    total, used = devices_info[int(cuda_device)].split(',')
    return total, used
31-
def occumpy_mem(cuda_device):
    """Reserve ~90% of a GPU's memory so other processes cannot claim it.

    The allocated tensor is deleted immediately; PyTorch's caching allocator
    keeps the freed block reserved for this process, which is the intended
    "occupy" effect.

    Parameters
    ----------
    cuda_device : int or str
        Index of the GPU whose memory should be reserved.
    """
    total, used = check_mem(cuda_device)
    total = int(total)
    used = int(used)
    max_mem = int(total * 0.9)  # leave ~10% headroom for the driver/display
    block_mem = max_mem - used
    # Guard: if the device is already above the 90% target, block_mem is
    # non-positive and FloatTensor(...) would raise — there is nothing left
    # to reserve, so just return.
    if block_mem <= 0:
        return
    # 256 * 1024 float32 elements = 1 MiB, so this tensor spans block_mem MiB.
    x = torch.cuda.FloatTensor(256, 1024, block_mem)
    del x  # drop the Python reference; the caching allocator retains the memory
4026
# pylint: disable=W0603
4127
def init(device=None, __cuda=False):
4228
global _cuda, _zeros, _ones, use_cuda, device_num, _tensor_type, _long_tensor_type

0 commit comments

Comments
 (0)