Memory.cpp
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <c10/util/Exception.h>
#include <c10/core/Storage.h>
namespace at {
namespace native {
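
// Returns true if self's storage was allocated in pinned (page-locked) host
// memory, as reported by the CUDA hooks.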
bool is_pinned(const Tensor& self) {
  return detail::getCUDAHooks().isPinnedPtr(self.storage().data());
}
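
// Copies a dense CPU tensor into freshly allocated pinned (page-locked) host
// memory; returns self unchanged if it is already pinned.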
Tensor pin_memory(const Tensor& self) {
  if (self.options().backend() != Backend::CPU) {
    AT_ERROR("cannot pin '", self.toString(), "'; only dense CPU tensors can be pinned");
  }
  if (self.is_pinned()) {
    return self;
  }
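
  // Allocate a non-resizable Storage backed by the pinned-memory allocator,
  // sized for self's sizes, strides, and element size.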
  auto* allocator = detail::getCUDAHooks().getPinnedMemoryAllocator();
  auto storage = Storage(
      Storage::use_byte_size_t(),
      detail::computeStorageNbytes(
          self.sizes(), self.strides(), self.dtype().itemsize()),
      allocator,
      /*resizable=*/false);
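  // Wrap the pinned storage in a new tensor with self's sizes and strides,
  // then copy the data over.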
  auto tensor = at::empty({0}, self.options()).set_(storage, 0, self.sizes(), self.strides());
  tensor.copy_(self);
  return tensor;
}
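
// A minimal usage sketch of the pinned-memory path (illustrative only; the
// tensor names below are hypothetical, and a CUDA-enabled build is assumed):
//   at::Tensor src = at::randn({1024});               // pageable CPU tensor
//   at::Tensor pinned = src.pin_memory();             // lands in pin_memory() above
//   at::Tensor dst = at::empty_like(pinned, at::kCUDA);
//   dst.copy_(pinned, /*non_blocking=*/true);         // pinned source allows an async H2D copy
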
// Exposes at::has_internal_overlap as an operator for testing purposes
int64_t _debug_has_internal_overlap(const Tensor& self) {
  return static_cast<int64_t>(at::has_internal_overlap(self));
}
} // namespace native
} // namespace at