variable_tensor_functions.h
#pragma once

#include <ATen/ATen.h>
#include <torch/csrc/THP_export.h>

namespace torch {

// NOTE: This API is currently highly experimental and may change drastically
// in the near future.

// These functions provide a small wrapper around ATen, ensuring that new
// tensors are created with type Variable rather than as raw tensors. We also
// provide a few accessors, like requires_grad, that make it easier to get at
// Variable information when we have an at::Tensor.

/// Returns a `TypeExtendedInterface` object for the given backend
/// (e.g. `at::kCPU`) and `ScalarType` (e.g. `at::kDouble`).
/// TODO: Eliminate this function as much as possible
THP_CLASS at::TypeExtendedInterface& getVariableType(at::Backend backend, at::ScalarType type);

/// Returns a `TypeExtendedInterface` object for the CPU backend and the
/// given `ScalarType` (e.g. `at::kDouble`). Equivalent to
/// `getVariableType(at::kCPU, type)`.
/// TODO: Eliminate this function as much as possible
THP_CLASS at::TypeExtendedInterface& CPU(at::ScalarType type);

/// Returns a `TypeExtendedInterface` object for the CUDA backend and the
/// given `ScalarType` (e.g. `at::kDouble`). Equivalent to
/// `getVariableType(at::kCUDA, type)`.
/// TODO: Eliminate this function as much as possible
THP_CLASS at::TypeExtendedInterface& CUDA(at::ScalarType type);
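
// A minimal usage sketch (hypothetical, assuming the historical ATen-style
// factory methods on `Type`, such as `ones(IntList)`):
//
//   at::Tensor a = torch::CPU(at::kFloat).ones({3, 4});  // `a` is a Variable
//   at::Tensor b = torch::CUDA(at::kFloat).ones({3, 4}); // requires a CUDA build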

/// Sets the `requires_grad` property of the given `Tensor`.
THP_CLASS void set_requires_grad(at::Tensor& tensor, bool requires_grad) noexcept;

/// Returns the `requires_grad` property of the given `Tensor`.
THP_CLASS bool requires_grad(const at::Tensor& tensor) noexcept;
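
// For example (hypothetical snippet, building on the factory sketch above):
//
//   at::Tensor t = torch::CPU(at::kFloat).ones({3});
//   torch::set_requires_grad(t, true);  // mark `t` for gradient tracking
//   bool rg = torch::requires_grad(t);  // rg == true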
} // namespace torch