# __init__.pyi.in: type stub template for torch/__init__.pyi (forked from pytorch/pytorch)
# ${generated_comment}
from typing import List, Tuple, Optional, Union, Any, ContextManager, Callable, overload, Iterator
from torch._six import inf
import builtins
# These identifiers are reexported from other modules. These modules
# are not mypy-clean yet, so in order to use this stub file usefully
# from mypy you will need to specify --follow-imports=silent.
# Not all is lost: these imports still enable IDEs like PyCharm to offer
# autocomplete.
#
# Note: Why does the syntax here look so strange? Import visibility
# rules in stubs are different from normal Python files! You must use
# 'from ... import ... as ...' syntax (or a wildcard) to re-export an
# identifier; a plain 'from ... import ...' does not expose the name.
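# For example (illustrative, not part of this file; 'foo' and 'bar' are
# hypothetical names): in a stub,
#   from .foo import bar            # 'bar' is NOT re-exported to users of the stub
#   from .foo import bar as bar     # 'bar' IS re-exported
#   from .foo import *              # wildcard re-exports the module's public names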
from .random import set_rng_state as set_rng_state, get_rng_state as get_rng_state, \
    manual_seed as manual_seed, initial_seed as initial_seed, seed as seed
from ._tensor_str import set_printoptions as set_printoptions
from .functional import *
from .serialization import save as save, load as load
from .autograd import no_grad as no_grad, enable_grad as enable_grad, \
    set_grad_enabled as set_grad_enabled
from . import cuda as cuda
from . import optim as optim
from . import nn as nn
class dtype: ...
class layout: ...
strided: layout = ...
class memory_format: ...
contiguous_format: memory_format = ...
class qscheme: ...
per_tensor_affine: qscheme = ...
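# Illustrative (not part of the stub): these module-level singletons are passed as
# keyword arguments to factory functions, e.g.
#   torch.empty(2, 3, layout=torch.strided, memory_format=torch.contiguous_format)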
# See https://github.com/python/mypy/issues/4146 for why these workarounds
# are necessary
_int = builtins.int
_float = builtins.float
_bool = builtins.bool
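# (Assumed rationale, for readers of this stub: inside the class bodies below, an
# annotation such as 'int' or 'dtype' would resolve to a same-named attribute of the
# class, e.g. the Tensor.dtype property or conversion methods like Tensor.int(), so
# the hints refer to these module-level aliases instead.)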
class device:
    type: str
    index: _int
    @overload
    def __init__(self, device: Union[_int, str]) -> None: ...
    @overload
    def __init__(self, type: str, index: _int) -> None: ...
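# Illustrative usage (not part of the stub), matching the two overloads above:
#   torch.device('cuda:0')      # single string argument
#   torch.device('cuda', 0)     # type string plus index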
class Size(tuple): ...
class Storage: ...
# See https://github.com/python/mypy/issues/4146 for why these workarounds
# are necessary
_dtype = dtype
_device = device
_qscheme = qscheme
_size = Union[Size, List[_int], Tuple[_int, ...]]
_layout = layout
# Meta-type for "numeric" things; matches our docs
Number = Union[builtins.int, builtins.float, builtins.bool]
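# Illustrative (not part of the stub): every _size form is accepted where a size is
# expected, e.g. torch.ones(torch.Size([2, 3])), torch.ones([2, 3]), torch.ones((2, 3)).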
class Generator:
    device: _device = ...
    @overload
    def __init__(self, device: Optional[_device] = None) -> None: ...
    @overload
    def __init__(self, device: Union[_int, str]) -> None: ...
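# Illustrative usage (not part of the stub), matching the overloads above:
#   g = torch.Generator()                # default generator
#   g = torch.Generator(device='cuda')   # generator on a specific device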
# TODO: One downside of doing it this way is that direct use of
# torch.tensor.Tensor doesn't get type annotations. Nobody should
# really do that, so maybe this is not so bad.
class Tensor:
    requires_grad: _bool = ...
    grad: Optional[Tensor] = ...
    data: Tensor = ...
    names: List[str] = ...
    @property
    def dtype(self) -> _dtype: ...
    @property
    def shape(self) -> Size: ...
    @property
    def device(self) -> _device: ...
    @property
    def T(self) -> Tensor: ...
    @property
    def grad_fn(self) -> Optional[Any]: ...
    @property
    def ndim(self) -> _int: ...
    @property
    def layout(self) -> _layout: ...
    ${tensor_method_hints}
    # Manually defined methods from torch/tensor.py
    def __len__(self) -> _int: ...
    def __iter__(self) -> Iterator[Tensor]: ...
    def __contains__(self, item: Union[Tensor, Number]) -> _bool: ...
    def register_hook(self, hook: Callable) -> Any: ...
    def retain_grad(self) -> None: ...
    def is_shared(self) -> _bool: ...
    def share_memory_(self) -> None: ...
    # TODO: fill in the types for these, or otherwise figure out some
    # way to not have to write these out again...
    def nonzero(self, *, as_tuple=True): ...
    def norm(self, p="fro", dim=None, keepdim=False): ...
    def stft(self, n_fft, hop_length=None, win_length=None, window=None,
             center=True, pad_mode='reflect', normalized=False, onesided=True): ...
    def split(self, split_size, dim=0): ...
    def unique(self, sorted=True, return_inverse=False, dim=None): ...
    def unique_consecutive(self, sorted=True, return_inverse=False, return_counts=False, dim=None): ...
    def lu(self, pivot=True, get_infos=False): ...
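    # Note (illustrative, not part of the stub): the methods above are left untyped
    # because their return types depend on flag arguments, e.g.
    #   t.unique(return_inverse=True)   # (values, inverse_indices) tuple
    #   t.unique()                      # just the values tensor
    # Typing them precisely would need one @overload per flag combination.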
${function_hints}
${legacy_class_hints}
${dtype_class_hints}
# Pure Python functions defined in torch/__init__.py
def typename(obj) -> str: ...
def is_tensor(obj) -> _bool: ...
def is_storage(obj) -> _bool: ...
def set_default_tensor_type(type) -> None: ... # ick, what a bad legacy API
def set_default_dtype(d: _dtype) -> None: ...
def manager_path() -> str: ...
def compiled_with_cxx11_abi() -> _bool: ...
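# Illustrative (not part of the stub): per the docs, is_tensor(obj) is equivalent to
# isinstance(obj, Tensor), and is_storage(obj) checks for the torch storage classes.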
# The return type of this function depends on the value of `as_tuple`
# (similar to `unique`, `lu`, etc.), so it cannot be typed precisely here.
def nonzero(input: Tensor, *, out: Optional[Tensor] = None, as_tuple: Optional[_bool] = None): ...
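# Illustrative (not part of the stub): the two return shapes referred to above,
#   torch.nonzero(t)                  # 2-D Tensor of indices, one row per nonzero element
#   torch.nonzero(t, as_tuple=True)   # tuple of 1-D index tensors, one per dimension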