# This file was autogenerated by uv via the following command:
# uv pip compile - -o requirements.txt
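#
# Usage sketch (not part of the generated output): to install the pinned
# dependencies below into the current environment, one option is
#   uv pip install -r requirements.txt
# or, with plain pip,
#   pip install -r requirements.txt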
alembic==1.13.2
# via optuna
beaupy==3.9.2
certifi==2024.8.30
# via
# requests
# sentry-sdk
charset-normalizer==3.3.2
# via requests
click==8.1.7
# via wandb
colorlog==6.8.2
# via optuna
contourpy==1.3.0
# via matplotlib
cycler==0.12.1
# via matplotlib
docker-pycreds==0.4.0
# via wandb
emoji==2.13.2
# via beaupy
filelock==3.15.4
# via
# torch
# triton
fonttools==4.53.1
# via matplotlib
fsspec==2024.6.1
# via torch
gitdb==4.0.11
# via gitpython
gitpython==3.1.43
# via wandb
greenlet==3.0.3
# via sqlalchemy
idna==3.8
# via requests
jinja2==3.1.4
# via torch
kiwisolver==1.4.5
# via matplotlib
mako==1.3.5
# via alembic
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via
# jinja2
# mako
matplotlib==3.9.2
# via scienceplots
mdurl==0.1.2
# via markdown-it-py
mpmath==1.3.0
# via sympy
networkx==3.3
# via torch
numpy==2.1.0
# via
# contourpy
# matplotlib
# optuna
nvidia-cublas-cu12==12.1.3.1
# via
# nvidia-cudnn-cu12
# nvidia-cusolver-cu12
# torch
nvidia-cuda-cupti-cu12==12.1.105
# via torch
nvidia-cuda-nvrtc-cu12==12.1.105
# via torch
nvidia-cuda-runtime-cu12==12.1.105
# via torch
nvidia-cudnn-cu12==9.1.0.70
# via torch
nvidia-cufft-cu12==11.0.2.54
# via torch
nvidia-curand-cu12==10.3.2.106
# via torch
nvidia-cusolver-cu12==11.4.5.107
# via torch
nvidia-cusparse-cu12==12.1.0.106
# via
# nvidia-cusolver-cu12
# torch
nvidia-nccl-cu12==2.20.5
# via torch
nvidia-nvjitlink-cu12==12.6.68
# via
# nvidia-cusolver-cu12
# nvidia-cusparse-cu12
nvidia-nvtx-cu12==12.1.105
# via torch
optuna==3.6.1
packaging==24.1
# via
# matplotlib
# optuna
pillow==10.4.0
# via matplotlib
platformdirs==4.2.2
# via wandb
polars==1.6.0
protobuf==5.28.0
# via wandb
psutil==6.0.0
# via wandb
pygments==2.18.0
# via rich
pyparsing==3.1.4
# via matplotlib
python-dateutil==2.9.0.post0
# via matplotlib
python-yakh==0.3.2
# via
# beaupy
# questo
pyyaml==6.0.2
# via
# optuna
# wandb
questo==0.3.0
# via beaupy
requests==2.32.3
# via wandb
rich==13.8.1
# via
# beaupy
# questo
scienceplots==2.1.1
sentry-sdk==2.13.0
# via wandb
setproctitle==1.3.3
# via wandb
setuptools==74.0.0
# via
# torch
# wandb
six==1.16.0
# via
# docker-pycreds
# python-dateutil
smmap==5.0.1
# via gitdb
sqlalchemy==2.0.32
# via
# alembic
# optuna
survey==5.4.0
sympy==1.13.2
# via torch
torch==2.4.0
tqdm==4.66.5
# via optuna
triton==3.0.0
# via torch
typing-extensions==4.12.2
# via
# alembic
# sqlalchemy
# torch
urllib3==2.2.2
# via
# requests
# sentry-sdk
wandb==0.17.8