setup.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import glob
import os
import shutil
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension

torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3"
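
# Python compares lists lexicographically, so e.g. torch 1.10.0 parses to [1, 10]
# and still satisfies [1, 10] >= [1, 3].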


def get_extensions():
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "detectron2", "layers", "csrc")

    main_source = os.path.join(extensions_dir, "vision.cpp")
    sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
    source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
        os.path.join(extensions_dir, "*.cu")
    )

    sources = [main_source] + sources

    extension = CppExtension

    extra_compile_args = {"cxx": []}
    define_macros = []

    if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [("WITH_CUDA", None)]
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]

        # It's better if pytorch can do this by default ..
        CC = os.environ.get("CC", None)
        if CC is not None:
            extra_compile_args["nvcc"].append("-ccbin={}".format(CC))

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "detectron2._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    return ext_modules
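
# The native extension above is built with CUDA support only when a CUDA toolchain
# is visible (torch.cuda.is_available() and CUDA_HOME is set) or when FORCE_CUDA=1
# is exported. One way to force a GPU build on a machine without a visible GPU
# (for example, inside a docker image build) would be roughly:
#
#   FORCE_CUDA=1 pip install -e .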


def get_model_zoo_configs() -> List[str]:
    """
    Return a list of configs to include in package for model zoo. Copy over these configs inside
    detectron2/model_zoo.
    """

    # Use absolute paths while symlinking.
    source_configs_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
    destination = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "detectron2", "model_zoo", "configs"
    )
    # Symlink the config directory inside package to have a cleaner pip install.
    if os.path.exists(destination):
        # Remove stale symlink/directory from a previous build.
        if os.path.islink(destination):
            os.unlink(destination)
        else:
            shutil.rmtree(destination)

    try:
        os.symlink(source_configs_dir, destination)
    except OSError:
        # Fall back to copying if symlink fails: ex. on Windows.
        shutil.copytree(source_configs_dir, destination)

    config_paths = glob.glob("configs/**/*.yaml", recursive=True)

    return config_paths
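
# setuptools resolves package_data patterns relative to each package's directory,
# so the relative "configs/**/*.yaml" paths returned above are expected to line up
# with the detectron2/model_zoo/configs tree that get_model_zoo_configs() just
# symlinked (or copied) into place.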


setup(
    name="detectron2",
    version="0.1",
    author="FAIR",
    url="https://github.com/facebookresearch/detectron2",
    description="Detectron2 is FAIR's next-generation research "
    "platform for object detection and segmentation.",
    packages=find_packages(exclude=("configs", "tests")),
    package_data={"detectron2.model_zoo": get_model_zoo_configs()},
    python_requires=">=3.6",
    install_requires=[
        "termcolor>=1.1",
        "Pillow>=6.0",
        "yacs>=0.1.6",
        "tabulate",
        "cloudpickle",
        "matplotlib",
        "tqdm>4.29.0",
        "tensorboard",
    ],
    extras_require={"all": ["shapely", "psutil"], "dev": ["flake8", "isort", "black==19.3b0"]},
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
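
# A rough usage sketch (one possible workflow):
#   pip install -e .          # editable install; builds detectron2._C via BuildExtension
#   pip install -e ".[all]"   # additionally installs the optional shapely and psutil extras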