#!/usr/bin/env python
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from functools import partial
from importlib.util import module_from_spec, spec_from_file_location
from setuptools import find_packages, setup
# https://packaging.python.org/guides/single-sourcing-package-version/
# http://blog.ionelmc.ro/2014/05/25/python-packaging/
_PATH_ROOT = os.path.dirname(__file__)
_PATH_REQUIRE = os.path.join(_PATH_ROOT, "requirements")
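# Load a module (e.g. flash/__about__.py) directly from its file path so that package
# metadata can be read at install time without importing the `flash` package itself
# (and therefore without requiring its dependencies to already be installed).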
def _load_py_module(fname, pkg="flash"):
    spec = spec_from_file_location(
        os.path.join(pkg, fname),
        os.path.join(_PATH_ROOT, pkg, fname),
    )
    py = module_from_spec(spec)
    spec.loader.exec_module(py)
    return py

about = _load_py_module("__about__.py")
setup_tools = _load_py_module("setup_tools.py")
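# the readme is used as the long description rendered on PyPI (see long_description below)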
long_description = setup_tools._load_readme_description(
    _PATH_ROOT,
    homepage=about.__homepage__,
    ver=about.__version__,
)
base_req = setup_tools._load_requirements(path_dir=_PATH_ROOT, file_name="requirements.txt")
# find all extra requirements
_load_req = partial(setup_tools._load_requirements, path_dir=_PATH_REQUIRE)
SKIP_REQ_FILES = ("devel.txt",)
found_req_files = sorted(os.path.basename(p) for p in glob.glob(os.path.join(_PATH_REQUIRE, "*.txt")))
# filter unwanted files
found_req_files = [n for n in found_req_files if n not in SKIP_REQ_FILES]
found_req_names = [os.path.splitext(req)[0].replace("datatype_", "") for req in found_req_files]
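# e.g. a file named "datatype_image.txt" becomes the extra "image"; names that still
# contain an underscore are treated below as add-ons that extend the base extra.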
# define basic and extra extras
extras_req = {
    name: _load_req(file_name=fname) for name, fname in zip(found_req_names, found_req_files) if "_" not in name
}
extras_req.update(
    {
        name: extras_req[name.split("_")[0]] + _load_req(file_name=fname)
        for name, fname in zip(found_req_names, found_req_files)
        if "_" in name
    }
)
# some extra combinations
extras_req["vision"] = extras_req["image"] + extras_req["video"]
extras_req["all"] = extras_req["vision"] + extras_req["tabular"] + extras_req["text"]
extras_req["dev"] = extras_req["all"] + extras_req["test"] + extras_req["docs"]
# filter the uniques
extras_req = {n: list(set(req)) for n, req in extras_req.items()}
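# each extra can then be installed like any setuptools extra,
# e.g. `pip install "lightning-flash[image]"` or `pip install "lightning-flash[all]"`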
# https://packaging.python.org/discussions/install-requires-vs-requirements/
# Keep the metadata here for simplicity when reading this file: it is not obvious
# what happens otherwise, and non-engineers won't know to look in __init__.
# The goal of the project is simplicity for researchers, so we avoid adding too many
# engineering-specific practices.
setup(
    name="lightning-flash",
    version=about.__version__,
    description=about.__docs__,
    author=about.__author__,
    author_email=about.__author_email__,
    url=about.__homepage__,
    download_url="https://github.com/PyTorchLightning/lightning-flash",
    license=about.__license__,
    packages=find_packages(exclude=["tests", "tests.*"]),
    long_description=long_description,
    long_description_content_type="text/markdown",
    include_package_data=True,
    extras_require=extras_req,
    entry_points={
        "console_scripts": ["flash=flash.__main__:main"],
    },
    zip_safe=False,
    keywords=["deep learning", "pytorch", "AI"],
    python_requires=">=3.6",
    install_requires=base_req,
    project_urls={
        "Bug Tracker": "https://github.com/PyTorchLightning/lightning-flash/issues",
        "Documentation": "https://lightning-flash.rtfd.io/en/latest/",
        "Source Code": "https://github.com/PyTorchLightning/lightning-flash",
    },
    classifiers=[
        "Environment :: Console",
        "Natural Language :: English",
        # How mature is this project? Common values are
        #   3 - Alpha, 4 - Beta, 5 - Production/Stable
        "Development Status :: 4 - Beta",
        # Indicate who your project is intended for
        "Intended Audience :: Developers",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Scientific/Engineering :: Image Recognition",
        "Topic :: Scientific/Engineering :: Information Analysis",
        # Pick your license as you wish
        # 'License :: OSI Approved :: BSD License',
        "Operating System :: OS Independent",
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
)