0.2.1.5
# integration
- add Travis CI
- fix build parameters for Python

# converter
- add a half-precision (fp16) storage option to the MNN converter
- fix op names being lost during conversion
- fix converter bugs when printing inputs/outputs and when Identity removal dropped outputs

# ops
- add quantized Convolution & Deconvolution support on OpenCL
- add more expression ops (see the sketch after this list)
- add a DetectionPostProcess op for TensorFlow Lite (SSD models are now supported directly)
- add LSTM & ELU support for ONNX
- add support for ONNX Convolution with non-constant weights
- fix Unary op compile error on Linux
- fix Metal backend buffer reuse after resize
- fix raw memory access in the Metal backend after model release
- fix redundant transpose in the Winograd generator
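
As a rough illustration of the expanded expression API, here is a minimal sketch chaining a few of the new ops. The header names and the _Const signature come from the diffs below; the include paths, shapes and values are arbitrary, evaluation is omitted, and none of this code is part of the commit itself.

// Sketch only: exercises ops added in 0.2.1.5.
#include <vector>
#include "MathOp.hpp"          // _Min, _Max (new)
#include "NeuralNetWorkOp.hpp" // _Const, _PRelu, _Squeeze, _Unsqueeze (new)

using namespace MNN::Express;

int main() {
    std::vector<float> xData(1 * 2 * 2 * 4, 1.0f);
    std::vector<float> yData(1 * 2 * 2 * 4, 2.0f);
    auto x = _Const(xData.data(), {1, 2, 2, 4}, NHWC, halide_type_of<float>());
    auto y = _Const(yData.data(), {1, 2, 2, 4}, NHWC, halide_type_of<float>());

    auto lo  = _Min(x, y);                               // new element-wise minimum
    auto hi  = _Max(x, y);                               // new element-wise maximum
    auto act = _PRelu(hi, {0.25f, 0.25f, 0.25f, 0.25f}); // new PReLU, one slope per channel
    auto out = _Squeeze(_Unsqueeze(act, {0}), {0});      // round trip through the new (Un)Squeeze
    (void)lo; (void)out;
    return 0;
}
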
liqing committed Nov 15, 2019
1 parent 09e792e commit e93e8dc
Showing 102 changed files with 2,796 additions and 216 deletions.
4 files renamed without changes.
5 changes: 3 additions & 2 deletions CMakeLists.txt
@@ -190,9 +190,10 @@ set(MNN.Source_DIR
${MNN.Path}/shape
)
include_directories(
"include/"
"schema/current"
"include/"
"schema/current"
"3rd_party/flatbuffers/include"
"3rd_party/half"
)

if(MNN_METAL)
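
The "3rd_party/half" include directory added above backs the converter's new half-precision storage option. Below is a minimal sketch of how such a single-header half type is typically used to shrink fp32 weights for storage; it assumes the vendored library is the half_float "half.hpp" header and is not code from this commit.

#include <cstddef>
#include <vector>
#include "half.hpp" // assumed: half_float single-header library under 3rd_party/half

int main() {
    std::vector<float> weights = {0.1f, -0.2f, 0.3f};
    std::vector<half_float::half> stored(weights.size());
    for (std::size_t i = 0; i < weights.size(); ++i) {
        stored[i] = half_float::half(weights[i]);   // fp32 -> fp16, half the storage
    }
    float restored = static_cast<float>(stored[0]); // fp16 -> fp32 again at load time
    (void)restored;
    return 0;
}
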
9 changes: 6 additions & 3 deletions MNN.podspec
@@ -32,9 +32,10 @@ Pod::Spec.new do |s|
s.ios.deployment_target = '8.0'
s.requires_arc = true

s.source = { :git => "git@github.com:alibaba/MNN.git", :branch => 'master' }
s.source = { :git => "https://github.com/alibaba/MNN.git", :branch => 'master' }
s.frameworks = 'Metal', 'Accelerate'
s.library = 'c++'
s.prepare_command = 'schema/generate.sh'

s.subspec 'core' do |a|
a.source_files = \
@@ -52,15 +53,17 @@ Pod::Spec.new do |s|
end
s.subspec 'armv7' do |a|
a.source_files = 'source/backend/cpu/arm/arm32/*.{h,c,m,mm,cc,S,hpp,cpp}'
a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'}
end
s.subspec 'aarch64' do |a|
a.source_files = 'source/backend/cpu/arm/arm64/*.{h,c,m,mm,cc,S,hpp,cpp}'
a.pod_target_xcconfig = {'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)/source/backend/cpu/arm/"'}
end
s.subspec 'metal' do |a|
a.source_files = 'source/backend/metal/**/*.{h,c,m,mm,cc,hpp,cpp,metal}'
end

s.default_subspecs = 'core', 'armv7', 'aarch64', 'metal'
s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '"$(PODS_TARGET_SRCROOT)" "$(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include" ', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'}
s.user_target_xcconfig = { 'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a'}
s.pod_target_xcconfig = {'METAL_LIBRARY_FILE_BASE' => 'mnn', 'HEADER_SEARCH_PATHS' => '$(PODS_TARGET_SRCROOT)/include $(PODS_TARGET_SRCROOT)/3rd_party/flatbuffers/include $(PODS_TARGET_SRCROOT)/schema/current $(PODS_TARGET_SRCROOT)/source/core/ $(PODS_TARGET_SRCROOT)/source/backend/cpu/ $(PODS_TARGET_SRCROOT)/source/backend/cpu/compute/ $(PODS_TARGET_SRCROOT)/source/math/ $(PODS_TARGET_SRCROOT)/3rd_party/half', 'GCC_PREPROCESSOR_DEFINITIONS' => '$(inherited) MNN_CODEGEN_REGISTER=1 MNN_SUPPORT_TFLITE_QUAN=1'}
s.user_target_xcconfig = {'OTHER_LDFLAGS' => '-force_load $(BUILD_DIR)/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)/MNN/libMNN.a'}
end
5 changes: 5 additions & 0 deletions express/include/Expr.hpp
@@ -100,6 +100,11 @@ class MNN_EXPRESS_PUBLIC Variable {
size_t linkNumber() const {
return mTo.size();
}

const std::list< std::pair<int, WeakEXPRP> >& toExprs() const{
return mTo;
}

private:
Variable(EXPRP expr, int index) {
mFrom = expr;
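
The new toExprs() accessor exposes the expressions that consume a variable. A small sketch of walking those consumers follows; it assumes WeakEXPRP is a weak pointer to Expr (as the name suggests) and that the paired int is the consumer's input slot, and it is illustration only.

#include "Expr.hpp"

using namespace MNN::Express;

// Count how many of a variable's consumer expressions are still alive.
static int countLiveConsumers(VARP var) {
    int live = 0;
    for (const auto& link : var->toExprs()) {
        int inputIndex = link.first;              // presumably the input slot that reads `var`
        if (auto consumer = link.second.lock()) { // consumer behaves like an EXPRP here
            ++live;
        }
        (void)inputIndex;
    }
    return live;
}
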
2 changes: 2 additions & 0 deletions express/include/MathOp.hpp
@@ -14,6 +14,8 @@ MNN_EXPRESS_PUBLIC VARP _Mul(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Sub(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Add(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Div(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Min(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Max(VARP x, VARP y);
MNN_EXPRESS_PUBLIC VARP _Log(VARP x);
MNN_EXPRESS_PUBLIC VARP _Neg(VARP x);
MNN_EXPRESS_PUBLIC VARP _Rsqrt(VARP x);
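
A common use of the two new binary ops is clamping a tensor into a range. A minimal sketch, assuming x, lower and upper are existing VARPs of compatible shape (the helper name is hypothetical, not part of the API):

#include "MathOp.hpp"

using namespace MNN::Express;

// clamp(x, lower, upper) = min(max(x, lower), upper)
static VARP _Clamp(VARP x, VARP lower, VARP upper) {
    return _Min(_Max(x, lower), upper);
}
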
12 changes: 9 additions & 3 deletions express/include/NeuralNetWorkOp.hpp
@@ -33,8 +33,10 @@ MNN_EXPRESS_PUBLIC VARP _Scale(VARP x, int channels, std::vector<float>&& scales

MNN_EXPRESS_PUBLIC VARP _Relu(VARP x, float slope = 0.0f);
MNN_EXPRESS_PUBLIC VARP _Relu6(VARP x);
MNN_EXPRESS_PUBLIC VARP _PRelu(VARP x, std::vector<float> &&slopes);
MNN_EXPRESS_PUBLIC VARP _Softmax(VARP x, int axis);
MNN_EXPRESS_PUBLIC std::vector<VARP> _Slice(VARP x, INTS points, int axis);
MNN_EXPRESS_PUBLIC std::vector<VARP> _Split(VARP x, INTS points, int axis);
MNN_EXPRESS_PUBLIC VARP _Slice(VARP x, VARP starts, VARP sizes);
MNN_EXPRESS_PUBLIC VARP _Concat(VARPS xs, int axis);
MNN_EXPRESS_PUBLIC VARP _Convert(VARP x, Dimensionformat dest);
MNN_EXPRESS_PUBLIC VARP _Transpose(VARP x, INTS perm);
@@ -51,13 +53,17 @@ MNN_EXPRESS_PUBLIC VARP _Resize(VARP x, float xScale, float yScale);
MNN_EXPRESS_PUBLIC VARP _Pad(VARP x, VARP pads);
MNN_EXPRESS_PUBLIC VARP _ExpandDims(VARP x, int axis);
MNN_EXPRESS_PUBLIC VARP _ExpandDims(VARP x, VARP axis);


MNN_EXPRESS_PUBLIC VARP _Shape(VARP x);
MNN_EXPRESS_PUBLIC VARP _Pack(VARPS xs, halide_type_t dtype, int axis);
enum InterpolationMethod {BILINEAR, NEAREST};
MNN_EXPRESS_PUBLIC VARP _CropAndResize(VARP image, VARP boxes, VARP indexes, VARP sizes, float extrapolation, InterpolationMethod method);
MNN_EXPRESS_PUBLIC VARP _Fill(VARP s, VARP v);
MNN_EXPRESS_PUBLIC VARP _Tile(VARP x, VARP mul);
MNN_EXPRESS_PUBLIC VARP _Gather(VARP embedding, VARP indices);
MNN_EXPRESS_PUBLIC VARP _GatherV2(VARP params, VARP indices, VARP axis = nullptr);

MNN_EXPRESS_PUBLIC VARP _Squeeze(VARP x, INTS axes = {});
MNN_EXPRESS_PUBLIC VARP _Unsqueeze(VARP x, INTS axes = {});

} // namespace Express
} // namespace MNN
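
Note the signature change in this header: the old multi-output _Slice(x, points, axis) is renamed _Split, and _Slice is now a single-output, TensorFlow-style slice taking start and size tensors. A sketch of the new calls follows; the begin/size interpretation is inferred from the OpType_SliceTf used in the implementation further below, and the shapes are arbitrary.

#include "NeuralNetWorkOp.hpp"

using namespace MNN::Express;

// Crop a {1, 4, 4, 8} NHWC tensor to its central {1, 2, 2, 8} window.
static VARP cropCenter(VARP x) {
    int starts[] = {0, 1, 1, 0};
    int sizes[]  = {1, 2, 2, 8};
    auto startsV = _Const(starts, {4}, NHWC, halide_type_of<int>());
    auto sizesV  = _Const(sizes,  {4}, NHWC, halide_type_of<int>());
    auto crop    = _Slice(x, startsV, sizesV); // new TF-style slice: per-dimension begin + size
    auto shape   = _Shape(crop);               // new: shape of a variable as a tensor
    (void)shape;
    return crop;
}
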
2 changes: 1 addition & 1 deletion express/source/Expr.cpp
@@ -116,7 +116,7 @@ bool Expr::requireInfo() {
return false;
}
mInside->mInputInfos[i] = mInputs[i]->getInfo();
if (nullptr == mInside->mInputInfos[i] && OpType_Concat != mOp->type()) {
if (nullptr == mInside->mInputInfos[i] && (!mInside->mReq.supportError[i])) {
#ifdef MNN_EXPRESS_ERROR_REPORT
MNN_ERROR("%s, %d input not ready\n", mName.c_str(), i);
#endif
6 changes: 6 additions & 0 deletions express/source/MathOp.cpp
@@ -70,6 +70,12 @@ VARP _Sub(VARP x, VARP y) {
VARP _Add(VARP x, VARP y) {
return _Binary(x, y, BinaryOpOperation_ADD);
}
VARP _Min(VARP x, VARP y) {
return _Binary(x, y, BinaryOpOperation_MINIMUM);
}
VARP _Max(VARP x, VARP y) {
return _Binary(x, y, BinaryOpOperation_MAXIMUM);
}
VARP _Neg(VARP x) {
return _Unary(x, UnaryOpOperation_NEG);
}
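
Both new wrappers reuse this file's internal _Binary(x, y, BinaryOpOperation_*) helper, so exposing another binary op is a one-liner in the same pattern. A hypothetical example, assuming the schema also defines BinaryOpOperation_POW (illustration only, not part of the commit):

VARP _MyPow(VARP x, VARP y) {                    // hypothetical name, mirrors _Min/_Max above
    return _Binary(x, y, BinaryOpOperation_POW); // assumes BinaryOpOperation_POW exists in the schema
}
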
49 changes: 48 additions & 1 deletion express/source/NeuralNetWorkOp.cpp
@@ -300,6 +300,15 @@ VARP _Relu6(VARP x) {
relu->type = OpType_ReLU6;
return (Variable::create(Expr::create(relu.get(), {x})));
}
VARP _PRelu(VARP x, std::vector<float> &&slopes) {
std::unique_ptr<OpT> prelu(new OpT);
prelu->type = OpType_PReLU;
prelu->main.type = OpParameter_PRelu;
prelu->main.value = new PReluT;
prelu->main.AsPRelu()->slope = slopes;
prelu->main.AsPRelu()->slopeCount = slopes.size();
return (Variable::create(Expr::create(prelu.get(), {x})));
}

VARP _Softmax(VARP x, int axis) {
std::unique_ptr<OpT> softmax(new OpT);
@@ -335,7 +344,7 @@ VARP _Convert(VARP x, Dimensionformat dest) {
return (Variable::create(Expr::create(convert.get(), {x})));
}

std::vector<VARP> _Slice(VARP x, INTS points, int axis) {
std::vector<VARP> _Split(VARP x, INTS points, int axis) {
MNN_ASSERT(points.size() >= 1);
std::unique_ptr<OpT> op(new OpT);
op->type = OpType_Slice;
@@ -354,6 +363,12 @@ std::vector<VARP> _Slice(VARP x, INTS points, int axis) {
return res;
}

VARP _Slice(VARP x, VARP starts, VARP sizes) {
std::unique_ptr<OpT> slice(new OpT);
slice->type = OpType_SliceTf;
return (Variable::create(Expr::create(slice.get(), {x, starts, sizes})));
}

VARP _Transpose(VARP x, INTS perm) {
auto permVar = _Const((const void*)perm.data(), {static_cast<int>(perm.size())}, NHWC, halide_type_of<int>());
return _Transpose(x, permVar);
@@ -508,6 +523,11 @@ VARP _ExpandDims(VARP x, VARP axis) {
return (Variable::create(Expr::create(std::move(expand), {x, axis})));
}

VARP _Shape(VARP x) {
std::unique_ptr<OpT> shape(new OpT);
shape->type = OpType_Shape;
return (Variable::create(Expr::create(std::move(shape), {x})));
}
VARP _Pack(VARPS xs, halide_type_t dtype, int axis) {
std::unique_ptr<OpT> pack(new OpT);
pack->type = OpType_Pack;
@@ -546,6 +566,12 @@ VARP _Tile(VARP x, VARP mul) {
tile->type = OpType_Tile;
return (Variable::create(Expr::create(std::move(tile), {x, mul})));
}
VARP _Gather(VARP embedding, VARP indices) {
std::unique_ptr<OpT> gather(new OpT);
gather->type = OpType_Gather;
gather->main.value = new GatherT;
return (Variable::create(Expr::create(std::move(gather), {embedding, indices})));
}
VARP _GatherV2(VARP params, VARP indices, VARP axis) {
std::unique_ptr<OpT> gather(new OpT);
gather->type = OpType_GatherV2;
@@ -556,5 +582,26 @@ VARP _GatherV2(VARP params, VARP indices, VARP axis) {
return (Variable::create(Expr::create(std::move(gather), {params, indices})));
}
}

VARP _Squeeze(VARP x, INTS axes){
std::unique_ptr<OpT> squeeze(new OpT);
squeeze->type = OpType_Squeeze;
auto squeezeParam = new SqueezeParamT;
squeezeParam->squeezeDims = axes;
squeeze->main.type = OpParameter_SqueezeParam;
squeeze->main.value = squeezeParam;
return Variable::create(Expr::create(std::move(squeeze), {x}));
}

VARP _Unsqueeze(VARP x, INTS axes){
std::unique_ptr<OpT> squeeze(new OpT);
squeeze->type = OpType_Unsqueeze;
auto squeezeParam = new SqueezeParamT;
squeezeParam->squeezeDims = axes;
squeeze->main.type = OpParameter_SqueezeParam;
squeeze->main.value = squeezeParam;
return Variable::create(Expr::create(std::move(squeeze), {x}));
}

} // namespace Express
} // namespace MNN
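
Every wrapper in this file follows the same recipe: fill a flatbuffers OpT with an op type and, where needed, a parameter table, then wrap it in an Expr and expose the result through Variable::create. Below is a commented restatement of the _Unsqueeze case above, usable as a template for further wrappers; the function name is hypothetical.

VARP _MyUnsqueeze(VARP x, INTS axes) {       // hypothetical duplicate of _Unsqueeze, for illustration
    std::unique_ptr<OpT> op(new OpT);        // object-API table describing one op
    op->type = OpType_Unsqueeze;             // which operator this expression represents
    auto param = new SqueezeParamT;          // parameter table; ownership passes to op->main
    param->squeezeDims = axes;               // the axes to insert
    op->main.type  = OpParameter_SqueezeParam;
    op->main.value = param;
    // One op, one input; Variable::create exposes the expression's single output.
    return Variable::create(Expr::create(std::move(op), {x}));
}
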
18 changes: 16 additions & 2 deletions project/ios/MNN.xcodeproj/project.pbxproj
@@ -765,6 +765,9 @@
EBB38F3621E748B9005F76D7 /* ShapeSpaceToBatchND.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFD21E748B9005F76D7 /* ShapeSpaceToBatchND.cpp */; };
EBB38F3721E748B9005F76D7 /* ShapePack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFE21E748B9005F76D7 /* ShapePack.cpp */; };
EBB38F3821E748B9005F76D7 /* ShapeDeconvolution.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBB38EFF21E748B9005F76D7 /* ShapeDeconvolution.cpp */; };
EBD9FF12236A939700E188F5 /* ShapeDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBD9FF11236A939700E188F5 /* ShapeDetectionPostProcess.cpp */; };
EBD9FF15236A93AB00E188F5 /* CPUDetectionPostProcess.cpp in Sources */ = {isa = PBXBuildFile; fileRef = EBD9FF13236A93AB00E188F5 /* CPUDetectionPostProcess.cpp */; };
EBD9FF16236A93AB00E188F5 /* CPUDetectionPostProcess.hpp in Headers */ = {isa = PBXBuildFile; fileRef = EBD9FF14236A93AB00E188F5 /* CPUDetectionPostProcess.hpp */; };
/* End PBXBuildFile section */

/* Begin PBXContainerItemProxy section */
@@ -1571,6 +1574,9 @@
EBB38EFD21E748B9005F76D7 /* ShapeSpaceToBatchND.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeSpaceToBatchND.cpp; sourceTree = "<group>"; };
EBB38EFE21E748B9005F76D7 /* ShapePack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapePack.cpp; sourceTree = "<group>"; };
EBB38EFF21E748B9005F76D7 /* ShapeDeconvolution.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDeconvolution.cpp; sourceTree = "<group>"; };
EBD9FF11236A939700E188F5 /* ShapeDetectionPostProcess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ShapeDetectionPostProcess.cpp; sourceTree = "<group>"; };
EBD9FF13236A93AB00E188F5 /* CPUDetectionPostProcess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CPUDetectionPostProcess.cpp; sourceTree = "<group>"; };
EBD9FF14236A93AB00E188F5 /* CPUDetectionPostProcess.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = CPUDetectionPostProcess.hpp; sourceTree = "<group>"; };
/* End PBXFileReference section */

/* Begin PBXFrameworksBuildPhase section */
@@ -1945,6 +1951,8 @@
48887410215B639D0079B12E /* cpu */ = {
isa = PBXGroup;
children = (
EBD9FF13236A93AB00E188F5 /* CPUDetectionPostProcess.cpp */,
EBD9FF14236A93AB00E188F5 /* CPUDetectionPostProcess.hpp */,
48057D8B2330E85C00F922BE /* CPUMatrixBandPart.cpp */,
48057D8A2330E85C00F922BE /* CPUMatrixBandPart.hpp */,
48057D872330A90900F922BE /* CPUGatherND.cpp */,
@@ -2559,6 +2567,7 @@
EBB38EC621E748B9005F76D7 /* shape */ = {
isa = PBXGroup;
children = (
EBD9FF11236A939700E188F5 /* ShapeDetectionPostProcess.cpp */,
48057D842330A8F900F922BE /* ShapeGatherND.cpp */,
C422D73D2326449500FD59D0 /* ShapePool3D.cpp */,
C422D737232634DD00FD59D0 /* ShapeConvolution3D.cpp */,
@@ -2683,6 +2692,7 @@
4841B60D21EC607E002E5D66 /* CPUQuantizedLogistic.hpp in Headers */,
92D765AD2228188700178BE5 /* Session.hpp in Headers */,
CE96FE7321707D58004AB400 /* MetalSigmoid.hpp in Headers */,
EBD9FF16236A93AB00E188F5 /* CPUDetectionPostProcess.hpp in Headers */,
48887612215B639F0079B12E /* CPUSigmoid.hpp in Headers */,
4888767D215B639F0079B12E /* MNNAsmGlobal.h in Headers */,
4843AA5B22A7E9AB00889A63 /* CPUSoftmaxGrad.hpp in Headers */,
@@ -3365,6 +3375,7 @@
925A89122223951200D22428 /* MetalConvolutionActivation.metal in Sources */,
EBB38F1821E748B9005F76D7 /* ShapeNonMaxSuppressionV2.cpp in Sources */,
48BF21F421CA43AE00AFF78E /* MNNSamplerC4NearestOpt.S in Sources */,
EBD9FF15236A93AB00E188F5 /* CPUDetectionPostProcess.cpp in Sources */,
488876AA215B639F0079B12E /* MNNGemmInt16to32_4x4_Common.S in Sources */,
92369E64222544FE009D3A05 /* MetalConvolutionGEMM.metal in Sources */,
EBB38F3021E748B9005F76D7 /* ShapeProposal.cpp in Sources */,
@@ -3475,6 +3486,7 @@
924F131921A81C74006D46A4 /* MetalTranspose.mm in Sources */,
488876D0215B639F0079B12E /* MNNWinogradMatrixProductRight.S in Sources */,
48070742231E512D00528CE5 /* NeuralNetWorkOp.cpp in Sources */,
EBD9FF12236A939700E188F5 /* ShapeDetectionPostProcess.cpp in Sources */,
4807073F231E512D00528CE5 /* InsideExpr.cpp in Sources */,
48A8A63721D8A43D00C2B9A7 /* BufferAllocator.cpp in Sources */,
92D765AF2228188700178BE5 /* Interpreter.cpp in Sources */,
@@ -3719,6 +3731,7 @@
HEADER_SEARCH_PATHS = (
"${inherited}",
"${SRCROOT}/../../3rd_party/flatbuffers/include",
"${SRCROOT}/../../3rd_party/half",
);
INFOPLIST_FILE = "$(SRCROOT)/MNN/Info.plist";
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
@@ -3763,6 +3776,7 @@
HEADER_SEARCH_PATHS = (
"${inherited}",
"${SRCROOT}/../../3rd_party/flatbuffers/include",
"${SRCROOT}/../../3rd_party/half",
);
INFOPLIST_FILE = "$(SRCROOT)/MNN/Info.plist";
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
@@ -3792,7 +3806,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage;
CODE_SIGN_STYLE = Automatic;
DEVELOPMENT_TEAM = 6G7464HHUS;
DEVELOPMENT_TEAM = "";
HEADER_SEARCH_PATHS = (
"${inherited}",
"${SRCROOT}/../../3rd_party/flatbuffers/include",
@@ -3813,7 +3827,7 @@
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
ASSETCATALOG_COMPILER_LAUNCHIMAGE_NAME = LaunchImage;
CODE_SIGN_STYLE = Automatic;
DEVELOPMENT_TEAM = 6G7464HHUS;
DEVELOPMENT_TEAM = "";
HEADER_SEARCH_PATHS = (
"${inherited}",
"${SRCROOT}/../../3rd_party/flatbuffers/include",
16 changes: 2 additions & 14 deletions pymnn/pip_package/build_deps.py
@@ -18,22 +18,10 @@ def build_deps():
os.makedirs(cmake_build_dir)
os.chdir(cmake_build_dir)
if IS_WINDOWS:
os.system('cmake -G "Ninja" -DMNN_BUILD_QUANTOOLS=ON\
os.system('cmake -G "Ninja" -DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=on\
-DMNN_BUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release .. && ninja')
else:
os.system('cmake -DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_SHARED_LIBS=OFF .. && make -j4')
#build_converter_project
converter_dir = os.path.join(root_dir, "tools", "converter")
converter_build_dir = os.path.join(converter_dir, BUILD_DIR)
if os.path.exists(converter_build_dir):
shutil.rmtree(converter_build_dir)
os.makedirs(converter_build_dir)
os.chdir(converter_build_dir)
if IS_WINDOWS:
os.system('cmake -G "Ninja" -DMNN_BUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release .. && ninja')
else:
os.system('cmake -DMNN_BUILD_SHARED_LIBS=OFF .. && make -j4')
os.chdir(root_dir)
os.system('cmake -DMNN_BUILD_QUANTOOLS=ON -DMNN_BUILD_CONVERTER=on -DMNN_BUILD_SHARED_LIBS=OFF .. && make -j4')
################################################################################
# Building dependent libraries
################################################################################
6 changes: 3 additions & 3 deletions pymnn/pip_package/build_wheel.py
@@ -3,17 +3,17 @@
""" build wheel tool """
from __future__ import print_function
import os
import shutil
import platform
IS_WINDOWS = (platform.system() == 'Windows')
IS_DARWIN = (platform.system() == 'Darwin')
IS_LINUX = (platform.system() == 'Linux')
if __name__ == '__main__':
if os.path.exists('build'):
shutil.rmtree('build')
if IS_DARWIN:
os.system('rm -rf build')
os.system('python setup.py bdist_wheel')
if IS_LINUX:
os.system('rm -rf build')
os.system('python setup.py bdist_wheel --plat-name=manylinux1_x86_64')
if IS_WINDOWS:
os.system('rm -r -force build')
os.system('python setup.py bdist_wheel')
(remaining file diffs omitted)
