/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <tvm/driver/driver_api.h>
#include <tvm/ir/type.h>
#include <tvm/node/reflection.h>
#include <tvm/runtime/metadata.h>
#include <tvm/runtime/module.h>
#include <tvm/target/target.h>
#include <tvm/te/operation.h>

#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>

TEST(CCodegen, MainFunctionOrder) {
  using namespace tvm;
  using namespace tvm::te;

  std::string tvm_module_main = std::string(runtime::symbol::tvm_module_main);

  tvm::Target target_c = tvm::Target("c -keys=cpu -link-params=0");

  const int n = 4;
  Array<PrimExpr> shape{n};

  auto A = placeholder(shape, DataType::Float(32), "A");
  auto B = placeholder(shape, DataType::Float(32), "B");

  auto elemwise_add = compute(
      A->shape, [&A, &B](PrimExpr i) { return A[i] + B[i]; }, "elemwise_add");

  auto fcreate = [=]() {
    With<Target> target_scope(target_c);
    return create_schedule({elemwise_add->op});
  };

  auto args = Array<Tensor>({A, B, elemwise_add});

  std::unordered_map<Tensor, Buffer> binds;
  auto lowered = LowerSchedule(fcreate(), args, "elemwise_add", binds);
  Map<tvm::Target, IRModule> inputs = {{target_c, lowered}};
  runtime::Module module = build(inputs, Target());
  Array<String> functions = module->GetFunction("get_func_names", false)();

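  // The entry point (tvm_module_main) is expected to come after all operator functions.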
  EXPECT_EQ(functions.back(), tvm_module_main);
}

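// Lowers a one-dimensional elementwise-add schedule named `op_name` for the given target.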
auto BuildLowered(const std::string& op_name, tvm::Target target) {
  using namespace tvm;
  using namespace tvm::te;

  // The shape of the input tensors.
  const int n = 4;
  Array<PrimExpr> shape{n};

  auto A = placeholder(shape, DataType::Float(32), "A");
  auto B = placeholder(shape, DataType::Float(32), "B");

  auto op = compute(
      A->shape, [&A, &B](PrimExpr i) { return A[i] + B[i]; }, op_name);

  auto fcreate_s = [=]() {
    With<Target> target_scope(target);
    return create_schedule({op->op});
  };

  auto args = Array<Tensor>({A, B, op});
  std::unordered_map<Tensor, Buffer> binds;
  auto lowered_s = LowerSchedule(fcreate_s(), args, op_name, binds);
  return lowered_s;
}

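// Returns true when the function names collected across all modules in `inputs` appear in
// sorted order.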
bool IsSorted(const tvm::Map<tvm::Target, tvm::IRModule>& inputs) {
  std::vector<std::string> schedule_names;
  for (auto const& kv : inputs) {
    for (auto const& func : kv.second->functions) {
      schedule_names.push_back(func.first->name_hint);
    }
  }
  return std::is_sorted(schedule_names.begin(), schedule_names.end());
}

TEST(CCodegen, FunctionOrder) {
  using testing::_;
  using testing::ElementsAre;
  using testing::StrEq;
  using namespace tvm;
  using namespace tvm::te;

  Target target = Target("c -keys=cpu -link-params=0");

  // Add the schedules in reverse order: op_2 first, then op_1.
  Map<tvm::Target, IRModule> inputs;
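  // NOTE: each Set() call below constructs a fresh Target object. Map keys that are not
  // Strings compare by object identity, so each lowered module stays a separate entry
  // instead of overwriting the previous one.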
  inputs.Set(Target("c -keys=cpu -link-params=0"), BuildLowered("op_2", target));
  inputs.Set(Target("c -keys=cpu -link-params=0"), BuildLowered("op_1", target));

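  // If, by chance, the map iteration order is already sorted, keep adding entries until it
  // is not, so that the EXPECT_FALSE below (and hence the ordering check) is meaningful.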
  for (uint32_t counter = 99; IsSorted(inputs) && counter > 0; counter--) {
    std::string op_name = "op_" + std::to_string(counter);
    inputs.Set(Target("c -keys=cpu -link-params=0"), BuildLowered(op_name, target));
  }

  EXPECT_FALSE(IsSorted(inputs));

  auto module = build(inputs, Target());
  Array<String> func_array = module->GetFunction("get_func_names", false)();
  std::vector<std::string> functions{func_array.begin(), func_array.end()};
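  // Regardless of insertion order, the generated functions should come out sorted by name.
  // The `_` placeholders leave room for whatever auxiliary symbols codegen emits alongside
  // each operator.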
  EXPECT_THAT(functions, ElementsAre(StrEq("op_1"), _, StrEq("op_2"), _));
}