40 changes: 22 additions & 18 deletions graph_net/torch/backend/unstable_to_stable_backend.py
@@ -1,9 +1,18 @@
import os
import torch
import sys
from .graph_compiler_backend import GraphCompilerBackend


class UnstableToStableBackend(GraphCompilerBackend):
    def __init__(self):
        unstable_api = os.getenv("DISALLOWED_UNSTABLE_API", "").strip()
        if not unstable_api:
            raise RuntimeError(
                "❌ Environment variable DISALLOWED_UNSTABLE_API is not set.\n"
                "Please set it to the unstable API that should be disallowed before constructing this backend.\n"
            )
        self.unstable_api = unstable_api

    def __call__(self, model):
        # Perform unstable API check before running the model
        self.model = model
@@ -12,14 +21,14 @@ def __call__(self, model):
        return self.model

"""
TODO: 实现将 self.model 中的不稳定(unstable)API 转换为稳定(stable)API 的逻辑。
API 负责遍历 self.model,并将其中调用的实验性或不稳定接口替换为对应的稳定版本。
注意:此逻辑属于模型编译安全机制的重要组成部分,请勿随意修改或删除。
api命名规范:
<unstable>_to_<stable>
stable api链接:
TODO: Implement logic to convert unstable APIs in `self.model` into their stable counterparts.
This API is responsible for traversing `self.model` and replacing any calls to experimental or unstable interfaces with the corresponding stable versions.
Note: This logic is a critical component of the model compilation safety mechanism—do not modify or remove it without caution.

**API naming convention:**
`<unstable>_to_<stable>`

**Stable API reference link:**
"""

    def unstable_to_stable(self):
@@ -35,9 +44,6 @@ def check_unstable_api(self):
        This logic is part of the GraphNet compiler safety mechanism.
        Do NOT modify, remove, or bypass this check under any circumstances.
        """
        unstable_api = os.getenv("DISALLOWED_UNSTABLE_API", "").strip()
        if not unstable_api:
            return  # Skip check if no environment variable is set

        from torch.fx import symbolic_trace

@@ -50,14 +56,12 @@ def check_unstable_api(self):
        graph_text = str(self.model)

        # Search for the unstable API substring
        if unstable_api in graph_text:
            count = graph_text.count(unstable_api)
            raise RuntimeError(
                f"❌ Detected unstable API '{unstable_api}' '{count}' times in model graph.\n"
                f"Please replace it with a stable API before proceeding.\n"
            )
        if self.unstable_api in graph_text:
            count = graph_text.count(self.unstable_api)
            print(f"❌ Unstable API '{self.unstable_api}' occurs {count} times in the model graph.")
            sys.exit(-1)
        else:
            print(f"✅ Model passed: no occurrence of '{self.unstable_api}' found.")

    def synchronize(self):
        # Synchronize CUDA operations if available
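The conversion step left as a TODO above could plausibly be built on torch.fx: trace the model, rewrite matching call nodes, and recompile. The sketch below only illustrates that idea and is not the repository's implementation; the function name, its signature, and the idea of passing an unstable/stable callable pair are all assumptions.

# Illustrative only: one possible shape for an `<unstable>_to_<stable>` rewrite pass,
# assuming the model can be traced with torch.fx. The callable pair is hypothetical;
# real unstable->stable mappings would have to come from the stable API reference.
import torch
from torch.fx import symbolic_trace


def replace_unstable_call(model, unstable_fn, stable_fn):
    """Replace every call to `unstable_fn` in the traced graph with `stable_fn`."""
    gm = symbolic_trace(model)
    replaced = 0
    for node in gm.graph.nodes:
        # call_function nodes hold the callable itself in node.target
        if node.op == "call_function" and node.target is unstable_fn:
            node.target = stable_fn
            replaced += 1
    gm.recompile()  # regenerate Python code for the rewritten graph
    return gm, replaced

Whether a one-for-one swap like this is valid depends on the specific API pair; some replacements would also require rewriting the call's arguments.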
1 change: 1 addition & 0 deletions graph_net/torch/test_compiler.py
@@ -27,6 +27,7 @@
from graph_net import test_compiler_util



registry_backend = {
    "tvm": TvmBackend(),
    "xla": XlaBackend(),
70 changes: 70 additions & 0 deletions plot.sh
@@ -0,0 +1,70 @@
#!/bin/bash
# Batch-run the GraphNet benchmark for unstable_to_stable (_add_batch_dim).
# Read model paths from a file list, run the compile test for each, then convert the log to JSON.
if [ -z "$DISALLOWED_UNSTABLE_API" ]; then
    echo "❌ Environment variable DISALLOWED_UNSTABLE_API is not set!"
    echo "Please run: export DISALLOWED_UNSTABLE_API=<target_unstable_api>"
    exit 1
fi

# === Configuration ===
root_dir="/root/GraphNet/todo_works/unstable_api_to_stable_api/${DISALLOWED_UNSTABLE_API}"
file_list="${root_dir}/${DISALLOWED_UNSTABLE_API}_files.txt"
log_file="${root_dir}/log.log"
json_output_dir="${root_dir}/JSON_results"

# Set environment variable (benchmark path)
export GRAPH_NET_BENCHMARK_PATH="$root_dir"

# === Check the input file ===
if [ ! -f "$file_list" ]; then
    echo "❌ File not found: $file_list"
    exit 1
fi

# === Run the benchmark ===
echo "🚀 Starting benchmark..."
echo "Logs will be written to: $log_file"
# echo "--------------------------------------" > "$log_file"

while IFS= read -r model_path; do
    [ -z "$model_path" ] && continue

    echo "▶️ Running model: $model_path"

    python -m graph_net.torch.test_compiler \
        --model-path "/root/GraphNet/${model_path}/" \
        --compiler unstable_to_stable \
        >> "$log_file" 2>&1

    echo "✅ Done: $model_path"
    echo "--------------------------------------"
done < "$file_list"

echo "🎯 All models finished; log saved to: $log_file"

# === Convert log to JSON ===
echo "📦 Converting log to JSON..."
mkdir -p "$json_output_dir"

python -m graph_net.log2json \
    --log-file "$log_file" \
    --output-dir "$json_output_dir"

if [ $? -eq 0 ]; then
    echo "✅ JSON files generated: $json_output_dir"
else
    echo "⚠️ log2json failed; please check log.log"
fi

echo "📦 Converting JSON to result plots..."
python -m graph_net.S_analysis \
    --benchmark-path "$GRAPH_NET_BENCHMARK_PATH/JSON_results/" \
    --output-dir "$GRAPH_NET_BENCHMARK_PATH"

if [ $? -eq 0 ]; then
    echo "✅ Result plots generated: $GRAPH_NET_BENCHMARK_PATH"
else
    echo "❌ Failed to generate result plots"
fi
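Each `test_compiler` invocation in the loop above exercises the backend through the GraphNet harness. The snippet below is a minimal stand-alone sketch of the same check, assuming `graph_net` is importable; the toy model and the `_add_batch_dim` target are illustrative only.

# Minimal sketch of what one loop iteration exercises (assumptions: graph_net on
# PYTHONPATH; the target API name is only an example taken from the script comment).
import os
import torch

# Must be set before the backend is constructed, mirroring plot.sh's precondition.
os.environ["DISALLOWED_UNSTABLE_API"] = "_add_batch_dim"  # illustrative target

from graph_net.torch.backend.unstable_to_stable_backend import UnstableToStableBackend

backend = UnstableToStableBackend()
model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())  # toy model

# Runs the unstable-API check on the model's graph text; prints a pass message
# (or reports the occurrence count and exits) before returning the model.
checked = backend(model)
print(checked(torch.randn(2, 8)).shape)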
@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
@@ -0,0 +1,64 @@
samples/torchvision/convnext_base
samples/torchvision/convnext_large
samples/torchvision/convnext_small
samples/torchvision/convnext_tiny
samples/torchvision/efficientnet_b0
samples/torchvision/efficientnet_b1
samples/torchvision/efficientnet_b2
samples/torchvision/efficientnet_b3
samples/torchvision/efficientnet_b4
samples/torchvision/efficientnet_b5
samples/torchvision/efficientnet_b6
samples/torchvision/efficientnet_b7
samples/torchvision/efficientnet_v2_l
samples/torchvision/efficientnet_v2_m
samples/torchvision/efficientnet_v2_s
samples/torchvision/maxvit_t
samples/torchvision/swin_b
samples/torchvision/swin_v2_b
samples/torchvision/swin_v2_s
samples/torchvision/swin_v2_t
samples/transformers-auto-model/42dot_LLM-SFT-1.3B
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/HuggingFaceTB/SmolLM3-3B
samples/transformers-auto-model/Intel_zoedepth-kitti
samples/transformers-auto-model/Intel_zoedepth-nyu
samples/transformers-auto-model/LFM2-350M
samples/transformers-auto-model/LLaMmlein_120M
samples/transformers-auto-model/NDugar_deberta-v2-xlarge-mnli
samples/transformers-auto-model/Qwen1.5-0.5B
samples/transformers-auto-model/Qwen2.5-0.5B
samples/transformers-auto-model/Qwen3-Embedding-0.6B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/TinyLlama-1.1B-step-50K-105b
samples/transformers-auto-model/TinyLlama/TinyLlama-1.1B-Chat-v0.4
samples/transformers-auto-model/Tucano-2b4
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/baidu/ERNIE-4.5-0.3B-PT
samples/transformers-auto-model/deepseek-ai/deepseek-coder-1.3b-base
samples/transformers-auto-model/facebook_dpt-dinov2-giant-nyu
samples/transformers-auto-model/facebook_dpt-dinov2-small-kitti
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/google/gemma-1.1-2b-it
samples/transformers-auto-model/google/gemma-2b-it
samples/transformers-auto-model/google/gemma-3-1b-it
samples/transformers-auto-model/google/gemma-3-270m
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-BlipModel
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-CanineForSequenceClassification
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-PerceiverForImageClassificationConvProcessing
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-PerceiverForImageClassificationFourier
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-PerceiverForImageClassificationLearned
samples/transformers-auto-model/microsoft/Phi-3-mini-4k-instruct
samples/transformers-auto-model/microsoft/Phi-3.5-mini-instruct
samples/transformers-auto-model/microsoft/Phi-4-mini-instruct
samples/transformers-auto-model/microsoft/phi-1
samples/transformers-auto-model/microsoft/phi-1_5
samples/transformers-auto-model/microsoft/phi-2
samples/transformers-auto-model/mooncakex_img2
samples/transformers-auto-model/nli-deberta-v3-base
samples/transformers-auto-model/nli-deberta-v3-small
samples/transformers-auto-model/nli-deberta-v3-xsmall
samples/transformers-auto-model/orca_mini_3b
samples/transformers-auto-model/sarvam-0.5
@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
@@ -0,0 +1,13 @@
samples/transformers-auto-model/EleutherAI_pythia-1b
samples/transformers-auto-model/HuggingFaceTB/SmolLM3-3B
samples/transformers-auto-model/TinyLlama/TinyLlama-1.1B-Chat-v0.4
samples/transformers-auto-model/baidu/ERNIE-4.5-0.3B-PT
samples/transformers-auto-model/deepseek-ai/deepseek-coder-1.3b-base
samples/transformers-auto-model/google/gemma-1.1-2b-it
samples/transformers-auto-model/google/gemma-2b-it
samples/transformers-auto-model/google/gemma-3-1b-it
samples/transformers-auto-model/google/gemma-3-270m
samples/transformers-auto-model/microsoft/Phi-3-mini-4k-instruct
samples/transformers-auto-model/microsoft/phi-1
samples/transformers-auto-model/microsoft/phi-1_5
samples/transformers-auto-model/microsoft/phi-2
@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base