@@ -94,35 +94,27 @@ def get_commits_since_tag(tag: str) -> list[dict]:
     return []
 
 
-def analyze_with_openai(commits_summary: str, current_version: str) -> dict:
-    """Use OpenAI to analyze commits."""
-    api_key = os.getenv("OPENAI_API_KEY")
-    if not api_key:
-        raise ValueError("OPENAI_API_KEY not set")
-
-    model = os.getenv("OPENAI_MODEL", "gpt-4o")
-
-    from openai import OpenAI
-
-    client = OpenAI(api_key=api_key)
-
-    prompt = f"""You are analyzing git commits for a CUDA kernel library called FlashInfer to determine the appropriate semantic version bump.
+def build_analysis_prompt(commits_summary: str, current_version: str) -> str:
+    """Build the AI analysis prompt (shared by all AI providers)."""
+    return f"""You are analyzing git commits for a CUDA kernel library called FlashInfer to determine the appropriate semantic version bump.
 
 Current version: {current_version}
 
-Semantic versioning rules for this project (from CONTRIBUTING.md):
-- MAJOR increment: incompatible API changes (breaking changes to public APIs)
-- MINOR increment: added functionality that is backwards-compatible (new kernels, new features, new SM support, etc.)
-- PATCH increment: backwards-compatible bug fixes (both functional and performance fixes)
+Versioning rules for this project (from CONTRIBUTING.md):
+FlashInfer follows a "right-shifted" versioning scheme (major.minor.patch[.post1]):
+- MAJOR increment: architectural milestones and/or incompatible API changes (breaking changes to public APIs), similar to PyTorch 2.0
+- MINOR increment: significant backwards-compatible new features (major functionality additions)
+- PATCH increment: small backwards-compatible features (e.g. new kernels, new SM support, etc.) and backwards-compatible bug fixes
+- POST (e.g. .post1): optional suffix for quick follow-up release with just backwards-compatible bug fixes (not used in this analysis)
 
 Here are the commits since the last release:
 
 {commits_summary}
 
 Please analyze these commits and determine:
-1. Whether there are any breaking API changes (MAJOR bump needed)
-2. Whether there are new features or backwards-compatible functionality additions (MINOR bump needed)
-3. Whether there are only bug fixes without new features (PATCH bump needed)
+1. Whether there are any breaking API changes or architectural milestones (MAJOR bump needed)
+2. Whether there are significant new features (MINOR bump needed)
+3. Whether there are small features or bug fixes (PATCH bump needed)
 4. If no significant changes, return "none"
 
 Respond in JSON format:
@@ -133,13 +125,35 @@ def analyze_with_openai(commits_summary: str, current_version: str) -> dict:
 }}
 
 Important considerations:
+- New kernel implementations, new SM support, performance improvements are PATCH-level changes
+- MINOR bumps are for significant/major feature additions only, not incremental improvements
 - Internal refactoring, test updates, documentation changes alone don't warrant a version bump
-- Performance improvements are considered bug fixes (PATCH)
-- New kernel implementations or new features are MINOR bumps
 - API signature changes or removed functionality are MAJOR bumps
 - Focus on changes that affect users of the library, not internal changes
 """
 
+
+def extract_json_from_response(text: str) -> str:
+    """Extract JSON from response that might be wrapped in markdown code blocks."""
+    json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
+    if json_match:
+        return json_match.group(1)
+    return text
+
+
+def analyze_with_openai(commits_summary: str, current_version: str) -> dict:
+    """Use OpenAI to analyze commits."""
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise ValueError("OPENAI_API_KEY not set")
+
+    model = os.getenv("OPENAI_MODEL", "gpt-4o")
+
+    from openai import OpenAI
+
+    client = OpenAI(api_key=api_key)
+    prompt = build_analysis_prompt(commits_summary, current_version)
+
     response = client.chat.completions.create(
         model=model,
         messages=[
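
To make the "right-shifted" scheme described in the prompt above concrete, here is a small, self-contained sketch of how each bump type would map onto a version string. The `bump` helper and the sample version are illustrative only and are not part of this commit; the optional `.post1` suffix is left out, just as the prompt excludes it from the analysis.

```python
# Illustrative only: applies the right-shifted rules stated in the prompt.
def bump(version: str, bump_type: str) -> str:
    major, minor, patch = (int(part) for part in version.split(".")[:3])
    if bump_type == "major":  # breaking API change / architectural milestone
        return f"{major + 1}.0.0"
    if bump_type == "minor":  # significant backwards-compatible feature
        return f"{major}.{minor + 1}.0"
    if bump_type == "patch":  # new kernel, new SM support, bug or perf fix
        return f"{major}.{minor}.{patch + 1}"
    return version  # "none": nothing release-worthy

assert bump("0.2.5", "patch") == "0.2.6"
assert bump("0.2.5", "minor") == "0.3.0"
assert bump("0.2.5", "major") == "1.0.0"
```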
@@ -168,40 +182,7 @@ def analyze_with_claude(commits_summary: str, current_version: str) -> dict:
     from anthropic import Anthropic
 
     client = Anthropic(api_key=api_key)
-
-    prompt = f"""You are analyzing git commits for a CUDA kernel library called FlashInfer to determine the appropriate semantic version bump.
-
-Current version: {current_version}
-
-Semantic versioning rules for this project (from CONTRIBUTING.md):
-- MAJOR increment: incompatible API changes (breaking changes to public APIs)
-- MINOR increment: added functionality that is backwards-compatible (new kernels, new features, new SM support, etc.)
-- PATCH increment: backwards-compatible bug fixes (both functional and performance fixes)
-
-Here are the commits since the last release:
-
-{commits_summary}
-
-Please analyze these commits and determine:
-1. Whether there are any breaking API changes (MAJOR bump needed)
-2. Whether there are new features or backwards-compatible functionality additions (MINOR bump needed)
-3. Whether there are only bug fixes without new features (PATCH bump needed)
-4. If no significant changes, return "none"
-
-Respond in JSON format:
-{{
-    "bump_type": "major|minor|patch|none",
-    "reasoning": "Detailed explanation of your decision",
-    "key_changes": ["list of most important changes that influenced the decision"]
-}}
-
-Important considerations:
-- Internal refactoring, test updates, documentation changes alone don't warrant a version bump
-- Performance improvements are considered bug fixes (PATCH)
-- New kernel implementations or new features are MINOR bumps
-- API signature changes or removed functionality are MAJOR bumps
-- Focus on changes that affect users of the library, not internal changes
-"""
+    prompt = build_analysis_prompt(commits_summary, current_version)
 
     response = client.messages.create(
         model=model,
@@ -211,11 +192,7 @@ def analyze_with_claude(commits_summary: str, current_version: str) -> dict:
     )
 
     result_text = response.content[0].text.strip()
-
-    # Extract JSON from response (might be wrapped in markdown code blocks)
-    json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", result_text, re.DOTALL)
-    if json_match:
-        result_text = json_match.group(1)
+    result_text = extract_json_from_response(result_text)
 
     return json.loads(result_text)
 
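
Every provider now runs its raw reply through the same two-step decode: strip an optional markdown fence, then `json.loads` the remainder. Below is a self-contained sketch of that flow; `extract_json` and the sample reply are illustrative stand-ins for the new `extract_json_from_response` helper, and the pattern is rebuilt locally without literal backtick fences so the snippet stays easy to embed.

```python
import json
import re

# Mirrors the helper added in this commit: pull JSON out of an optional
# ```json ... ``` fence, otherwise pass the text through unchanged.
FENCE = "`" * 3
CODE_BLOCK_RE = re.compile(FENCE + r"(?:json)?\s*(\{.*?\})\s*" + FENCE, re.DOTALL)

def extract_json(text: str) -> str:
    match = CODE_BLOCK_RE.search(text)
    return match.group(1) if match else text

# A fenced reply is unwrapped; a bare JSON reply is returned as-is.
reply = f'{FENCE}json\n{{"bump_type": "patch", "reasoning": "bug fixes only"}}\n{FENCE}'
assert json.loads(extract_json(reply)) == {"bump_type": "patch", "reasoning": "bug fixes only"}
assert extract_json('{"bump_type": "none"}') == '{"bump_type": "none"}'
```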
@@ -232,39 +209,7 @@ def analyze_with_gemini(commits_summary: str, current_version: str) -> dict:
 
     genai.configure(api_key=api_key)
 
-    prompt = f"""You are analyzing git commits for a CUDA kernel library called FlashInfer to determine the appropriate semantic version bump.
-
-Current version: {current_version}
-
-Semantic versioning rules for this project (from CONTRIBUTING.md):
-- MAJOR increment: incompatible API changes (breaking changes to public APIs)
-- MINOR increment: added functionality that is backwards-compatible (new kernels, new features, new SM support, etc.)
-- PATCH increment: backwards-compatible bug fixes (both functional and performance fixes)
-
-Here are the commits since the last release:
-
-{commits_summary}
-
-Please analyze these commits and determine:
-1. Whether there are any breaking API changes (MAJOR bump needed)
-2. Whether there are new features or backwards-compatible functionality additions (MINOR bump needed)
-3. Whether there are only bug fixes without new features (PATCH bump needed)
-4. If no significant changes, return "none"
-
-Respond in JSON format:
-{{
-    "bump_type": "major|minor|patch|none",
-    "reasoning": "Detailed explanation of your decision",
-    "key_changes": ["list of most important changes that influenced the decision"]
-}}
-
-Important considerations:
-- Internal refactoring, test updates, documentation changes alone don't warrant a version bump
-- Performance improvements are considered bug fixes (PATCH)
-- New kernel implementations or new features are MINOR bumps
-- API signature changes or removed functionality are MAJOR bumps
-- Focus on changes that affect users of the library, not internal changes
-"""
+    prompt = build_analysis_prompt(commits_summary, current_version)
 
     model = genai.GenerativeModel(model_name)
 
@@ -276,15 +221,44 @@ def analyze_with_gemini(commits_summary: str, current_version: str) -> dict:
     )
 
     result_text = response.text.strip()
-
-    # Extract JSON from response (might be wrapped in markdown code blocks)
-    json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", result_text, re.DOTALL)
-    if json_match:
-        result_text = json_match.group(1)
+    result_text = extract_json_from_response(result_text)
 
     return json.loads(result_text)
 
 
+def try_ai_provider(
+    provider_name: str,
+    analyzer_func,
+    commits_summary: str,
+    current_version: str,
+    model_env_var: str,
+    default_model: str,
+    install_package: str,
+):
+    """
+    Try to use an AI provider with standardized error handling.
+
+    Returns: (success: bool, result: dict or None)
+    """
+    try:
+        print(f"Trying {provider_name}...", file=sys.stderr)
+        result = analyzer_func(commits_summary, current_version)
+        model = os.getenv(model_env_var, default_model)
+        print(f"Successfully used {provider_name} (model: {model})", file=sys.stderr)
+        return True, result
+    except ImportError:
+        print(
+            f"{provider_name} package not installed. Install with: pip install {install_package}",
+            file=sys.stderr,
+        )
+    except ValueError as e:
+        print(f"{provider_name} not available: {e}", file=sys.stderr)
+    except Exception as e:
+        print(f"Error calling {provider_name} API: {e}", file=sys.stderr)
+
+    return False, None
+
+
 def analyze_with_ai(commits: list[dict], current_version: str) -> dict:
     """
     Use AI to analyze commits and determine version bump.
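
As a quick illustration of the new helper's contract, the sketch below calls `try_ai_provider` with a stub analyzer that fails the way a missing API key would. It assumes the snippet lives in the same script as the function defined in the hunk above; the stub names and values are made up.

```python
# Stub standing in for analyze_with_openai / analyze_with_claude / analyze_with_gemini.
def stub_analyzer(commits_summary: str, current_version: str) -> dict:
    raise ValueError("STUB_API_KEY not set")  # simulates a missing API key

success, result = try_ai_provider(
    "Stub Provider", stub_analyzer,
    "abc1234 fix: handle empty commit list", "0.2.5",
    "STUB_MODEL", "stub-model-1", "stub-package",
)
# The ValueError is caught and reported on stderr; the caller only sees:
assert (success, result) == (False, None)
```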
@@ -302,62 +276,38 @@ def analyze_with_ai(commits: list[dict], current_version: str) -> dict:
         ]
     )
 
-    # Try OpenAI first
-    try:
-        print("Trying OpenAI...", file=sys.stderr)
-        result = analyze_with_openai(commits_summary, current_version)
-        print(
-            f"Successfully used OpenAI (model: {os.getenv('OPENAI_MODEL', 'gpt-4o')})",
-            file=sys.stderr,
-        )
-        return result
-    except ImportError:
-        print(
-            "OpenAI package not installed. Install with: pip install openai",
-            file=sys.stderr,
-        )
-    except ValueError as e:
-        print(f"OpenAI not available: {e}", file=sys.stderr)
-    except Exception as e:
-        print(f"Error calling OpenAI API: {e}", file=sys.stderr)
-
-    # Try Claude second
-    try:
-        print("Trying Anthropic Claude...", file=sys.stderr)
-        result = analyze_with_claude(commits_summary, current_version)
-        print(
-            f"Successfully used Anthropic Claude (model: {os.getenv('CLAUDE_MODEL', 'claude-3-5-sonnet-20241022')})",
-            file=sys.stderr,
-        )
-        return result
-    except ImportError:
-        print(
-            "Anthropic package not installed. Install with: pip install anthropic",
-            file=sys.stderr,
-        )
-    except ValueError as e:
-        print(f"Claude not available: {e}", file=sys.stderr)
-    except Exception as e:
-        print(f"Error calling Claude API: {e}", file=sys.stderr)
-
-    # Try Gemini third
-    try:
-        print("Trying Google Gemini...", file=sys.stderr)
-        result = analyze_with_gemini(commits_summary, current_version)
-        print(
-            f"Successfully used Google Gemini (model: {os.getenv('GEMINI_MODEL', 'gemini-2.0-flash-exp')})",
-            file=sys.stderr,
-        )
-        return result
-    except ImportError:
-        print(
-            "Gemini package not installed. Install with: pip install google-generativeai",
-            file=sys.stderr,
+    # Define AI providers to try in order
+    providers = [
+        ("OpenAI", analyze_with_openai, "OPENAI_MODEL", "gpt-4o", "openai"),
+        (
+            "Anthropic Claude",
+            analyze_with_claude,
+            "CLAUDE_MODEL",
+            "claude-3-5-sonnet-20241022",
+            "anthropic",
+        ),
+        (
+            "Google Gemini",
+            analyze_with_gemini,
+            "GEMINI_MODEL",
+            "gemini-2.0-flash-exp",
+            "google-generativeai",
+        ),
+    ]
+
+    # Try each provider in order
+    for name, analyzer, model_env, default_model, package in providers:
+        success, result = try_ai_provider(
+            name,
+            analyzer,
+            commits_summary,
+            current_version,
+            model_env,
+            default_model,
+            package,
         )
-    except ValueError as e:
-        print(f"Gemini not available: {e}", file=sys.stderr)
-    except Exception as e:
-        print(f"Error calling Gemini API: {e}", file=sys.stderr)
+        if success:
+            return result
 
     # Fallback to basic analysis
     print(
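
Taken together, the refactor replaces three copy-pasted try/except blocks with a provider table and a single loop. The following self-contained miniature shows that pattern end to end with stub providers (none of these names are FlashInfer's actual code): the first provider fails as an unavailable backend would, the second succeeds, and the `for`/`else` supplies the fallback when every provider fails.

```python
import sys

def try_provider(name: str, analyzer, payload: str):
    """Standardized error handling: report failures, return (success, result)."""
    try:
        print(f"Trying {name}...", file=sys.stderr)
        return True, analyzer(payload)
    except ImportError:
        print(f"{name} package not installed", file=sys.stderr)
    except ValueError as e:
        print(f"{name} not available: {e}", file=sys.stderr)
    except Exception as e:
        print(f"Error calling {name}: {e}", file=sys.stderr)
    return False, None

def provider_a(payload: str) -> dict:
    raise ValueError("API key not set")  # first provider is unavailable

def provider_b(payload: str) -> dict:
    return {"bump_type": "patch"}        # second provider succeeds

for name, analyzer in [("A", provider_a), ("B", provider_b)]:
    success, result = try_provider(name, analyzer, "commit summary")
    if success:
        break
else:
    result = {"bump_type": "none"}       # fallback when every provider fails

assert result == {"bump_type": "patch"}
```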