-
-
Notifications
You must be signed in to change notification settings - Fork 6.4k
[Feature] UI - Blog Dropdown in Navbar #21859
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
e37d54e
be1a543
7e5c63c
021a909
4b1ce1f
ca9111e
70f8e97
a82c8c1
98da524
e241e6f
1c0cfd7
929d592
a0965d5
1ecfbad
94425df
08f4a27
54b7e1a
09cc3b8
9f3fc49
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,10 @@ | ||
| { | ||
| "posts": [ | ||
| { | ||
| "title": "Incident Report: SERVER_ROOT_PATH regression broke UI routing", | ||
| "description": "How a single line removal caused UI 404s for all deployments using SERVER_ROOT_PATH, and the tests we added to prevent it from happening again.", | ||
| "date": "2026-02-21", | ||
| "url": "https://docs.litellm.ai/blog/server-root-path-incident" | ||
| } | ||
| ] | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,128 @@ | ||
| """ | ||
| Pulls the latest LiteLLM blog posts from GitHub. | ||
|
|
||
| Falls back to the bundled local backup on any failure. | ||
| GitHub JSON URL is configured via litellm.blog_posts_url (or LITELLM_BLOG_POSTS_URL env var). | ||
|
|
||
| Disable remote fetching entirely: | ||
| export LITELLM_LOCAL_BLOG_POSTS=True | ||
| """ | ||
|
|
||
| import json | ||
| import os | ||
| import time | ||
| from importlib.resources import files | ||
| from typing import Any, Dict, List, Optional | ||
|
|
||
| import httpx | ||
| from pydantic import BaseModel | ||
|
|
||
| from litellm import verbose_logger | ||
|
|
||
| BLOG_POSTS_TTL_SECONDS: int = 3600 # 1 hour | ||
|
|
||
|
|
||
class BlogPost(BaseModel):
    # A single blog post entry as served by /public/litellm_blog_posts.
    # `date` is a plain string (the bundled backup uses ISO "YYYY-MM-DD";
    # no parsing/validation is done here).
    title: str
    description: str
    date: str
    url: str
|
|
||
|
|
||
class BlogPostsResponse(BaseModel):
    # Response envelope for the blog-posts endpoint: {"posts": [...]}.
    posts: List[BlogPost]
|
|
||
|
|
||
class GetBlogPosts:
    """
    Fetches, validates, and caches LiteLLM blog posts.

    Mirrors the structure of GetModelCostMap:
    - Fetches from GitHub with a 5-second timeout
    - Validates the response has a non-empty ``posts`` list
    - Caches the result in-process for BLOG_POSTS_TTL_SECONDS (1 hour)
    - Falls back to the bundled local backup on any failure

    Thread-safety: the class-level cache is shared across all threads
    (e.g. when the endpoint dispatches via ``asyncio.to_thread``), so all
    reads/writes of ``_cached_posts`` / ``_last_fetch_time`` are guarded by
    ``_cache_lock``. The remote fetch itself happens outside the lock; the
    worst case under contention is a redundant fetch, never a torn read.
    """

    # In-process cache; access only while holding _cache_lock.
    _cached_posts: Optional[List[Dict[str, str]]] = None
    _last_fetch_time: float = 0.0
    _cache_lock: threading.Lock = threading.Lock()

    @staticmethod
    def load_local_blog_posts() -> List[Dict[str, str]]:
        """Load the bundled local backup blog posts (litellm/blog_posts.json)."""
        content = json.loads(
            files("litellm")
            .joinpath("blog_posts.json")
            .read_text(encoding="utf-8")
        )
        # Missing "posts" key in the backup yields an empty list rather than a KeyError.
        return content.get("posts", [])

    @staticmethod
    def fetch_remote_blog_posts(url: str, timeout: int = 5) -> dict:
        """
        Fetch blog posts JSON from a remote URL.

        Args:
            url: JSON document URL (e.g. raw GitHub file).
            timeout: request timeout in seconds.

        Returns the parsed response. Raises on network/parse errors
        (httpx transport errors, non-2xx status, invalid JSON).
        """
        response = httpx.get(url, timeout=timeout)
        response.raise_for_status()
        return response.json()

    @staticmethod
    def validate_blog_posts(data: Any) -> bool:
        """Return True if data is a dict with a non-empty ``posts`` list."""
        if not isinstance(data, dict):
            verbose_logger.warning(
                "LiteLLM: Blog posts response is not a dict (type=%s). "
                "Falling back to local backup.",
                type(data).__name__,
            )
            return False
        posts = data.get("posts")
        if not isinstance(posts, list) or len(posts) == 0:
            verbose_logger.warning(
                "LiteLLM: Blog posts response has no valid 'posts' list. "
                "Falling back to local backup.",
            )
            return False
        return True

    @classmethod
    def get_blog_posts(cls, url: str) -> List[Dict[str, str]]:
        """
        Return the blog posts list.

        Uses the in-process cache if within BLOG_POSTS_TTL_SECONDS.
        Fetches from ``url`` otherwise, falling back to local backup on failure.

        Setting LITELLM_LOCAL_BLOG_POSTS=True skips remote fetching entirely.
        """
        if os.getenv("LITELLM_LOCAL_BLOG_POSTS", "").lower() == "true":
            return cls.load_local_blog_posts()

        now = time.time()
        with cls._cache_lock:
            cached = cls._cached_posts
            if cached is not None and (now - cls._last_fetch_time) < BLOG_POSTS_TTL_SECONDS:
                return cached

        # Fetch outside the lock so a slow network call does not serialize
        # concurrent readers; a duplicate fetch on a cold cache is acceptable.
        try:
            data = cls.fetch_remote_blog_posts(url)
        except Exception as e:
            verbose_logger.warning(
                "LiteLLM: Failed to fetch blog posts from %s: %s. "
                "Falling back to local backup.",
                url,
                str(e),
            )
            return cls.load_local_blog_posts()

        if not cls.validate_blog_posts(data):
            return cls.load_local_blog_posts()

        posts = data["posts"]
        with cls._cache_lock:
            cls._cached_posts = posts
            cls._last_fetch_time = now
        return posts
|
|
||
|
|
||
def get_blog_posts(url: str) -> List[Dict[str, str]]:
    """Public entry point — returns the blog posts list.

    Thin wrapper around GetBlogPosts.get_blog_posts; see that method for
    caching, validation, and local-backup fallback behavior.
    """
    return GetBlogPosts.get_blog_posts(url=url)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2,8 +2,16 @@ | |
| import os | ||
| from typing import List | ||
|
|
||
| import litellm | ||
| from fastapi import APIRouter, Depends, HTTPException | ||
|
|
||
| from litellm._logging import verbose_logger | ||
| from litellm.litellm_core_utils.get_blog_posts import ( | ||
| BlogPost, | ||
| BlogPostsResponse, | ||
| GetBlogPosts, | ||
| get_blog_posts, | ||
| ) | ||
| from litellm.proxy._types import CommonProxyErrors | ||
| from litellm.proxy.auth.user_api_key_auth import user_api_key_auth | ||
| from litellm.types.agents import AgentCard | ||
|
|
@@ -193,6 +201,30 @@ async def get_litellm_model_cost_map(): | |
| ) | ||
|
|
||
|
|
||
@router.get(
    "/public/litellm_blog_posts",
    tags=["public"],
    response_model=BlogPostsResponse,
)
async def get_litellm_blog_posts():
    """
    Public endpoint to get the latest LiteLLM blog posts.

    Fetches from GitHub with a 1-hour in-process cache.
    Falls back to the bundled local backup on any failure.
    Returns at most 5 posts.
    """
    try:
        # get_blog_posts performs a synchronous httpx.get; run it in a
        # worker thread so a slow fetch cannot block the event loop.
        posts_data = await asyncio.to_thread(get_blog_posts, url=litellm.blog_posts_url)
        # Shape/validate inside the try so a malformed remote payload
        # (missing/extra keys) also falls back instead of returning a 500.
        posts = [BlogPost(**p) for p in posts_data[:5]]
    except Exception as e:
        verbose_logger.warning(
            "LiteLLM: get_litellm_blog_posts endpoint fallback triggered: %s", str(e)
        )
        posts_data = GetBlogPosts.load_local_blog_posts()
        posts = [BlogPost(**p) for p in posts_data[:5]]
    return BlogPostsResponse(posts=posts)
|
Comment on lines
+209
to
+225
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Synchronous HTTP call blocks the async event loop
Unlike the other public endpoints, this handler performs a synchronous `httpx.get` (inside `get_blog_posts`) from within an `async def`, which blocks the event loop for up to the 5-second timeout. Consider either:
import asyncio
posts_data = await asyncio.to_thread(get_blog_posts, url=litellm.blog_posts_url)
|
||
|
|
||
|
|
||
| @router.get( | ||
| "/public/agents/fields", | ||
| tags=["public", "[beta] Agents"], | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,80 @@ | ||
| """Tests for the /public/litellm_blog_posts endpoint.""" | ||
| from unittest.mock import patch | ||
|
|
||
| import pytest | ||
| from fastapi.testclient import TestClient | ||
|
|
||
| SAMPLE_POSTS = [ | ||
| { | ||
| "title": "Test Post", | ||
| "description": "A test post.", | ||
| "date": "2026-01-01", | ||
| "url": "https://www.litellm.ai/blog/test", | ||
| } | ||
| ] | ||
|
|
||
|
|
||
@pytest.fixture
def client():
    """Create a TestClient with just the public_endpoints router.

    Imports are done lazily inside the fixture so collecting the test
    module does not require the full proxy app to be importable.
    """
    from fastapi import FastAPI

    from litellm.proxy.public_endpoints.public_endpoints import router

    app = FastAPI()
    app.include_router(router)
    return TestClient(app)
|
|
||
|
|
||
def test_get_blog_posts_returns_response_shape(client):
    """Endpoint wraps the fetched posts in the BlogPostsResponse shape."""
    target = "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts"
    with patch(target, return_value=SAMPLE_POSTS):
        resp = client.get("/public/litellm_blog_posts")

    assert resp.status_code == 200
    body = resp.json()
    assert "posts" in body
    posts = body["posts"]
    assert len(posts) == 1
    expected = SAMPLE_POSTS[0]
    for field in ("title", "description", "date", "url"):
        assert posts[0][field] == expected[field]
|
|
||
|
|
||
def test_get_blog_posts_limits_to_five(client):
    """Endpoint returns at most 5 posts."""
    fetched = [
        {
            "title": f"Post {idx}",
            "description": "desc",
            "date": "2026-01-01",
            "url": f"https://www.litellm.ai/blog/{idx}",
        }
        for idx in range(10)
    ]

    target = "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts"
    with patch(target, return_value=fetched):
        resp = client.get("/public/litellm_blog_posts")

    assert resp.status_code == 200
    assert len(resp.json()["posts"]) == 5
|
|
||
|
|
||
def test_get_blog_posts_returns_local_backup_on_failure(client):
    """Endpoint returns local backup (non-empty list) when fetcher fails."""
    target = "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts"
    with patch(target, side_effect=Exception("fetch failed")):
        resp = client.get("/public/litellm_blog_posts")

    # Should not 500 — returns local backup
    assert resp.status_code == 200
    body = resp.json()
    assert "posts" in body
    assert len(body["posts"]) > 0
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Class-level mutable cache is not thread-safe
`_cached_posts` and `_last_fetch_time` are class-level mutable variables shared across all threads. In a multi-threaded FastAPI deployment (or when using `asyncio.to_thread`), concurrent requests could create a race condition in `get_blog_posts()` where one thread reads `_cached_posts` while another thread is updating it after the TTL check. While the impact is low for a blog post list (worst case: an extra remote fetch), consider protecting these with a `threading.Lock` for correctness, particularly since the `GetModelCostMap` class this is modeled after has the same issue.