Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
e37d54e
feat: add blog_posts.json and local backup
yuneng-jiang Feb 22, 2026
be1a543
feat: add GetBlogPosts utility with GitHub fetch and local fallback
yuneng-jiang Feb 22, 2026
7e5c63c
test: add cache reset fixture and LITELLM_LOCAL_BLOG_POSTS test
yuneng-jiang Feb 22, 2026
021a909
feat: add GET /public/litellm_blog_posts endpoint
yuneng-jiang Feb 22, 2026
4b1ce1f
fix: log fallback warning in blog posts endpoint and tighten test
yuneng-jiang Feb 22, 2026
ca9111e
feat: add disable_show_blog to UISettings
yuneng-jiang Feb 22, 2026
70f8e97
feat: add useUISettings and useDisableShowBlog hooks
yuneng-jiang Feb 22, 2026
a82c8c1
fix: rename useUISettings to useUISettingsFlags to avoid naming colli…
yuneng-jiang Feb 22, 2026
98da524
fix: use existing useUISettings hook in useDisableShowBlog to avoid c…
yuneng-jiang Feb 22, 2026
e241e6f
feat: add BlogDropdown component with react-query and error/retry state
yuneng-jiang Feb 22, 2026
1c0cfd7
fix: enforce 5-post limit in BlogDropdown and add cap test
yuneng-jiang Feb 22, 2026
929d592
fix: add retry, stable post key, enabled guard in BlogDropdown
yuneng-jiang Feb 22, 2026
a0965d5
feat: add BlogDropdown to navbar after Docs link
yuneng-jiang Feb 22, 2026
1ecfbad
adjust blog posts to fetch from github first
yuneng-jiang Feb 23, 2026
94425df
fixing path
yuneng-jiang Feb 23, 2026
08f4a27
Merge remote-tracking branch 'origin' into litellm_blog_dropdown
yuneng-jiang Feb 23, 2026
54b7e1a
adjust blog post path
yuneng-jiang Feb 23, 2026
09cc3b8
ui changes
yuneng-jiang Feb 23, 2026
9f3fc49
adding tests
yuneng-jiang Feb 24, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions litellm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -339,6 +339,10 @@
"LITELLM_MODEL_COST_MAP_URL",
"https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json",
)
blog_posts_url: str = os.getenv(
"LITELLM_BLOG_POSTS_URL",
"https://raw.githubusercontent.com/BerriAI/litellm/main/litellm/blog_posts.json",
)
anthropic_beta_headers_url: str = os.getenv(
"LITELLM_ANTHROPIC_BETA_HEADERS_URL",
"https://raw.githubusercontent.com/BerriAI/litellm/main/litellm/anthropic_beta_headers_config.json",
Expand Down
10 changes: 10 additions & 0 deletions litellm/blog_posts.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{
"posts": [
{
"title": "Incident Report: SERVER_ROOT_PATH regression broke UI routing",
"description": "How a single line removal caused UI 404s for all deployments using SERVER_ROOT_PATH, and the tests we added to prevent it from happening again.",
"date": "2026-02-21",
"url": "https://docs.litellm.ai/blog/server-root-path-incident"
}
]
}
128 changes: 128 additions & 0 deletions litellm/litellm_core_utils/get_blog_posts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
"""
Pulls the latest LiteLLM blog posts from GitHub.

Falls back to the bundled local backup on any failure.
GitHub JSON URL is configured via litellm.blog_posts_url (or LITELLM_BLOG_POSTS_URL env var).

Disable remote fetching entirely:
export LITELLM_LOCAL_BLOG_POSTS=True
"""

import json
import os
import time
from importlib.resources import files
from typing import Any, Dict, List, Optional

import httpx
from pydantic import BaseModel

from litellm import verbose_logger

BLOG_POSTS_TTL_SECONDS: int = 3600 # 1 hour


class BlogPost(BaseModel):
    """A single blog post entry served by ``/public/litellm_blog_posts``."""

    title: str  # post headline
    description: str  # short summary text
    date: str  # publication date string (bundled data uses ISO format, e.g. "2026-02-21")
    url: str  # link to the full post


class BlogPostsResponse(BaseModel):
    """Response envelope for the blog posts endpoint: a list of ``BlogPost``."""

    posts: List[BlogPost]


class GetBlogPosts:
    """
    Fetches, validates, and caches LiteLLM blog posts.

    Mirrors the structure of GetModelCostMap:
    - Fetches from GitHub with a 5-second timeout
    - Validates the response has a non-empty ``posts`` list
    - Caches the result in-process for BLOG_POSTS_TTL_SECONDS (1 hour)
    - Falls back to the bundled local backup on any failure
    """

    # In-process cache: the last posts list successfully fetched from the
    # remote URL. NOTE(review): class-level mutable state shared across
    # threads with no lock — a TTL-expiry race can at worst trigger one
    # redundant remote fetch (see review discussion).
    _cached_posts: Optional[List[Dict[str, str]]] = None
    # time.time() of the last successful remote fetch (0.0 = never fetched).
    _last_fetch_time: float = 0.0
Comment on lines +47 to +48
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Class-level mutable cache is not thread-safe

_cached_posts and _last_fetch_time are class-level mutable variables shared across all threads. In a multi-threaded FastAPI deployment (or when using asyncio.to_thread), concurrent requests could create a race condition in get_blog_posts() where one thread reads _cached_posts while another thread is updating it after the TTL check. While the impact is low for a blog post list (worst case: an extra remote fetch), consider protecting these with a threading.Lock for correctness, particularly since the GetModelCostMap class this is modeled after has the same issue.

import threading

class GetBlogPosts:
    _cached_posts: Optional[List[Dict[str, str]]] = None
    _last_fetch_time: float = 0.0
    _lock: threading.Lock = threading.Lock()


@staticmethod
def load_local_blog_posts() -> List[Dict[str, str]]:
    """Load the bundled local backup blog posts shipped inside the package."""
    backup = files("litellm").joinpath("blog_posts.json")
    parsed = json.loads(backup.read_text(encoding="utf-8"))
    return parsed.get("posts", [])

@staticmethod
def fetch_remote_blog_posts(url: str, timeout: int = 5) -> dict:
    """
    Fetch blog posts JSON from a remote URL.

    Returns the parsed JSON body. Raises on HTTP errors (via
    ``raise_for_status``) and on network/parse failures.
    """
    resp = httpx.get(url, timeout=timeout)
    resp.raise_for_status()
    return resp.json()

@staticmethod
def validate_blog_posts(data: Any) -> bool:
"""Return True if data is a dict with a non-empty ``posts`` list."""
if not isinstance(data, dict):
verbose_logger.warning(
"LiteLLM: Blog posts response is not a dict (type=%s). "
"Falling back to local backup.",
type(data).__name__,
)
return False
posts = data.get("posts")
if not isinstance(posts, list) or len(posts) == 0:
verbose_logger.warning(
"LiteLLM: Blog posts response has no valid 'posts' list. "
"Falling back to local backup.",
)
return False
return True

@classmethod
def get_blog_posts(cls, url: str) -> List[Dict[str, str]]:
    """
    Return the blog posts list.

    Uses the in-process cache if within BLOG_POSTS_TTL_SECONDS.
    Fetches from ``url`` otherwise. On fetch or validation failure,
    prefers serving the *stale* in-process cache (newer, previously
    validated remote data) over the bundled local backup; the backup
    is only used when nothing was ever fetched successfully.
    """
    # Escape hatch: force the bundled backup and skip remote fetching.
    if os.getenv("LITELLM_LOCAL_BLOG_POSTS", "").lower() == "true":
        return cls.load_local_blog_posts()

    now = time.time()
    # Snapshot the cache reference once — avoids reading a half-updated
    # pair of (_cached_posts, _last_fetch_time) from a concurrent thread
    # (no lock; worst case is one redundant fetch).
    cached = cls._cached_posts
    if cached is not None and (now - cls._last_fetch_time) < BLOG_POSTS_TTL_SECONDS:
        return cached

    try:
        data = cls.fetch_remote_blog_posts(url)
    except Exception as e:
        verbose_logger.warning(
            "LiteLLM: Failed to fetch blog posts from %s: %s. "
            "Falling back to cached/local backup.",
            url,
            str(e),
        )
        # Stale cache beats the bundled backup: it already passed
        # validation on a previous successful fetch and is newer.
        return cached if cached is not None else cls.load_local_blog_posts()

    if not cls.validate_blog_posts(data):
        return cached if cached is not None else cls.load_local_blog_posts()

    posts = data["posts"]
    cls._cached_posts = posts
    cls._last_fetch_time = now
    return posts


def get_blog_posts(url: str) -> List[Dict[str, str]]:
    """Module-level convenience wrapper around ``GetBlogPosts.get_blog_posts``."""
    return GetBlogPosts.get_blog_posts(url=url)
32 changes: 32 additions & 0 deletions litellm/proxy/public_endpoints/public_endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,16 @@
import os
from typing import List

import litellm
from fastapi import APIRouter, Depends, HTTPException

from litellm._logging import verbose_logger
from litellm.litellm_core_utils.get_blog_posts import (
BlogPost,
BlogPostsResponse,
GetBlogPosts,
get_blog_posts,
)
from litellm.proxy._types import CommonProxyErrors
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
from litellm.types.agents import AgentCard
Expand Down Expand Up @@ -193,6 +201,30 @@ async def get_litellm_model_cost_map():
)


@router.get(
    "/public/litellm_blog_posts",
    tags=["public"],
    response_model=BlogPostsResponse,
)
async def get_litellm_blog_posts():
    """
    Public endpoint to get the latest LiteLLM blog posts.

    Fetches from GitHub with a 1-hour in-process cache.
    Falls back to the bundled local backup on any failure,
    including a malformed remote payload.
    """
    # Local import keeps this diff self-contained; asyncio is stdlib.
    import asyncio

    try:
        # get_blog_posts() performs a *synchronous* httpx.get on cache
        # misses (up to 5s timeout); run it in a worker thread so it
        # cannot stall the event loop for other requests.
        posts_data = await asyncio.to_thread(get_blog_posts, url=litellm.blog_posts_url)
        # Validate inside the try: a malformed remote post should trigger
        # the local fallback, not a 500 from response serialization.
        posts = [BlogPost(**p) for p in posts_data[:5]]
    except Exception as e:
        verbose_logger.warning(
            "LiteLLM: get_litellm_blog_posts endpoint fallback triggered: %s", str(e)
        )
        posts_data = GetBlogPosts.load_local_blog_posts()
        posts = [BlogPost(**p) for p in posts_data[:5]]

    return BlogPostsResponse(posts=posts)
Comment on lines +209 to +225
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Synchronous HTTP call blocks the async event loop

get_litellm_blog_posts is an async endpoint, but get_blog_posts() internally calls httpx.get() — a synchronous, blocking HTTP call with a 5-second timeout. When the cache has expired (every hour), this will block the entire asyncio event loop for up to 5 seconds, stalling all other concurrent requests being served by that worker.

Unlike get_litellm_model_cost_map (which reads a pre-loaded in-memory dict), this endpoint triggers a live HTTP fetch on each cache miss.

Consider either:

  • Running the blocking call in a thread via asyncio.to_thread:
import asyncio
posts_data = await asyncio.to_thread(get_blog_posts, url=litellm.blog_posts_url)
  • Or switching GetBlogPosts.fetch_remote_blog_posts to use httpx.AsyncClient and making get_blog_posts an async function.



@router.get(
"/public/agents/fields",
tags=["public", "[beta] Agents"],
Expand Down
80 changes: 80 additions & 0 deletions tests/proxy_unit_tests/test_blog_posts_endpoint.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
"""Tests for the /public/litellm_blog_posts endpoint."""
from unittest.mock import patch

import pytest
from fastapi.testclient import TestClient

# Minimal single-post fixture payload shared by the endpoint tests below.
SAMPLE_POSTS = [
    {
        "title": "Test Post",
        "description": "A test post.",
        "date": "2026-01-01",
        "url": "https://www.litellm.ai/blog/test",
    }
]


@pytest.fixture
def client():
    """Build a TestClient around a bare app containing only the public router."""
    from fastapi import FastAPI

    from litellm.proxy.public_endpoints.public_endpoints import router

    test_app = FastAPI()
    test_app.include_router(router)
    return TestClient(test_app)


def test_get_blog_posts_returns_response_shape(client):
    """Endpoint surfaces the mocked post with all four fields intact."""
    patched_fetcher = patch(
        "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
        return_value=SAMPLE_POSTS,
    )
    with patched_fetcher:
        response = client.get("/public/litellm_blog_posts")

    assert response.status_code == 200
    body = response.json()
    assert "posts" in body
    assert len(body["posts"]) == 1
    entry = body["posts"][0]
    assert entry["title"] == "Test Post"
    assert entry["description"] == "A test post."
    assert entry["date"] == "2026-01-01"
    assert entry["url"] == "https://www.litellm.ai/blog/test"


def test_get_blog_posts_limits_to_five(client):
    """Endpoint returns at most 5 posts."""
    excess_posts = [
        {
            "title": f"Post {idx}",
            "description": "desc",
            "date": "2026-01-01",
            "url": f"https://www.litellm.ai/blog/{idx}",
        }
        for idx in range(10)
    ]

    patched_fetcher = patch(
        "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
        return_value=excess_posts,
    )
    with patched_fetcher:
        response = client.get("/public/litellm_blog_posts")

    assert response.status_code == 200
    assert len(response.json()["posts"]) == 5


def test_get_blog_posts_returns_local_backup_on_failure(client):
    """Endpoint returns local backup (non-empty list) when fetcher fails."""
    failing_fetcher = patch(
        "litellm.proxy.public_endpoints.public_endpoints.get_blog_posts",
        side_effect=Exception("fetch failed"),
    )
    with failing_fetcher:
        response = client.get("/public/litellm_blog_posts")

    # Must not 500 — the endpoint falls back to the bundled local backup.
    assert response.status_code == 200
    payload = response.json()
    assert "posts" in payload
    assert len(payload["posts"]) > 0
Loading
Loading