
Commit e092855

Merge branch 'main' into split_tests
Signed-off-by: Yanchao Lu <[email protected]>
2 parents: 302bd1d + 96ff82e


248 files changed: 6,677 additions and 3,383 deletions


.gitattributes

Lines changed: 2 additions & 0 deletions
@@ -7,3 +7,5 @@
 triton_backend/tools/gpt/input_data.json filter=lfs diff=lfs merge=lfs -text
 *cubin.cpp filter=lfs diff=lfs merge=lfs -text
 docs/source/blogs/media/tech_blog3_mla_absorb.png filter=lfs diff=lfs merge=lfs -text
+tests/integration/test_input_files/*.png filter=lfs diff=lfs merge=lfs -text
+tests/integration/test_input_files/*.jpg filter=lfs diff=lfs merge=lfs -text

.github/CODEOWNERS

Lines changed: 2 additions & 2 deletions
@@ -50,8 +50,8 @@
 /tests/unittest/_torch/compilation @NVIDIA/trt-llm-torch-graph-compiler
 /tests/unittest/_torch/multi_gpu/test_ar_residual_norm.py @NVIDIA/trt-llm-torch-graph-compiler
 /tests/unittest/_torch/multi_gpu/test_user_buffers.py @NVIDIA/trt-llm-torch-graph-compiler
-/tests/unittest/_torch/test_custom_ops.py @NVIDIA/trt-llm-torch-graph-compiler
-/tests/unittest/_torch/test_autotuner.py @NVIDIA/trt-llm-torch-graph-compiler
+/tests/unittest/_torch/thop/test_custom_ops.py @NVIDIA/trt-llm-torch-graph-compiler
+/tests/unittest/_torch/misc/test_autotuner.py @NVIDIA/trt-llm-torch-graph-compiler

 ## TensorRT-LLM Pytorch - Attention
 /tensorrt_llm/_torch/attention_backend @NVIDIA/trt-llm-torch-attention-devs

.github/workflows/blossom-ci.yml

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@ jobs:
       startsWith(github.event.comment.body, '/bot skip --comment') ||
       startsWith(github.event.comment.body, '/bot reuse-pipeline') ||
       startsWith(github.event.comment.body, '/bot kill')) && contains(
43-
fromJson('["byshiue","chuangz0","funatiq","hypdeb","jdemouth-nvidia","joyang-nv","lowsfer","Tabrizian","yweng0828","Shixiaowei02","MartinMarciniszyn","schetlur-nv","dcampora","pcastonguay","Naveassaf","lfr-0531","nekorobov","PerkzZheng","kaiyux","nv-guomingz","LinPoly","thorjohnsen","jiahanc","latency1024","tburt-nv","zeroepoch","chzblych","niukuo","ZhanruiSunCh","EmmaQiaoCh","yiqingy0","achartier","suyoggupta","amukkara","mk-nvidia","QiJune","lucaslie","davidmlw","hlu1","nvzhou","syuoni","NVGaryJi","symphonylyh","hello-11","zongfeijing","Jackch-NV","jinyangyuan-nvidia","LarryXFly","crazydemo","jaedeok-nvidia","wm2012011492","rosenrodt","zhuoyao1012","xinhe-nv","Yuening-wa","Shunkangz","zhengd-nv","yibinl-nvidia","StanleySun639","KingsleyLiu-NV","kxdc","yingcanw","BestJuly","ChristinaZ","bobboli","xueweilnvidia","kunlunl","cherichy","lucifer1004","Autumn1998","litaotju","peaceh-nv","liji-nv","SimengLiu-nv","yuxianq","yechank-nvidia","vallis-neria","DylanChen-NV","Tracin","zhhuang-nv","ISEEKYAN","xupinjie","tongyuantongyu","laikhtewari","zhuolingwang","dominicshanshan","jershi425","shifangx","StudyingShao","Superjomn","dongjiyingdjy","guangyunh-nv","wili-65535","tiffany940107","DanBlanaru","mikeiovine","djns99","ruodil","xiaoweiw-nv","xuwchen","bashimao","yizhang-nv","hyukn","nvpohanh","yuki-666","juney-nvidia","barry-delaney","Kefeng-Duan","MinaHuai","yilin-void","jhaotingc","jmydurant","katec846","CarstyYou","Njuapp","Jie-Fang","nvbrantz","inocsin","ruoqianguo","chenfeiz0326","ming-wei","eopXD","longlee0622","dongfengy","georgeliu95","evezhier","rakib-hasan","shangz-ai","JyChang012","wangsiping1997","yuanjings-nvda","tomeras91","roikoren755","amirkl94","shaharmor98","danielafrimi","amitz-nv","hijkzzz","rzilberstein-nvidia","dc3671","hchings","yuhengxnv","dongxuy04","qiaoxj07","omera-nv","DomBrown","brb-nv","FrankD412","yuhsuan-t","Fridah-nv","a-mccarthy","HuiGao-NV","alexmsettle","meenchen","sugunav14","cjluo-nv","kyleliang-nv","chang-l","WeiHaocheng","qixiang-99","BatshevaBlack","ebarilanM","xmchen1987","lingjiew","heyuhhh","netanel-haber","jiefangz-nv","wyw1267","yunruis","sklevtsov-nvidia","jgangani","pamelap-nvidia","ixlmar","GalSha","Dido0o0","rabiel","nvzhihanj","milesial","fzmu727","zackyoray","RoeyAzran1992","viraatc","v-shobhit","yuanjingx87","uchihatmtkinu","nvrohanv","vegaluisjose","qsang-nv","ChunhuanLin","timlee0212","venkywonka","zbpatel","tijyojwad","shyeh25","zihaok","nv-yilinf","ttyio","farazkh80","yuantailing","JennyLiu-nv","moraxu","IzzyPutterman","nvchenghaoz","nvxuanyuc","poweiw","stnie","zhanga5","nzmora-nvidia","greg-kwasniewski1","linda-stadter","Tom-Zheng","vanshilshah97","ixlmar","MatthiasKohl","Wanli-Jiang", "arekay", "davidclark-nv", "2ez4bz", "tcherckez-nvidia", "MrGeva", "galagam", "limin2021", "dhansen-nvidia","talorabr","kanghui0204","wu6u3tw","hvagadia","xavier-nvidia","raayandhar","dbari","nvjullin","elvischenv","zhenhuaw-me","weireweire","yifeizhang-c","jiaganc","ziyixiong-nv","FelixXidddd","JunyiXu-nv","bo-nv","zerollzeng","RayenTian","ameynaik-hub","raymochen","shuyixiong","johncalesp","leslie-fang25","reasonsolo","zhou-yuxin","vadiklyutiy","yali-arch","NVShreyas","h-guo18","pengbowang-nv","lancelly","heyuhhh","mayani-nv","flin3500","sunnyqgg","kris1025"]'),
43+
fromJson('["byshiue","chuangz0","funatiq","hypdeb","jdemouth-nvidia","joyang-nv","lowsfer","Tabrizian","yweng0828","Shixiaowei02","MartinMarciniszyn","schetlur-nv","dcampora","pcastonguay","Naveassaf","lfr-0531","nekorobov","PerkzZheng","kaiyux","nv-guomingz","LinPoly","thorjohnsen","jiahanc","latency1024","tburt-nv","zeroepoch","chzblych","niukuo","ZhanruiSunCh","EmmaQiaoCh","yiqingy0","achartier","suyoggupta","amukkara","mk-nvidia","QiJune","lucaslie","davidmlw","hlu1","nvzhou","syuoni","NVGaryJi","symphonylyh","hello-11","zongfeijing","Jackch-NV","jinyangyuan-nvidia","LarryXFly","crazydemo","jaedeok-nvidia","wm2012011492","rosenrodt","zhuoyao1012","xinhe-nv","Yuening-wa","Shunkangz","zhengd-nv","yibinl-nvidia","StanleySun639","KingsleyLiu-NV","kxdc","yingcanw","BestJuly","ChristinaZ","bobboli","xueweilnvidia","kunlunl","cherichy","lucifer1004","Autumn1998","litaotju","peaceh-nv","liji-nv","SimengLiu-nv","yuxianq","yechank-nvidia","vallis-neria","DylanChen-NV","Tracin","zhhuang-nv","ISEEKYAN","xupinjie","tongyuantongyu","laikhtewari","zhuolingwang","dominicshanshan","jershi425","shifangx","StudyingShao","Superjomn","dongjiyingdjy","guangyunh-nv","wili-65535","tiffany940107","DanBlanaru","mikeiovine","djns99","ruodil","xiaoweiw-nv","xuwchen","bashimao","yizhang-nv","hyukn","nvpohanh","yuki-666","juney-nvidia","barry-delaney","Kefeng-Duan","MinaHuai","yilin-void","jhaotingc","jmydurant","katec846","CarstyYou","Njuapp","Jie-Fang","nvbrantz","inocsin","ruoqianguo","chenfeiz0326","ming-wei","eopXD","longlee0622","dongfengy","georgeliu95","evezhier","rakib-hasan","shangz-ai","JyChang012","wangsiping1997","yuanjings-nvda","tomeras91","roikoren755","amirkl94","shaharmor98","danielafrimi","amitz-nv","hijkzzz","rzilberstein-nvidia","dc3671","hchings","yuhengxnv","dongxuy04","qiaoxj07","omera-nv","DomBrown","brb-nv","FrankD412","yuhsuan-t","Fridah-nv","a-mccarthy","HuiGao-NV","alexmsettle","meenchen","sugunav14","cjluo-nv","kyleliang-nv","chang-l","WeiHaocheng","qixiang-99","BatshevaBlack","ebarilanM","xmchen1987","lingjiew","heyuhhh","netanel-haber","jiefangz-nv","wyw1267","yunruis","sklevtsov-nvidia","jgangani","pamelap-nvidia","ixlmar","GalSha","Dido0o0","rabiel","nvzhihanj","milesial","fzmu727","zackyoray","RoeyAzran1992","viraatc","v-shobhit","yuanjingx87","uchihatmtkinu","nvrohanv","vegaluisjose","qsang-nv","ChunhuanLin","timlee0212","venkywonka","zbpatel","tijyojwad","shyeh25","zihaok","nv-yilinf","ttyio","farazkh80","yuantailing","JennyLiu-nv","moraxu","IzzyPutterman","nvchenghaoz","nvxuanyuc","poweiw","stnie","zhanga5","nzmora-nvidia","greg-kwasniewski1","linda-stadter","Tom-Zheng","vanshilshah97","ixlmar","MatthiasKohl","Wanli-Jiang", "arekay", "davidclark-nv", "2ez4bz", "tcherckez-nvidia", "MrGeva", "galagam", "limin2021", "dhansen-nvidia","talorabr","kanghui0204","wu6u3tw","hvagadia","xavier-nvidia","raayandhar","dbari","nvjullin","elvischenv","zhenhuaw-me","weireweire","yifeizhang-c","jiaganc","ziyixiong-nv","FelixXidddd","JunyiXu-nv","bo-nv","zerollzeng","RayenTian","ameynaik-hub","raymochen","shuyixiong","johncalesp","leslie-fang25","reasonsolo","zhou-yuxin","vadiklyutiy","yali-arch","NVShreyas","h-guo18","pengbowang-nv","lancelly","heyuhhh","mayani-nv","flin3500","sunnyqgg","kris1025", "karljang"]'),
       github.actor)
     steps:
       - name: Check if comment is issued by authorized person

README.md

Lines changed: 4 additions & 4 deletions
@@ -9,7 +9,7 @@ TensorRT-LLM
 [![python](https://img.shields.io/badge/python-3.10-green)](https://www.python.org/downloads/release/python-31012/)
 [![cuda](https://img.shields.io/badge/cuda-12.9.1-green)](https://developer.nvidia.com/cuda-downloads)
 [![trt](https://img.shields.io/badge/TRT-10.11.0-green)](https://developer.nvidia.com/tensorrt)
-[![version](https://img.shields.io/badge/release-1.1.0rc1-green)](./tensorrt_llm/version.py)
+[![version](https://img.shields.io/badge/release-1.1.0rc2-green)](./tensorrt_llm/version.py)
 [![license](https://img.shields.io/badge/license-Apache%202-blue)](./LICENSE)

 [Architecture](./docs/source/torch/arch_overview.md)&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;[Performance](./docs/source/performance/perf-overview.md)&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;[Examples](https://nvidia.github.io/TensorRT-LLM/quick-start-guide.html)&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;[Documentation](./docs/source/)&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;[Roadmap](https://github.com/NVIDIA/TensorRT-LLM/issues?q=is%3Aissue%20state%3Aopen%20label%3Aroadmap)
@@ -18,10 +18,9 @@ TensorRT-LLM
 <div align="left">

 ## Tech Blogs
-* [08/06] Running a High Performance GPT-OSS-120B Inference Server with TensorRT-LLM
+* [08/05] Running a High-Performance GPT-OSS-120B Inference Server with TensorRT-LLM
 [➡️ link](./docs/source/blogs/tech_blog/blog9_Deploying_GPT_OSS_on_TRTLLM.md)

-
 * [08/01] Scaling Expert Parallelism in TensorRT-LLM (Part 2: Performance Status and Optimization)
 [➡️ link](./docs/source/blogs/tech_blog/blog8_Scaling_Expert_Parallelism_in_TensorRT-LLM_part2.md)

@@ -44,6 +43,7 @@ TensorRT-LLM
 [➡️ link](./docs/source/blogs/tech_blog/blog1_Pushing_Latency_Boundaries_Optimizing_DeepSeek-R1_Performance_on_NVIDIA_B200_GPUs.md)

 ## Latest News
+* [08/05] 🌟 TensorRT-LLM delivers Day-0 support for OpenAI's latest open-weights models: GPT-OSS-120B [➡️ link](https://huggingface.co/openai/gpt-oss-120b) and GPT-OSS-20B [➡️ link](https://huggingface.co/openai/gpt-oss-20b)
 * [07/15] 🌟 TensorRT-LLM delivers Day-0 support for LG AI Research's latest model, EXAONE 4.0 [➡️ link](https://huggingface.co/LGAI-EXAONE/EXAONE-4.0-32B)
 * [06/17] Join NVIDIA and DeepInfra for a developer meetup on June 26 ✨ [➡️ link](https://events.nvidia.com/scaletheunscalablenextgenai)
 * [05/22] Blackwell Breaks the 1,000 TPS/User Barrier With Meta’s Llama 4 Maverick
@@ -253,5 +253,5 @@ Deprecation is used to inform developers that some APIs and tools are no longer
 ## Useful Links
 - [Quantized models on Hugging Face](https://huggingface.co/collections/nvidia/model-optimizer-66aa84f7966b3150262481a4): A growing collection of quantized (e.g., FP8, FP4) and optimized LLMs, including [DeepSeek FP4](https://huggingface.co/nvidia/DeepSeek-R1-FP4), ready for fast inference with TensorRT-LLM.
 - [NVIDIA Dynamo](https://github.com/ai-dynamo/dynamo): A datacenter scale distributed inference serving framework that works seamlessly with TensorRT-LLM.
-- [AutoDeploy](./examples/auto_deploy/README.md): A prototype backend for TensorRT-LLM to simplify and accelerate the deployment of PyTorch models.
+- [AutoDeploy](https://nvidia.github.io/TensorRT-LLM/torch/auto_deploy/auto-deploy.html): A prototype backend for TensorRT-LLM to simplify and accelerate the deployment of PyTorch models.
 - [WeChat Discussion Group](https://github.com/NVIDIA/TensorRT-LLM/issues/5359): A real-time channel for TensorRT-LLM Q&A and news.

cpp/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ add_compile_definitions("TLLM_GEN_EXPORT_INTERFACE")
 add_compile_definitions("TLLM_ENABLE_CUDA")

 set(BINDING_TYPE
-    "pybind"
+    "nanobind"
     CACHE STRING
     "Binding type of Python bindings for C++ runtime and batch manager")

cpp/include/tensorrt_llm/batch_manager/createNewDecoderRequests.h

Lines changed: 0 additions & 32 deletions
@@ -24,7 +24,6 @@
 #include "tensorrt_llm/runtime/common.h"
 #include "tensorrt_llm/runtime/iTensor.h"
 #include "tensorrt_llm/runtime/modelConfig.h"
-#include "tensorrt_llm/runtime/request.h"
 #include "tensorrt_llm/runtime/worldConfig.h"

 namespace tensorrt_llm::runtime
@@ -88,37 +87,6 @@ class CreateNewDecoderRequests : Algorithm
         SizeType32 maxSequenceLength, OptionalRef<MedusaBuffers const> medusaBuffers) const;

 private:
-    //! @brief Setups decoder internal tensors for new speculative decoding request
-    static void newRequestSpeculativeDecoding(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
-        SamplingConfig const& samplingConfig, runtime::ModelConfig const& modelConfig,
-        DecodingInput& jointDecodingInput, DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream,
-        CudaStream const& decoderStream, SpeculativeDecodingMode const& speculativeDecodingMode,
-        SizeType32 maxDecodingEngineTokens);
-
-    //! @brief Setups decoder internal tensors for new request in Draft model Sps mode
-    static void newRequestDraftTokensExternal(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
-        SamplingConfig const& samplingConfig, DecodingInput& jointDecodingInput, CudaStream const& decoderStream);
-
-    //! @brief Setups decoder internal tensors for new Medusa request
-    static void newRequestMedusa(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
-        DecodingInput& jointDecodingInput, CudaStream const& decoderStream, SizeType32 maxDecodingEngineTokens);
-
-    //! @brief Setups decoder internal tensors for new Lookahead request
-    static void newRequestLookahead(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
-        DecodingInput& jointDecodingInput, DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream);
-
-    //! @brief Setups decoder internal tensors for new Explicit draft tokens request
-    static void newRequestExplicitDraftTokens(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
-        DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream);
-
-    //! @brief Setups decoder internal tensors for new Eagle request
-    static void newRequestEagle(SizeType32 batchIdx, runtime::decoder_batch::Request const& request,
-        runtime::ModelConfig const& modelConfig, DecodingOutput& jointDecodingOutput, CudaStream const& runtimeStream);
-
-    [[nodiscard]] std::shared_ptr<runtime::ITensor> retrieveDraftLogits(runtime::ModelConfig const& modelConfig,
-        runtime::WorldConfig const& worldConfig, std::shared_ptr<runtime::ITensor> const& tensor,
-        runtime::BufferManager const& bufferManager) const;
-
     bool mSpeculativeDecodingFastLogits;
     bool mIsLeaderInOrchMode;
     bool mIsNormalizeLogProbs;

cpp/include/tensorrt_llm/batch_manager/kvCacheManager.h

Lines changed: 16 additions & 8 deletions
@@ -551,7 +551,7 @@ class WindowBlockManager
         GenerationRequest& sequence, SizeType32 inputLength, SizeType32 numContextBlocks, LlmRequest& llmRequest);

     //! \brief Assign blocks for new sequence. Does not try to reuse blocks.
-    void addSequence(GenerationRequest& sequence, SizeType32 numBlocks, SizeType32 unsharedBlockIdx);
+    void addSequence(GenerationRequest& sequence, SizeType32 numContextBlocks, bool isShareLastContextBlock);

     //! \brief Allocate new block for each beam of the sequence.
     //! \details Might free cached blocks if no free blocks are available.
@@ -869,8 +869,13 @@ class BlockManager
     void addSequence(GenerationRequest& sequence, SizeType32 inputLength, SizeType32 numContextBlocks,
         LlmRequest& llmRequest, SizeType32 windowSize);

+    //! \brief Assign blocks for a new sequence.
+    //! \param sequence The GenerationRequest to process.
+    //! \param numContextBlocks Number of context blocks to allocate.
+    //! \param windowSize Attention window size
+    //! \param isShareLastContextBlock If true, the last context block is shared among beams.
     void addSequence(
-        GenerationRequest& sequence, SizeType32 numBlocks, SizeType32 unsharedBlockIdx, SizeType32 windowSize);
+        GenerationRequest& sequence, SizeType32 numContextBlocks, SizeType32 windowSize, bool isShareLastContextBlock);

     void allocateBlock(GenerationRequest& sequence, SizeType32 windowSize);

@@ -1106,6 +1111,15 @@ class BlockManager
         return mWindowBlockManagers.at(windowSize).getPool(relativePoolIndex);
     }

+    //! \brief Update cache offsets for blocks initiated from sequence
+    void updateSequenceCacheBlockOffsets(GenerationRequest& seq, SizeType32 windowSize);
+
+    //! \brief Update cache offsets for last block
+    void updateLastCacheBlockOffsets(GenerationRequest& seq, SizeType32 windowSize);
+
+    //! \brief Update cache offsets for block at index
+    void updateCacheBlockOffsetsAtIdx(GenerationRequest& seq, SizeType32 windowSize, SizeType32 blockIdx);
+
 private:
     [[nodiscard]] WindowBlockManager const& windowManagerByLayer(SizeType32 layerIdx) const
     {
@@ -1637,12 +1651,6 @@ class KVCacheManager : public BaseKVCacheManager
     [[nodiscard]] static SizeType32 calculateMaxAttentionWindow(SizeType32 inputLength, SizeType32 outputLength,
         SizeType32 sinkTokenLength, SizeType32 blockCapacity, SizeType32 beamWidth, SizeType32 tokensPerBlock);

-private:
-    void cacheBlockOffsets(GenerationRequest& seq, SizeType32 windowSize);
-    void cacheNewBlockOffsets(GenerationRequest& seq, SizeType32 windowSize);
-    void updateNewBlockPointer(GenerationRequest& seq, SizeType32 windowSize, SizeType32 blockIdx);
-    void updateToken(GenerationRequest& sequence, bool addToken);
-
 private:
     // Maximum number of sequences
     SizeType32 mMaxNumSequences;
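
The `BlockManager::addSequence` rework above replaces the positional `unsharedBlockIdx` argument with an explicit `isShareLastContextBlock` flag. The following is a minimal, self-contained sketch of what that flag expresses, built only from the parameter names and Doxygen comments in this hunk; the types and the allocation logic are placeholders, not the real TensorRT-LLM implementation, which is not shown in this excerpt.

```cpp
#include <cstdint>
#include <vector>

using SizeType32 = std::int32_t;

// Placeholder stand-ins for GenerationRequest/BlockManager, used only to illustrate
// the new parameter: context blocks are shared across beams, except possibly the
// last one, which stays per-beam when isShareLastContextBlock is false.
struct GenerationRequestSketch
{
    SizeType32 beamWidth{1};
    std::vector<SizeType32> blockIds;
};

struct BlockManagerSketch
{
    SizeType32 nextBlockId{0};

    void addSequence(GenerationRequestSketch& sequence, SizeType32 numContextBlocks, SizeType32 /*windowSize*/,
        bool isShareLastContextBlock)
    {
        for (SizeType32 i = 0; i + 1 < numContextBlocks; ++i)
        {
            sequence.blockIds.push_back(nextBlockId++); // shared by all beams
        }
        if (isShareLastContextBlock)
        {
            sequence.blockIds.push_back(nextBlockId++); // last context block shared as well
        }
        else
        {
            for (SizeType32 beam = 0; beam < sequence.beamWidth; ++beam)
            {
                sequence.blockIds.push_back(nextBlockId++); // one private block per beam
            }
        }
    }
};

int main()
{
    BlockManagerSketch manager;
    GenerationRequestSketch request;
    request.beamWidth = 2;
    manager.addSequence(request, /*numContextBlocks=*/4, /*windowSize=*/4096, /*isShareLastContextBlock=*/false);
    return static_cast<int>(request.blockIds.size()); // 3 shared blocks + 2 per-beam blocks
}
```

Compared with the old `unsharedBlockIdx` parameter, the boolean lets call sites state the sharing intent directly instead of pointing at a block index.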

cpp/include/tensorrt_llm/batch_manager/llmRequest.h

Lines changed: 1 addition & 1 deletion
@@ -1110,7 +1110,7 @@ class GenericLlmRequest

     [[nodiscard]] SizeType32 getNumDraftTokens() const
     {
-        return mDraftTokens->size();
+        return hasDraftTokens() ? mDraftTokens->size() : 0;
     }

     void discardDraftTokens(SizeType32 numTokensToDiscard)
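
The guarded accessor above makes `getNumDraftTokens()` report zero when a request carries no draft tokens instead of unconditionally dereferencing `mDraftTokens`. A minimal sketch of the same pattern, with placeholder types rather than the real `GenericLlmRequest`, and assuming `mDraftTokens` is a shared pointer that may be unset or empty for non-speculative requests:

```cpp
#include <cstdint>
#include <memory>
#include <vector>

using SizeType32 = std::int32_t;
using VecTokens = std::vector<SizeType32>;

struct RequestSketch
{
    std::shared_ptr<VecTokens> mDraftTokens; // may stay unset for non-speculative requests

    [[nodiscard]] bool hasDraftTokens() const
    {
        return mDraftTokens && !mDraftTokens->empty();
    }

    [[nodiscard]] SizeType32 getNumDraftTokens() const
    {
        // Mirrors the guarded accessor above: no draft tokens means zero,
        // never a dereference of an empty pointer.
        return hasDraftTokens() ? static_cast<SizeType32>(mDraftTokens->size()) : 0;
    }
};

int main()
{
    RequestSketch request;                                     // no draft tokens attached
    SizeType32 const numTokens = request.getNumDraftTokens();  // 0, not undefined behavior
    return numTokens;
}
```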

cpp/include/tensorrt_llm/common/logger.h

Lines changed: 7 additions & 6 deletions
@@ -54,20 +54,21 @@ class Logger

 #if defined(_MSC_VER)
     template <typename... Args>
-    void log(Level level, char const* format, Args const&... args);
+    void log(Level const level, char const* format, Args const&... args);

     template <typename... Args>
-    void log(Level level, int rank, char const* format, Args const&... args);
+    void log(Level const level, int const rank, char const* format, Args const&... args);
 #else
     template <typename... Args>
-    void log(Level level, char const* format, Args const&... args) __attribute__((format(printf, 3, 0)));
+    void log(Level const level, char const* format, Args const&... args) __attribute__((format(printf, 3, 0)));

     template <typename... Args>
-    void log(Level level, int rank, char const* format, Args const&... args) __attribute__((format(printf, 4, 0)));
+    void log(Level const level, int const rank, char const* format, Args const&... args)
+        __attribute__((format(printf, 4, 0)));
 #endif

     template <typename... Args>
-    void log(Level level, std::string const& format, Args const&... args)
+    void log(Level const level, std::string const& format, Args const&... args)
     {
         return log(level, format.c_str(), args...);
     }
@@ -134,7 +135,7 @@ class Logger
 };

 template <typename... Args>
-void Logger::log(Logger::Level level, char const* format, Args const&... args)
+void Logger::log(Logger::Level const level, char const* format, Args const&... args)
 {
     if (isEnabled(level))
     {
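
The Logger edits above only add `const` to by-value parameters, but the `__attribute__((format(printf, 3, 0)))` and `(printf, 4, 0)` annotations they carry are easy to misread: for a non-static member function, GCC counts the implicit `this` pointer as argument 1, so the format string of `log(Level, char const*, ...)` is argument 3 (argument 4 for the overload that also takes a rank), and the trailing `0` asks the compiler to validate only the format string, not the forwarded arguments, which is the usual choice when the arguments are a template parameter pack. Below is a standalone, hypothetical example of the same attribute on a true variadic member function, where the final index is non-zero so the arguments are checked as well; it is an illustration, not the TensorRT-LLM logger.

```cpp
#include <cstdarg>
#include <cstdio>

class MiniLogger
{
public:
    // this = arg 1, level = arg 2, fmt = arg 3, variadic arguments start at 4.
    void log(int level, char const* fmt, ...) __attribute__((format(printf, 3, 4)));
};

void MiniLogger::log(int level, char const* fmt, ...)
{
    std::va_list args;
    va_start(args, fmt);
    std::fprintf(stderr, "[level %d] ", level);
    std::vfprintf(stderr, fmt, args);
    std::fputc('\n', stderr);
    va_end(args);
}

int main()
{
    MiniLogger logger;
    logger.log(1, "%s finished in %d ms", "decode", 42); // format string checked at compile time
    // logger.log(1, "%s finished in %d ms", 42);         // would trigger -Wformat with GCC/Clang
    return 0;
}
```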
