We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
2 parents d2fb888 + 516351f — commit 9222169 (copy full SHA for 9222169)
llama/generation.py
@@ -74,8 +74,8 @@ def generate(
74
75
# Passing tensors instead of floats into self._generate_one_token_fn,
76
# so that different values would not trigger compilations of new graphs
77
- temperature_tensor = torch.tensor(temperature).to(device)
- top_p_tensor = torch.tensor(top_p).to(device)
+ temperature_tensor = torch.tensor(float(temperature)).to(device)
+ top_p_tensor = torch.tensor(float(top_p)).to(device)
79
with_temp = temperature > 0
80
81
cache_kvs = self.model.cache_kvs
0 commit comments