Request for longer response to make cache test more robust. #2043

Merged 3 commits on Mar 17, 2024
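
The change is mechanical: in test_legacy_cache and test_cache, every client.create(...) call now sends a prompt asking for a 100 word summary instead of the literal string "random()". A longer completion makes the cold-cache round trip to the API take noticeably longer than the warm-cache read from disk, so the assert duration_with_warm_cache < duration_with_cold_cache checks are far less likely to flake when the model would otherwise answer a trivial prompt almost instantly. The sketch below illustrates the cold-versus-warm timing pattern these tests rely on; it assumes pyautogen is installed with a valid OAI_CONFIG_LIST, and the time_create helper and the .cache_demo directory are illustrative additions, not part of the library.

# Minimal sketch of the cold-vs-warm cache timing check used in these tests.
# Assumes pyautogen is installed and OAI_CONFIG_LIST points at a valid config;
# time_create() and .cache_demo are illustrative, not part of the autogen API.
import time

from autogen import OpenAIWrapper, config_list_from_json
from autogen.cache import Cache


def time_create(client, prompt, **kwargs):
    """Return (response, elapsed seconds) for one client.create() call."""
    start = time.time()
    response = client.create(messages=[{"role": "user", "content": prompt}], **kwargs)
    return response, time.time() - start


if __name__ == "__main__":
    config_list = config_list_from_json("OAI_CONFIG_LIST")
    # Long enough that generating the answer clearly outweighs a cache lookup.
    prompt = "Write a 100 word summary on the topic of the history of human civilization."

    with Cache.disk(cache_seed=49, cache_path_root=".cache_demo") as cache:
        client = OpenAIWrapper(config_list=config_list, cache=cache)
        cold_response, cold_seconds = time_create(client, prompt)  # hits the API
        warm_response, warm_seconds = time_create(client, prompt)  # served from the disk cache
        assert cold_response == warm_response
        assert warm_seconds < cold_seconds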
30 changes: 18 additions & 12 deletions test/oai/test_client.py
@@ -167,19 +167,22 @@ def test_legacy_cache():
         filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
     )
 
+    # Prompt to use for testing.
+    prompt = "Write a 100 word summary on the topic of the history of human civilization."
+
     # Clear cache.
     if os.path.exists(LEGACY_CACHE_DIR):
         shutil.rmtree(LEGACY_CACHE_DIR)
 
     # Test default cache seed.
     client = OpenAIWrapper(config_list=config_list)
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
 
     start_time = time.time()
-    warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_warm_cache = end_time - start_time
     assert cold_cache_response == warm_cache_response
@@ -189,12 +192,12 @@ def test_legacy_cache():
     # Test with cache seed set through constructor
     client = OpenAIWrapper(config_list=config_list, cache_seed=13)
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
 
     start_time = time.time()
-    warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
     end_time = time.time()
     duration_with_warm_cache = end_time - start_time
     assert cold_cache_response == warm_cache_response
@@ -204,12 +207,12 @@ def test_legacy_cache():
     # Test with cache seed set through create method
     client = OpenAIWrapper(config_list=config_list)
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache_seed=17)
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache_seed=17)
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
 
     start_time = time.time()
-    warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache_seed=17)
+    warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache_seed=17)
     end_time = time.time()
     duration_with_warm_cache = end_time - start_time
     assert cold_cache_response == warm_cache_response
@@ -218,7 +221,7 @@ def test_legacy_cache():
 
     # Test using a different cache seed through create method.
     start_time = time.time()
-    cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache_seed=21)
+    cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache_seed=21)
     end_time = time.time()
     duration_with_cold_cache = end_time - start_time
     assert duration_with_warm_cache < duration_with_cold_cache
@@ -233,6 +236,9 @@ def test_cache():
         filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]},
     )
 
+    # Prompt to use for testing.
+    prompt = "Write a 100 word summary on the topic of the history of artificial intelligence."
+
     # Clear cache.
     if os.path.exists(LEGACY_CACHE_DIR):
         shutil.rmtree(LEGACY_CACHE_DIR)
@@ -245,12 +251,12 @@ def test_cache():
     with Cache.disk(cache_seed=49, cache_path_root=cache_dir) as cache:
         client = OpenAIWrapper(config_list=config_list, cache=cache)
         start_time = time.time()
-        cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+        cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
         end_time = time.time()
         duration_with_cold_cache = end_time - start_time
 
         start_time = time.time()
-        warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}])
+        warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}])
         end_time = time.time()
         duration_with_warm_cache = end_time - start_time
         assert cold_cache_response == warm_cache_response
@@ -264,12 +270,12 @@ def test_cache():
     client = OpenAIWrapper(config_list=config_list)
     with Cache.disk(cache_seed=312, cache_path_root=cache_dir) as cache:
         start_time = time.time()
-        cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache=cache)
+        cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache=cache)
         end_time = time.time()
         duration_with_cold_cache = end_time - start_time
 
         start_time = time.time()
-        warm_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache=cache)
+        warm_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache=cache)
         end_time = time.time()
         duration_with_warm_cache = end_time - start_time
         assert cold_cache_response == warm_cache_response
@@ -282,7 +288,7 @@ def test_cache():
     # Test different cache seed.
     with Cache.disk(cache_seed=123, cache_path_root=cache_dir) as cache:
         start_time = time.time()
-        cold_cache_response = client.create(messages=[{"role": "user", "content": "random()"}], cache=cache)
+        cold_cache_response = client.create(messages=[{"role": "user", "content": prompt}], cache=cache)
         end_time = time.time()
         duration_with_cold_cache = end_time - start_time
         assert duration_with_warm_cache < duration_with_cold_cache