Skip to content
This repository was archived by the owner on Aug 23, 2023. It is now read-only.

docker benchmark updates #1037

Merged
merged 4 commits into from
Sep 10, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions docker/docker-cluster/caddy/Caddyfile
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,15 @@
tls off
}

:6063 {
proxy / metrictank0:6060 metrictank1:6060 metrictank2:6060 metrictank3:6060 {
header_upstream x-org-id 1
}
errors stderr
tls off
}


:8081 {
proxy / graphite {
header_upstream x-org-id 1
Expand Down
1 change: 1 addition & 0 deletions docker/docker-cluster/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,7 @@ services:
ports:
- "6061:6061"
- "6062:6062"
- "6063:6063"
- "8081:8081"
- "8082:8082"
volumes:
Expand Down
6 changes: 0 additions & 6 deletions docker/docker-cluster/load.sh

This file was deleted.

4 changes: 2 additions & 2 deletions docker/docker-cluster/metrictank.ini
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ topics = mdm
# offset to start consuming from. Can be one of newest, oldest, last or a time duration
# When using a duration but the offset request fails (e.g. Kafka doesn't have data so far back), metrictank falls back to `oldest`.
# the further back in time you go, the more old data you can load into metrictank, but the longer it takes to catch up to realtime data
offset = last
offset = oldest
# kafka partitions to consume. use '*' or a comma separated list of id's
partitions = *
# save interval for offsets
Expand Down Expand Up @@ -295,7 +295,7 @@ partitions = *
# offset to start consuming from. Can be one of newest, oldest, last or a time duration
# When using a duration but the offset request fails (e.g. Kafka doesn't have data so far back), metrictank falls back to `oldest`.
# Should match your kafka-mdm-in setting
offset = last
offset = oldest
# save interval for offsets
offset-commit-interval = 5s
# Maximum time backlog processing can block during metrictank startup.
Expand Down
4 changes: 2 additions & 2 deletions docker/docker-dev-custom-cfg-kafka/metrictank.ini
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ topics = mdm
# offset to start consuming from. Can be one of newest, oldest, last or a time duration
# When using a duration but the offset request fails (e.g. Kafka doesn't have data so far back), metrictank falls back to `oldest`.
# the further back in time you go, the more old data you can load into metrictank, but the longer it takes to catch up to realtime data
offset = last
offset = oldest
# kafka partitions to consume. use '*' or a comma separated list of id's
partitions = *
# save interval for offsets
Expand Down Expand Up @@ -295,7 +295,7 @@ partitions = *
# offset to start consuming from. Can be one of newest, oldest, last or a time duration
# When using a duration but the offset request fails (e.g. Kafka doesn't have data so far back), metrictank falls back to `oldest`.
# Should match your kafka-mdm-in setting
offset = last
offset = oldest
# save interval for offsets
offset-commit-interval = 5s
# Maximum time backlog processing can block during metrictank startup.
Expand Down
4 changes: 4 additions & 0 deletions docker/scripts/bench-ingest-back-fill-speed.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
#!/bin/bash
# Benchmark: measure metrictank's Kafka backfill (catch-up) speed.
# For use with environments whose kafka-mdm-in setting is offset=oldest,
# such as docker-cluster and docker-dev-custom-cfg-kafka.
# First fill up Kafka with ~2 years of history ($((2*366*24)) hours),
# then restart the metrictanks with docker-compose and observe how fast
# they consume the backlog.
set -euo pipefail
fakemetrics backfill --kafka-mdm-addr localhost:9092 --kafka-mdm-v2=true --offset $((2*366*24))h --period 1800s --speedup 360000 --mpo 500
9 changes: 9 additions & 0 deletions docker/scripts/bench-realistic-workload-single-tenant.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#!/bin/bash
# Benchmark: realistic single-tenant workload.
# Backfills 5h of history, starts a realtime feed in the background, then
# drives render queries (generated from the index via mt-index-cat) through
# vegeta and prints a latency report.
# Kill the backgrounded realtime feed on exit. 2>/dev/null suppresses kill's
# usage error in case no background jobs are still running at exit time.
trap 'kill $(jobs -p) 2>/dev/null' EXIT
# load up some old data
fakemetrics backfill --kafka-mdm-addr localhost:9092 --offset 5h --period 10s --speedup 100 --mpo 5000
# then continue with a realtime feed
fakemetrics feed --kafka-mdm-addr localhost:9092 --period 10s --mpo 5000 &
# give the feed time to populate the index before generating queries from it
sleep 30
mt-index-cat -addr http://localhost:6063 -from 60min cass -hosts localhost:9042 -schema-file ./scripts/config/schema-idx-cassandra.toml vegeta-render-patterns | vegeta attack -duration 300s | vegeta report
# let the feed run a bit longer before the EXIT trap tears it down
sleep 20