Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions metrics/check_metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -167,6 +167,12 @@ def test_rabbitmq(self):
]
self.run_queries(queries)

def test_kafka(self):
    """Verify the kafka-client stability metrics are healthy in Prometheus."""
    self.run_queries([
        stability_query(source='kafka-client', test='kafka'),
    ])

@classmethod
def setUpClass(self):
self.prom = setup_promethus()
Expand Down
10 changes: 10 additions & 0 deletions perf/docker/Dockerfile.kafka
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Image for the Kafka stability-test client (perf/docker/kafka/client.py).
FROM python:alpine

# -q: quiet pip output to keep build logs short.
RUN pip3 install -q kafka-python prometheus_client

# Default broker bootstrap address; kafka:19092 matches the public (non-headless)
# service port defined in perf/stability/kafka/templates/kafka.yaml.
ENV ADDRESS kafka:19092

ADD kafka/client.py /client.py
ADD prom_client.py /prom_client.py

# -u: unbuffered stdout so logs appear immediately in `kubectl logs`.
CMD ["python3", "-u", "/client.py"]
100 changes: 100 additions & 0 deletions perf/docker/kafka/client.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
# Copyright 2019 Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
import logging
import prom_client
import sys
from kafka import KafkaConsumer, KafkaProducer
from kafka.errors import TopicAlreadyExistsError
from kafka.admin import KafkaAdminClient, NewTopic

address = os.environ["ADDRESS"]

topic = 'stability'


def with_metrics(f, valid=None):
    """Execute *f* through prom_client, recording request metrics.

    *valid*, when given, is a predicate applied to the response to decide
    whether the attempt counts as a success.
    """
    return prom_client.attempt_request(
        f,
        source='kafka-client',
        destination='kafka',
        valid=valid,
    )


def send(queue, message):
    """Publish *message* to the stability topic, recording metrics.

    The attempt is counted as successful only if the send future resolves
    within one second.
    """
    payload = bytes(message, encoding="utf-8")
    with_metrics(
        lambda: queue.send(topic=topic, value=payload),
        valid=lambda resp: resp.get(timeout=1),
    )


def receive(queue, expected):
    """Read the next message from *queue*, recording metrics.

    The attempt counts as successful only if the received value equals
    *expected*.
    """
    check = lambda resp: resp.value == expected
    with_metrics(lambda: next(queue), valid=check)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logging.info("Starting prometheus")
    # Start the metrics endpoint and mark this stability test as running.
    prom_client.report_metrics()
    prom_client.report_running('kafka')

    logging.info("Setting up topic")
    # with_metrics returns (result, succeeded) — the main block relies on
    # this two-tuple shape (see prom_client.attempt_request).
    admin_client, succeeded = with_metrics(
        lambda: KafkaAdminClient(bootstrap_servers=address))
    if not succeeded:
        # Fixed: previously logged "Failed to setup publisher client" here,
        # a copy-paste of the producer error below — this is the admin client.
        logging.error("Failed to setup admin client")
        sys.exit(1)
    try:
        admin_client.create_topics(
            [NewTopic(name=topic, num_partitions=1, replication_factor=1)],
            timeout_ms=1000
        )
        logging.info("Topic created")
    except TopicAlreadyExistsError:
        # Expected on restarts: the topic persists between client runs.
        logging.info("Topic already exists")

    pub, succeeded = with_metrics(
        lambda: KafkaProducer(bootstrap_servers=address))
    if not succeeded:
        logging.error("Failed to setup publisher client")
        sys.exit(1)
    logging.info("Created pub")
    sub, succeeded = with_metrics(lambda: KafkaConsumer(
        bootstrap_servers=address,
        value_deserializer=lambda m: m.decode('utf-8'),
        consumer_timeout_ms=1000,
    ))
    if not succeeded:
        logging.error("Failed to setup subscriber client")
        sys.exit(1)
    logging.info("Created sub")
    sub.subscribe([topic])
    logging.info("Subscribed to topic")

    # Steady-state loop: publish one message, read it back, and report
    # success/failure metrics on every round trip.
    while True:
        message = "a message"
        logging.info("Sending message")
        send(pub, message)
        logging.info("Reading message")
        receive(sub, message)
        time.sleep(.5)
10 changes: 10 additions & 0 deletions perf/stability/kafka/Chart.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Helm chart metadata for the Kafka-on-Istio stability test.
apiVersion: v1
name: kafka
version: 1.0.0
description: Example Helm chart for Kafka on Istio
keywords:
  - istio
  - performance
sources:
  - https://github.com/istio/istio
engine: gotpl
8 changes: 8 additions & 0 deletions perf/stability/kafka/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Kafka

This test installs an instance of Kafka using the [incubator/kafka](https://github.com/helm/charts/tree/master/incubator/kafka) Helm chart.

The charts are based on the `incubator/kafka` charts, with some changes:
* An `istio-headless.yaml` file is added, to support using the headless Kafka services. Istio will not generate an entry for each pod in the headless service by default, so we need to do this manually.
* Kafka and Zookeeper expose the same port in a headless and standard service, which can cause conflicts. To avoid this, we use a different port on the standard (public) service.
* The Istio sidecar is disabled for Zookeeper. It should be possible to get Zookeeper working with Istio as well, but for now this example showcases only Kafka running with Istio.
22 changes: 22 additions & 0 deletions perf/stability/kafka/templates/client.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# Deployment running the stability-test client (perf/docker/kafka/client.py),
# which continuously publishes and consumes messages and reports metrics.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-client
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-client
  template:
    metadata:
      labels:
        app: kafka-client
      annotations:
        # Let Prometheus scrape the client's metrics endpoint.
        # NOTE(review): port 8080 is presumably where prom_client serves
        # metrics — confirm against prom_client.py.
        prometheus.io/scrape: "true"
        prometheus.io/port: "8080"
        prometheus.io/scheme: "http"
    spec:
      containers:
      - name: kafka
        image: howardjohn/kafka-perf-client
        # Always pull so an updated test image is picked up on pod restart.
        imagePullPolicy: Always
13 changes: 13 additions & 0 deletions perf/stability/kafka/templates/istio-headless.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# ServiceEntry listing each broker pod of the headless Kafka service
# individually. Istio does not generate per-pod entries for headless
# services by default, so they are enumerated manually here (one host per
# replica of the 3-broker StatefulSet).
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
  name: kafka
spec:
  hosts:
  - kafka-0.kafka-headless.{{ .Release.Namespace }}.svc.cluster.local
  - kafka-1.kafka-headless.{{ .Release.Namespace }}.svc.cluster.local
  - kafka-2.kafka-headless.{{ .Release.Namespace }}.svc.cluster.local
  ports:
  - name: broker
    number: 9092
    protocol: TCP
149 changes: 149 additions & 0 deletions perf/stability/kafka/templates/kafka.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
---
# Source: kafka/templates/service-brokers.yaml
# Public (ClusterIP) broker service. Uses port 19092 — different from the
# headless service's 9092 — to avoid the same-port conflict described in
# the chart README.
apiVersion: v1
kind: Service
metadata:
  name: kafka
  labels:
    app: kafka
    chart: kafka-0.14.5
    release: kafka
    heritage: Tiller
spec:
  ports:
  - name: broker
    port: 19092
    targetPort: kafka
  selector:
    app: kafka
    release: kafka

---
# Source: kafka/templates/service-headless.yaml
# Headless service giving each broker pod a stable per-pod DNS name
# (kafka-N.kafka-headless...), used by the StatefulSet and the ServiceEntry.
apiVersion: v1
kind: Service
metadata:
  name: kafka-headless
  labels:
    app: kafka
    chart: kafka-0.14.5
    release: kafka
    heritage: Tiller
  annotations:
    # NOTE(review): publishes endpoints before pods are ready — presumably so
    # brokers can discover each other during startup; confirm before removing.
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
  - name: broker
    port: 9092
  # clusterIP: None makes the service headless (DNS resolves to pod IPs).
  clusterIP: None
  selector:
    app: kafka
    release: kafka

---
# Source: kafka/templates/statefulset.yaml

# 3-broker Kafka StatefulSet (based on the incubator/kafka chart).
# NOTE(review): upgraded from the deprecated apps/v1beta1 to apps/v1 for
# consistency with templates/client.yaml; apps/v1 makes spec.selector
# mandatory, so it is added below to match the pod template labels.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  labels:
    app: kafka
    chart: kafka-0.14.5
    release: kafka
    heritage: Tiller
spec:
  serviceName: kafka-headless
  podManagementPolicy: OrderedReady
  updateStrategy:
    type: OnDelete

  replicas: 3
  # Required by apps/v1; must match the template labels below.
  selector:
    matchLabels:
      app: kafka
      release: kafka
  template:
    metadata:
      annotations:
      # sidecar.istio.io/inject: "false"
      labels:
        app: kafka
        release: kafka
    spec:
      containers:
      - name: kafka-broker
        image: "confluentinc/cp-kafka:5.0.1"
        imagePullPolicy: "IfNotPresent"
        livenessProbe:
          exec:
            command:
            - sh
            - -ec
            - /usr/bin/jps | /bin/grep -q SupportedKafka
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          tcpSocket:
            port: kafka
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
        ports:
        - containerPort: 9092
          name: kafka
        resources:
          {}
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: KAFKA_HEAP_OPTS
          value: -Xmx1G -Xms1G
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: "kafka-zookeeper:12181"
        - name: KAFKA_LOG_DIRS
          value: "/opt/kafka/data/logs"
        - name: "KAFKA_CONFLUENT_SUPPORT_METRICS_ENABLE"
          value: "false"
        - name: "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR"
          value: "3"
        - name: KAFKA_JMX_PORT
          value: "5555"
        # This is required because the Downward API does not yet support identification of
        # pod numbering in statefulsets. Thus, we are required to specify a command which
        # allows us to extract the pod ID for usage as the Kafka Broker ID.
        # See: https://github.com/kubernetes/kubernetes/issues/31218
        command:
        - sh
        - -exc
        - |
          unset KAFKA_PORT && \
          export KAFKA_BROKER_ID=${POD_NAME##*-} && \
          export KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://${POD_NAME}.kafka-headless.${POD_NAMESPACE}:9092 && \
          exec /etc/confluent/docker/run
        volumeMounts:
        - name: datadir
          mountPath: "/opt/kafka/data"
      # No extra pod-level volumes: "datadir" is provided by the
      # volumeClaimTemplates below. (The original empty `volumes:` key was a
      # no-op and has been dropped.)
      terminationGracePeriodSeconds: 60
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi

---
# Source: kafka/charts/zookeeper/templates/config-jmx-exporter.yaml

Loading