diff --git a/ddtrace/_monkey.py b/ddtrace/_monkey.py index f935b8b9f13..6a301f338a7 100644 --- a/ddtrace/_monkey.py +++ b/ddtrace/_monkey.py @@ -30,7 +30,6 @@ # Default set of modules to automatically patch or not PATCH_MODULES = { - "aioredis": True, "aiomysql": True, "aredis": True, "asyncio": True, diff --git a/ddtrace/contrib/integration_registry/mappings.py b/ddtrace/contrib/integration_registry/mappings.py index f079c5f25b8..922ed6c1b79 100644 --- a/ddtrace/contrib/integration_registry/mappings.py +++ b/ddtrace/contrib/integration_registry/mappings.py @@ -8,7 +8,6 @@ "asgi", "wsgi", "boto", - "aioredis", "pytest_bdd", "urllib", "webbrowser", diff --git a/ddtrace/contrib/integration_registry/registry.yaml b/ddtrace/contrib/integration_registry/registry.yaml index 56ea8a82f51..4e746e14902 100644 --- a/ddtrace/contrib/integration_registry/registry.yaml +++ b/ddtrace/contrib/integration_registry/registry.yaml @@ -53,12 +53,6 @@ integrations: min: 0.16.0 max: 1.4.0 -- integration_name: aioredis - is_external_package: true - is_tested: false - dependency_names: - - aioredis - - integration_name: algoliasearch is_external_package: true is_tested: true diff --git a/ddtrace/contrib/internal/aioredis/__init__.py b/ddtrace/contrib/internal/aioredis/__init__.py deleted file mode 100644 index 3fcc7750952..00000000000 --- a/ddtrace/contrib/internal/aioredis/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -The aioredis integration instruments aioredis requests. Version 1.3 and above are fully -supported. - - -Enabling -~~~~~~~~ - -The aioredis integration is enabled automatically when using -:ref:`ddtrace-run` or :ref:`import ddtrace.auto`. - -Or use :func:`patch() ` to manually enable the integration:: - - from ddtrace import patch - patch(aioredis=True) - - -Global Configuration -~~~~~~~~~~~~~~~~~~~~ - -.. py:data:: ddtrace.config.aioredis["service"] - - The service name reported by default for aioredis instances. - - This option can also be set with the ``DD_AIOREDIS_SERVICE`` environment - variable. - - Default: ``"redis"`` - -.. py:data:: ddtrace.config.aioredis["cmd_max_length"] - - Max allowable size for the aioredis command span tag. - Anything beyond the max length will be replaced with ``"..."``. - - This option can also be set with the ``DD_AIOREDIS_CMD_MAX_LENGTH`` environment - variable. - - Default: ``1000`` - -.. py:data:: ddtrace.config.aioedis["resource_only_command"] - - The span resource will only include the command executed. To include all - arguments in the span resource, set this value to ``False``. - - This option can also be set with the ``DD_REDIS_RESOURCE_ONLY_COMMAND`` environment - variable. 
- - Default: ``True`` - - -Instance Configuration -~~~~~~~~~~~~~~~~~~~~~~ - -To configure the aioredis integration on a per-instance basis use the -``Pin`` API:: - - import aioredis - from ddtrace._trace.pin import Pin - - myaioredis = aioredis.Aioredis() - Pin.override(myaioredis, service="myaioredis") -""" diff --git a/ddtrace/contrib/internal/aioredis/patch.py b/ddtrace/contrib/internal/aioredis/patch.py deleted file mode 100644 index 96945d1d33a..00000000000 --- a/ddtrace/contrib/internal/aioredis/patch.py +++ /dev/null @@ -1,234 +0,0 @@ -import asyncio -import os -import sys -from typing import Dict - -import aioredis -from wrapt import wrap_function_wrapper as _w - -from ddtrace import config -from ddtrace._trace.pin import Pin -from ddtrace.constants import _SPAN_MEASURED_KEY -from ddtrace.constants import SPAN_KIND -from ddtrace.contrib import trace_utils -from ddtrace.contrib.internal.redis_utils import ROW_RETURNING_COMMANDS -from ddtrace.contrib.internal.redis_utils import _instrument_redis_cmd -from ddtrace.contrib.internal.redis_utils import _instrument_redis_execute_pipeline -from ddtrace.contrib.internal.redis_utils import _run_redis_command_async -from ddtrace.contrib.internal.redis_utils import determine_row_count -from ddtrace.ext import SpanKind -from ddtrace.ext import SpanTypes -from ddtrace.ext import db -from ddtrace.ext import net -from ddtrace.ext import redis as redisx -from ddtrace.internal.constants import COMPONENT -from ddtrace.internal.schema import schematize_cache_operation -from ddtrace.internal.schema import schematize_service_name -from ddtrace.internal.utils.formats import CMD_MAX_LEN -from ddtrace.internal.utils.formats import asbool -from ddtrace.internal.utils.formats import stringify_cache_args -from ddtrace.internal.utils.wrappers import unwrap as _u -from ddtrace.vendor.packaging.version import parse as parse_version - - -try: - from aioredis.commands.transaction import _RedisBuffer -except ImportError: - _RedisBuffer = None - -config._add( - "aioredis", - dict( - _default_service=schematize_service_name("redis"), - cmd_max_length=int(os.getenv("DD_AIOREDIS_CMD_MAX_LENGTH", CMD_MAX_LEN)), - resource_only_command=asbool(os.getenv("DD_REDIS_RESOURCE_ONLY_COMMAND", True)), - ), -) - -aioredis_version_str = getattr(aioredis, "__version__", "") -aioredis_version = parse_version(aioredis_version_str) -V2 = parse_version("2.0") - - -def get_version() -> str: - return aioredis_version_str - - -def _supported_versions() -> Dict[str, str]: - return {"aioredis": "*"} - - -def patch(): - if getattr(aioredis, "_datadog_patch", False): - return - aioredis._datadog_patch = True - pin = Pin() - if aioredis_version >= V2: - _w("aioredis.client", "Redis.execute_command", traced_execute_command) - _w("aioredis.client", "Redis.pipeline", traced_pipeline) - _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline) - pin.onto(aioredis.client.Redis) - else: - _w("aioredis", "Redis.execute", traced_13_execute_command) - _w("aioredis", "Redis.pipeline", traced_13_pipeline) - _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline) - pin.onto(aioredis.Redis) - - -def unpatch(): - if not getattr(aioredis, "_datadog_patch", False): - return - - aioredis._datadog_patch = False - if aioredis_version >= V2: - _u(aioredis.client.Redis, "execute_command") - _u(aioredis.client.Redis, "pipeline") - _u(aioredis.client.Pipeline, "execute") - else: - _u(aioredis.Redis, "execute") - _u(aioredis.Redis, "pipeline") - 
_u(aioredis.commands.transaction.Pipeline, "execute") - - -async def traced_execute_command(func, instance, args, kwargs): - pin = Pin.get_from(instance) - if not pin or not pin.enabled(): - return await func(*args, **kwargs) - - with _instrument_redis_cmd(pin, config.aioredis, instance, args) as ctx: - return await _run_redis_command_async(ctx=ctx, func=func, args=args, kwargs=kwargs) - - -def traced_pipeline(func, instance, args, kwargs): - pipeline = func(*args, **kwargs) - pin = Pin.get_from(instance) - if pin: - pin.onto(pipeline) - return pipeline - - -async def traced_execute_pipeline(func, instance, args, kwargs): - pin = Pin.get_from(instance) - if not pin or not pin.enabled(): - return await func(*args, **kwargs) - - cmds = [stringify_cache_args(c, cmd_max_len=config.aioredis.cmd_max_length) for c, _ in instance.command_stack] - with _instrument_redis_execute_pipeline(pin, config.aioredis, cmds, instance): - return await func(*args, **kwargs) - - -def traced_13_pipeline(func, instance, args, kwargs): - pipeline = func(*args, **kwargs) - pin = Pin.get_from(instance) - if pin: - pin.onto(pipeline) - return pipeline - - -def traced_13_execute_command(func, instance, args, kwargs): - # If we have a _RedisBuffer then we are in a pipeline - if isinstance(instance.connection, _RedisBuffer): - return func(*args, **kwargs) - - pin = Pin.get_from(instance) - if not pin or not pin.enabled(): - return func(*args, **kwargs) - - # Don't activate the span since this operation is performed as a future which concludes sometime later on in - # execution so subsequent operations in the stack are not necessarily semantically related - # (we don't want this span to be the parent of all other spans created before the future is resolved) - parent = pin.tracer.current_span() - query = stringify_cache_args(args, cmd_max_len=config.aioredis.cmd_max_length) - span = pin.tracer.start_span( - schematize_cache_operation(redisx.CMD, cache_provider="redis"), - service=trace_utils.ext_service(pin, config.aioredis), - resource=query.split(" ")[0] if config.aioredis.resource_only_command else query, - span_type=SpanTypes.REDIS, - activate=False, - child_of=parent, - ) - # set span.kind to the type of request being performed - span._set_tag_str(SPAN_KIND, SpanKind.CLIENT) - - span._set_tag_str(COMPONENT, config.aioredis.integration_name) - span._set_tag_str(db.SYSTEM, redisx.APP) - # PERF: avoid setting via Span.set_tag - span.set_metric(_SPAN_MEASURED_KEY, 1) - span._set_tag_str(redisx.RAWCMD, query) - if pin.tags: - span.set_tags(pin.tags) - - span.set_tags( - { - net.TARGET_HOST: instance.address[0], - net.TARGET_PORT: instance.address[1], - redisx.DB: instance.db or 0, - } - ) - span.set_metric(redisx.ARGS_LEN, len(args)) - - def _finish_span(future): - try: - # Accessing the result will raise an exception if: - # - The future was cancelled (CancelledError) - # - There was an error executing the future (`future.exception()`) - # - The future is in an invalid state - redis_command = span.resource.split(" ")[0] - future.result() - if redis_command in ROW_RETURNING_COMMANDS: - span.set_metric(db.ROWCOUNT, determine_row_count(redis_command=redis_command, result=future.result())) - except aioredis.CancelledError: - span.set_exc_info(*sys.exc_info()) - if redis_command in ROW_RETURNING_COMMANDS: - span.set_metric(db.ROWCOUNT, 0) - finally: - span.finish() - - task = func(*args, **kwargs) - # Execute command returns a coroutine when no free connections are available - # 
https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191 - task = asyncio.ensure_future(task) - task.add_done_callback(_finish_span) - return task - - -async def traced_13_execute_pipeline(func, instance, args, kwargs): - pin = Pin.get_from(instance) - if not pin or not pin.enabled(): - return await func(*args, **kwargs) - - cmds = [] - for _, cmd, cmd_args, _ in instance._pipeline: - parts = [cmd] - parts.extend(cmd_args) - cmds.append(stringify_cache_args(parts, cmd_max_len=config.aioredis.cmd_max_length)) - - resource = cmds_string = "\n".join(cmds) - if config.aioredis.resource_only_command: - resource = "\n".join([cmd.split(" ")[0] for cmd in cmds]) - - with pin.tracer.trace( - schematize_cache_operation(redisx.CMD, cache_provider="redis"), - resource=resource, - service=trace_utils.ext_service(pin, config.aioredis), - span_type=SpanTypes.REDIS, - ) as span: - # set span.kind to the type of request being performed - span._set_tag_str(SPAN_KIND, SpanKind.CLIENT) - - span._set_tag_str(COMPONENT, config.aioredis.integration_name) - span._set_tag_str(db.SYSTEM, redisx.APP) - span.set_tags( - { - net.TARGET_HOST: instance._pool_or_conn.address[0], - net.TARGET_PORT: instance._pool_or_conn.address[1], - redisx.DB: instance._pool_or_conn.db or 0, - } - ) - - # PERF: avoid setting via Span.set_tag - span.set_metric(_SPAN_MEASURED_KEY, 1) - span._set_tag_str(redisx.RAWCMD, cmds_string) - span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline)) - - return await func(*args, **kwargs) diff --git a/ddtrace/internal/settings/_config.py b/ddtrace/internal/settings/_config.py index 86c7aa289c0..e2200e28a52 100644 --- a/ddtrace/internal/settings/_config.py +++ b/ddtrace/internal/settings/_config.py @@ -109,7 +109,6 @@ "falcon", "langgraph", "litellm", - "aioredis", "test_visibility", "redis", "mako", diff --git a/releasenotes/notes/remove-aioredis-3ebab9a4d3a2fc8f.yaml b/releasenotes/notes/remove-aioredis-3ebab9a4d3a2fc8f.yaml new file mode 100644 index 00000000000..b231bb8f57e --- /dev/null +++ b/releasenotes/notes/remove-aioredis-3ebab9a4d3a2fc8f.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + aioredis: The aioredis integration has been removed. diff --git a/scripts/check_suitespec_coverage.py b/scripts/check_suitespec_coverage.py index e2ad4fa28c4..5594aeb4c51 100755 --- a/scripts/check_suitespec_coverage.py +++ b/scripts/check_suitespec_coverage.py @@ -24,8 +24,6 @@ # Ignore any embedded documentation IGNORE_PATTERNS.add("**/*.md") -# The aioredis integration is deprecated and untested -IGNORE_PATTERNS.add("ddtrace/contrib/aioredis/*") # TODO(taegyunkim): remove these after merging profiling v2 tests back to profiling IGNORE_PATTERNS.add("tests/profiling/*.py") IGNORE_PATTERNS.add("tests/profiling/*/*.py")
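For applications that previously enabled this integration via ``patch(aioredis=True)`` or ``DD_PATCH_MODULES``, the practical replacement is the ``redis`` integration: the upstream ``aioredis`` project is archived and its asyncio client now ships with ``redis-py`` (>= 4.2) as ``redis.asyncio``, which the existing ``redis`` integration instruments. Below is a minimal, hedged migration sketch; the host, port, and service name are placeholders, and the ``Pin`` import path assumes a recent ddtrace release::

    import asyncio

    import redis.asyncio as redis_async  # redis.asyncio supersedes the archived aioredis client

    from ddtrace import patch
    from ddtrace.trace import Pin  # assumed public Pin location on current ddtrace versions

    patch(redis=True)  # the redis integration traces redis.asyncio clients


    async def main():
        client = redis_async.Redis(host="localhost", port=6379)  # placeholder connection details
        # Optional per-client service override, mirroring the old aioredis Pin example.
        Pin.override(client, service="my-redis")
        await client.set("greeting", "hello")
        print(await client.get("greeting"))
        await client.aclose()  # redis-py >= 5; older releases expose close() instead


    asyncio.run(main())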