Skip to content
This repository has been archived by the owner on Mar 13, 2023. It is now read-only.

feat: implement NullCache #710

Merged
merged 3 commits into from
Nov 1, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions naff/client/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
NotFound,
)
from naff.client.smart_cache import GlobalCache
from naff.client.utils import NullCache
from naff.client.utils.input_utils import get_first_word, get_args
from naff.client.utils.misc_utils import get_event_name, wrap_partial
from naff.client.utils.serializer import to_image_data
Expand Down Expand Up @@ -527,6 +528,16 @@ def _sanity_check(self) -> None:
if len(self.processors) == 0:
self.logger.warning("No Processors are loaded! This means no events will be processed!")

caches = [
c[0]
for c in inspect.getmembers(self.cache, predicate=lambda x: isinstance(x, dict))
if not c[0].startswith("__")
]
for cache in caches:
_cache_obj = getattr(self.cache, cache)
if isinstance(_cache_obj, NullCache):
self.logger.warning(f"{cache} has been disabled")

async def generate_prefixes(self, bot: "Client", message: Message) -> str | Iterable[str]:
"""
A method to get the bot's default_prefix, can be overridden to add dynamic prefixes.
Expand Down
6 changes: 4 additions & 2 deletions naff/client/smart_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

from naff.client.const import Absent, MISSING, get_logger
from naff.client.errors import NotFound, Forbidden
from naff.client.utils.cache import TTLCache
from naff.client.utils.cache import TTLCache, NullCache
from naff.models import VoiceState
from naff.models.discord.channel import BaseChannel, GuildChannel, ThreadChannel
from naff.models.discord.emoji import CustomEmoji
Expand All @@ -29,7 +29,7 @@

def create_cache(
ttl: Optional[int] = 60, hard_limit: Optional[int] = 250, soft_limit: Absent[Optional[int]] = MISSING
) -> Union[dict, TTLCache]:
) -> Union[dict, TTLCache, NullCache]:
"""
Create a cache object based on the parameters passed.

Expand All @@ -46,6 +46,8 @@ def create_cache(
"""
if ttl is None and hard_limit is None:
return {}
if ttl == 0 and hard_limit == 0 and soft_limit == 0:
return NullCache()
else:
if not soft_limit:
soft_limit = int(hard_limit / 4) if hard_limit else 50
Expand Down
13 changes: 12 additions & 1 deletion naff/client/utils/cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,23 @@

import attrs

__all__ = ("TTLItem", "TTLCache")
__all__ = ("TTLItem", "TTLCache", "NullCache")

KT = TypeVar("KT")
VT = TypeVar("VT")


class NullCache(dict):
"""
A special cache that will always return None

Effectively just a lazy way to disable caching.
"""

def __setitem__(self, key, value) -> None:
pass


@attrs.define(eq=False, order=False, hash=False, kw_only=False)
class TTLItem(Generic[VT]):
value: VT = attrs.field(
Expand Down
24 changes: 14 additions & 10 deletions naff/ext/debug_extension/utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import datetime
import inspect
import weakref
from typing import TYPE_CHECKING, Any, Optional, Union

from naff.client.utils.cache import TTLCache
from naff.client.utils.cache import TTLCache, NullCache
from naff.models import Embed, MaterialColors

if TYPE_CHECKING:
Expand All @@ -28,30 +29,33 @@ def debug_embed(title: str, **kwargs) -> Embed:

def get_cache_state(bot: "Client") -> str:
"""Create a nicely formatted table of internal cache state."""
caches = [
c[0]
caches = {
c[0]: getattr(bot.cache, c[0])
for c in inspect.getmembers(bot.cache, predicate=lambda x: isinstance(x, dict))
if not c[0].startswith("__")
]
}
caches["endpoints"] = bot.http._endpoints
caches["rate_limits"] = bot.http.ratelimit_locks
table = []

for cache in caches:
val = getattr(bot.cache, cache)
for cache, val in caches.items():

if isinstance(val, TTLCache):
amount = [len(val), f"{val.hard_limit}({val.soft_limit})"]
expire = f"{val.ttl}s"
elif isinstance(val, NullCache):
amount = ("DISABLED",)
expire = "N/A"
elif isinstance(val, (weakref.WeakValueDictionary, weakref.WeakKeyDictionary)):
amount = [len(val), "∞"]
expire = "w_ref"
else:
amount = [len(val), "∞"]
expire = "none"

row = [cache.removesuffix("_cache"), amount, expire]
table.append(row)

# http caches
table.append(["endpoints", [len(bot.http._endpoints), "∞"], "none"])
table.append(["ratelimits", [len(bot.http.ratelimit_locks), "∞"], "w_ref"])

adjust_subcolumn(table, 1, aligns=[">", "<"])

labels = ["Cache", "Amount", "Expire"]
Expand Down