refactor: clean up GPT2Tokenizer implementation by removing unused code and comments

Signed-off-by: -LAN- <[email protected]>
laipz8200 committed Jan 7, 2025
1 parent c055792 commit 2d18ab6
Showing 1 changed file with 5 additions and 5 deletions.
@@ -1,4 +1,3 @@
-from os.path import abspath, dirname, join
 from threading import Lock
 from typing import Any
 
@@ -21,7 +20,7 @@ def _get_num_tokens_by_gpt2(text: str) -> int:
     @staticmethod
     def get_num_tokens(text: str) -> int:
         # Because this process needs more cpu resource, we turn this back before we find a better way to handle it.
-        # 
+        #
         # future = _executor.submit(GPT2Tokenizer._get_num_tokens_by_gpt2, text)
         # result = future.result()
         # return cast(int, result)
@@ -32,10 +31,11 @@ def get_encoder() -> Any:
         global _tokenizer, _lock
         with _lock:
             if _tokenizer is None:
-                base_path = abspath(__file__)
-                gpt2_tokenizer_path = join(dirname(base_path), "gpt2")
-                # _tokenizer = TransformerGPT2Tokenizer.from_pretrained(gpt2_tokenizer_path)
                 # Try to use tiktoken to get the tokenizer because it is faster
+                #
                 _tokenizer = tiktoken.get_encoding("gpt2")
+                # base_path = abspath(__file__)
+                # gpt2_tokenizer_path = join(dirname(base_path), "gpt2")
+                # _tokenizer = TransformerGPT2Tokenizer.from_pretrained(gpt2_tokenizer_path)
 
         return _tokenizer
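For quick reference, here is a minimal sketch of the whole module as it stands after this commit, assembled from the hunks above. Only the changed regions appear in the diff, so the module-level globals and the body of _get_num_tokens_by_gpt2 are assumptions inferred from the visible calls, not code confirmed by this page:

# Reconstruction of the post-commit module (unchanged regions are assumed).
from threading import Lock
from typing import Any

import tiktoken

_tokenizer: Any = None  # shared encoder, created lazily on first use
_lock = Lock()


class GPT2Tokenizer:
    @staticmethod
    def _get_num_tokens_by_gpt2(text: str) -> int:
        # Assumed body: encode with the shared encoder and count the tokens.
        return len(GPT2Tokenizer.get_encoder().encode(text))

    @staticmethod
    def get_num_tokens(text: str) -> int:
        # The _executor.submit(...) version shown in the diff comments was
        # reverted because it needed too much CPU; tokenize directly instead.
        return GPT2Tokenizer._get_num_tokens_by_gpt2(text)

    @staticmethod
    def get_encoder() -> Any:
        global _tokenizer, _lock
        with _lock:
            if _tokenizer is None:
                # tiktoken's "gpt2" encoding is used because it is faster
                # than loading TransformerGPT2Tokenizer from local files.
                _tokenizer = tiktoken.get_encoding("gpt2")

        return _tokenizer

Callers are unaffected by the cleanup: GPT2Tokenizer.get_num_tokens("hello world") still returns the GPT-2 token count as an int, and the Lock keeps the one-time tiktoken initialization safe under concurrent calls.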
