Skip to content

Commit

Permalink
[text] fix whisper tokenizer in multiprocess env
Browse files Browse the repository at this point in the history
  • Loading branch information
Mddct committed Nov 28, 2023
1 parent a13dedf commit ec2d838
Showing 1 changed file with 36 additions and 16 deletions.
52 changes: 36 additions & 16 deletions wenet/text/whisper_tokenizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,56 +17,76 @@ def __init__(
*args,
**kwargs,
) -> None:
from whisper.tokenizer import get_tokenizer
self.tokenizer = get_tokenizer(multilingual=multilingual,
num_languages=num_languages,
language=language,
task=task)
# NOTE(Mddct): don't build here, pickle issues
self.tokenizer = None
# TODO: we don't need this in future
self.multilingual = multilingual
self.num_languages = num_languages
self.language = language
self.task = task

if not isinstance(non_lang_syms, List):
self.non_lang_syms = read_non_lang_symbols(non_lang_syms)
else:
# non_lang_syms=["{NOISE}"]
self.non_lang_syms = non_lang_syms
# TODO(Mddct): add special tokens, like non_lang_syms
del self.non_lang_syms
self.t2i = {}
self.i2t = {}
for i in range(self.tokenizer.encoding.n_vocab):
unit = str(self.tokenizer.encoding.decode_single_token_bytes(i))
if len(unit) == 0:
unit = str(i)
unit = unit.replace(" ", "<space>")
# unit = bytes(unit, 'utf-8')
self.t2i[unit] = i
self.i2t[i] = unit
assert len(self.t2i) == len(self.i2t)

def _build_tiktoken(self):
if self.tokenizer is None:
from whisper.tokenizer import get_tokenizer
self.tokenizer = get_tokenizer(multilingual=self.multilingual,
num_languages=self.num_languages,
language=self.language,
task=self.task)
self.t2i = {}
self.i2t = {}
for i in range(self.tokenizer.encoding.n_vocab):
unit = str(
self.tokenizer.encoding.decode_single_token_bytes(i))
if len(unit) == 0:
unit = str(i)
unit = unit.replace(" ", "<space>")
# unit = bytes(unit, 'utf-8')
self.t2i[unit] = i
self.i2t[i] = unit
assert len(self.t2i) == len(self.i2t)

def tokenize(self, line: str) -> Tuple[List[str], List[int]]:
    """Encode *line* and return (token strings, token ids)."""
    self._build_tiktoken()
    token_ids = self.tokenizer.encoding.encode(line)
    token_strs = [self.i2t[token_id] for token_id in token_ids]
    return token_strs, token_ids

def detokenize(self, ids: List[int]) -> Tuple[str, List[str]]:
    """Decode *ids* and return (decoded text, token strings)."""
    self._build_tiktoken()
    token_strs = [self.i2t[token_id] for token_id in ids]
    return self.tokenizer.encoding.decode(ids), token_strs

def text2tokens(self, line: str) -> List[str]:
    """Return only the token strings for *line*.

    No explicit ``_build_tiktoken()`` call is needed here: the first
    statement of ``tokenize()`` already performs the lazy build, so
    the call the original made was redundant.
    """
    return self.tokenize(line)[0]

def tokens2text(self, tokens: List[str]) -> str:
    """Map token strings back to decoded text via the id table."""
    # The build must happen before reading self.t2i (detokenize would
    # build too late for the lookup below).
    self._build_tiktoken()
    token_ids = [self.t2i[token] for token in tokens]
    text, _ = self.detokenize(token_ids)
    return text

def tokens2ids(self, tokens: List[str]) -> List[int]:
    """Look up the integer id of each token string."""
    self._build_tiktoken()
    return [self.t2i[token] for token in tokens]

def ids2tokens(self, ids: List[int]) -> List[str]:
    """Decode each id individually into its surface string.

    NOTE(review): unlike tokens2ids/tokens2text, this bypasses the
    ``i2t`` table (no ``<space>`` substitution, raw decoded text rather
    than the bytes-repr units) — confirm the asymmetry is intentional.
    """
    self._build_tiktoken()
    # Renamed loop variable: the original shadowed the builtin `id`.
    return [self.tokenizer.encoding.decode([token_id]) for token_id in ids]

def vocab_size(self) -> int:
    """Return the number of distinct token strings in the table."""
    self._build_tiktoken()
    table = self.t2i
    return len(table)

def symbol_table(self) -> Dict[str, int]:
    """Return the token-string -> id mapping (built on demand)."""
    self._build_tiktoken()
    return self.t2i

0 comments on commit ec2d838

Please sign in to comment.