HuggingFace tokenizer

HuggingFaceTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/huggingface_tokenizer.py
@define()
class HuggingFaceTokenizer(BaseTokenizer):
    tokenizer: PreTrainedTokenizerBase = field(kw_only=True)
    model: str = field(init=False, kw_only=True)
    max_input_tokens: int = field(
        default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True
    )
    max_output_tokens: int = field(kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        if isinstance(text, str):
            return len(self.tokenizer.encode(text))
        else:
            raise ValueError("Text must be a string.")
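
A minimal construction sketch, assuming the transformers package is installed; "gpt2" is used purely as an example checkpoint and the 512 output limit is arbitrary. max_output_tokens has no default in this class, so it must be supplied explicitly.

from transformers import AutoTokenizer
from griptape.tokenizers import HuggingFaceTokenizer

# Wrap any PreTrainedTokenizerBase instance from transformers.
tokenizer = HuggingFaceTokenizer(
    tokenizer=AutoTokenizer.from_pretrained("gpt2"),
    max_output_tokens=512,  # required: this field defines no default
)

print(tokenizer.count_tokens("Hello, world!"))  # length of the encoded token id list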

Attributes

max_input_tokens: int = field(default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True)

max_output_tokens: int = field(kw_only=True)

model: str = field(init=False, kw_only=True)

tokenizer: PreTrainedTokenizerBase = field(kw_only=True)
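
max_input_tokens defaults to the wrapped tokenizer's model_max_length via the Factory shown above. Note that transformers falls back to a very large sentinel value when a checkpoint declares no length limit, so passing max_input_tokens explicitly can be safer in that case. A sketch under the same "gpt2" assumption as above:

from transformers import AutoTokenizer
from griptape.tokenizers import HuggingFaceTokenizer

hf_tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Default: inherited from the underlying tokenizer (1024 for gpt2).
defaulted = HuggingFaceTokenizer(tokenizer=hf_tokenizer, max_output_tokens=256)
print(defaulted.max_input_tokens)

# Explicit override of the input limit.
capped = HuggingFaceTokenizer(tokenizer=hf_tokenizer, max_input_tokens=512, max_output_tokens=256)
print(capped.max_input_tokens)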

count_tokens(text)

Source code in griptape/tokenizers/huggingface_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return len(self.tokenizer.encode(text))
    else:
        raise ValueError("Text must be a string.")
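
count_tokens accepts only a plain string and returns the number of token ids produced by the wrapped tokenizer's encode; any other input raises ValueError. A short sketch, again assuming the example "gpt2" setup:

from transformers import AutoTokenizer
from griptape.tokenizers import HuggingFaceTokenizer

tokenizer = HuggingFaceTokenizer(
    tokenizer=AutoTokenizer.from_pretrained("gpt2"),
    max_output_tokens=256,
)

print(tokenizer.count_tokens("Token counting delegates to encode()."))

try:
    tokenizer.count_tokens(["a", "list", "is", "rejected"])  # non-string input
except ValueError as error:
    print(error)  # Text must be a string.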