Tokenizers

__all__ = ['BaseTokenizer', 'OpenAiTokenizer', 'CohereTokenizer', 'HuggingFaceTokenizer', 'AnthropicTokenizer', 'BedrockTitanTokenizer', 'BedrockJurassicTokenizer', 'BedrockClaudeTokenizer', 'BedrockLlamaTokenizer', 'SimpleTokenizer'] module-attribute

AnthropicTokenizer

Bases: BaseTokenizer

Source code in griptape/griptape/tokenizers/anthropic_tokenizer.py
@define(frozen=True)
class AnthropicTokenizer(BaseTokenizer):
    DEFAULT_MODEL = "claude-2.1"
    MODEL_PREFIXES_TO_MAX_TOKENS = {"claude-2.1": 200000, "claude": 100000}

    model: str = field(kw_only=True)
    max_tokens: int = field(kw_only=True, default=Factory(lambda self: self.default_max_tokens(), takes_self=True))
    client: Anthropic = field(
        default=Factory(lambda: import_optional_dependency("anthropic").Anthropic()), kw_only=True
    )

    def default_max_tokens(self) -> int:
        tokens = next(v for k, v in self.MODEL_PREFIXES_TO_MAX_TOKENS.items() if self.model.startswith(k))

        return tokens

    def count_tokens(self, text: str | list) -> int:
        if isinstance(text, str):
            return self.client.count_tokens(text)
        else:
            raise ValueError("Text must be a string.")
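
A minimal usage sketch (assumes the optional anthropic dependency is installed; the default client reads ANTHROPIC_API_KEY from the environment, so set it before constructing the tokenizer):

from griptape.tokenizers import AnthropicTokenizer

tokenizer = AnthropicTokenizer(model="claude-2.1")

print(tokenizer.max_tokens)                      # 200000, matched by the "claude-2.1" prefix
print(tokenizer.count_tokens("Hello, Claude!"))  # counted by the Anthropic client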

DEFAULT_MODEL = 'claude-2.1' class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_TOKENS = {'claude-2.1': 200000, 'claude': 100000} class-attribute instance-attribute

client: Anthropic = field(default=Factory(lambda: import_optional_dependency('anthropic').Anthropic()), kw_only=True) class-attribute instance-attribute

max_tokens: int = field(kw_only=True, default=Factory(lambda self: self.default_max_tokens(), takes_self=True)) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/griptape/tokenizers/anthropic_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return self.client.count_tokens(text)
    else:
        raise ValueError("Text must be a string.")

default_max_tokens()

Source code in griptape/griptape/tokenizers/anthropic_tokenizer.py
def default_max_tokens(self) -> int:
    tokens = next(v for k, v in self.MODEL_PREFIXES_TO_MAX_TOKENS.items() if self.model.startswith(k))

    return tokens

BaseTokenizer

Bases: ABC

Source code in griptape/griptape/tokenizers/base_tokenizer.py
@define(frozen=True)
class BaseTokenizer(ABC):
    stop_sequences: list[str] = field(default=Factory(lambda: [utils.constants.RESPONSE_STOP_SEQUENCE]), kw_only=True)
    max_tokens: int = field(kw_only=True)

    def count_tokens_left(self, text: str | list) -> int:
        diff = self.max_tokens - self.count_tokens(text)

        if diff > 0:
            return diff
        else:
            return 0

    @abstractmethod
    def count_tokens(self, text: str | list[dict]) -> int:
        ...
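
Subclasses only need to implement count_tokens; count_tokens_left is then derived from max_tokens. A minimal sketch of a hypothetical whitespace-splitting subclass (assuming the same attrs define decorator used in the source above):

from __future__ import annotations

from attr import define

from griptape.tokenizers import BaseTokenizer


@define(frozen=True)
class WhitespaceTokenizer(BaseTokenizer):
    def count_tokens(self, text: str | list) -> int:
        if isinstance(text, str):
            return len(text.split())
        else:
            raise ValueError("Text must be a string.")


tokenizer = WhitespaceTokenizer(max_tokens=100)
print(tokenizer.count_tokens("one two three"))       # 3
print(tokenizer.count_tokens_left("one two three"))  # 100 - 3 = 97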

max_tokens: int = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(default=Factory(lambda: [utils.constants.RESPONSE_STOP_SEQUENCE]), kw_only=True) class-attribute instance-attribute

count_tokens(text) abstractmethod

Source code in griptape/griptape/tokenizers/base_tokenizer.py
@abstractmethod
def count_tokens(self, text: str | list[dict]) -> int:
    ...

count_tokens_left(text)

Source code in griptape/griptape/tokenizers/base_tokenizer.py
def count_tokens_left(self, text: str | list) -> int:
    diff = self.max_tokens - self.count_tokens(text)

    if diff > 0:
        return diff
    else:
        return 0

BedrockClaudeTokenizer

Bases: AnthropicTokenizer

Source code in griptape/griptape/tokenizers/bedrock_claude_tokenizer.py
@define(frozen=True)
class BedrockClaudeTokenizer(AnthropicTokenizer):
    DEFAULT_MODEL = "anthropic.claude-v2:1"
    MODEL_PREFIXES_TO_MAX_TOKENS = {"anthropic.claude-v2:1": 200000, "anthropic.claude": 100000}
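
This subclass only swaps in the Bedrock model identifiers; token counting and the prefix-based max-token lookup are inherited from AnthropicTokenizer. For example (same anthropic dependency assumption as above):

from griptape.tokenizers import BedrockClaudeTokenizer

tokenizer = BedrockClaudeTokenizer(model="anthropic.claude-v2:1")
print(tokenizer.max_tokens)  # 200000, matched by the "anthropic.claude-v2:1" prefix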

DEFAULT_MODEL = 'anthropic.claude-v2:1' class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_TOKENS = {'anthropic.claude-v2:1': 200000, 'anthropic.claude': 100000} class-attribute instance-attribute

BedrockJurassicTokenizer

Bases: SimpleTokenizer

Source code in griptape/griptape/tokenizers/bedrock_jurassic_tokenizer.py
@define(frozen=True)
class BedrockJurassicTokenizer(SimpleTokenizer):
    DEFAULT_MODEL = "ai21.j2-ultra-v1"
    DEFAULT_MAX_TOKENS = 8192
    DEFAULT_CHARACTERS_PER_TOKEN = 6  # https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html#model-customization-prepare-finetuning

    characters_per_token: int = field(
        default=Factory(lambda self: self.DEFAULT_CHARACTERS_PER_TOKEN, takes_self=True), kw_only=True
    )
    max_tokens: int = field(default=Factory(lambda self: self.DEFAULT_MAX_TOKENS, takes_self=True), kw_only=True)
    model: str = field(kw_only=True)
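
Counting is inherited from SimpleTokenizer, which estimates roughly one token per characters_per_token characters using ceiling division. A short sketch:

from griptape.tokenizers import BedrockJurassicTokenizer

tokenizer = BedrockJurassicTokenizer(model="ai21.j2-ultra-v1")
print(tokenizer.max_tokens)              # 8192
print(tokenizer.count_tokens("x" * 13))  # ceil(13 / 6) = 3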

DEFAULT_CHARACTERS_PER_TOKEN = 6 class-attribute instance-attribute

DEFAULT_MAX_TOKENS = 8192 class-attribute instance-attribute

DEFAULT_MODEL = 'ai21.j2-ultra-v1' class-attribute instance-attribute

characters_per_token: int = field(default=Factory(lambda self: self.DEFAULT_CHARACTERS_PER_TOKEN, takes_self=True), kw_only=True) class-attribute instance-attribute

max_tokens: int = field(default=Factory(lambda self: self.DEFAULT_MAX_TOKENS, takes_self=True), kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

BedrockLlamaTokenizer

Bases: SimpleTokenizer

Source code in griptape/griptape/tokenizers/bedrock_llama_tokenizer.py
@define(frozen=True)
class BedrockLlamaTokenizer(SimpleTokenizer):
    DEFAULT_MODEL = "meta.llama2-13b-chat-v1"
    DEFAULT_CHARACTERS_PER_TOKEN = 6  # https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html#model-customization-prepare-finetuning
    DEFAULT_MAX_TOKENS = 2048

    characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True)
    max_tokens: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True)
    stop_sequences: list[str] = field(factory=list, kw_only=True)
    model: str = field(kw_only=True)
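
Note that stop_sequences defaults to an empty list here, overriding the BaseTokenizer default. For example:

from griptape.tokenizers import BedrockLlamaTokenizer

tokenizer = BedrockLlamaTokenizer(model="meta.llama2-13b-chat-v1")
print(tokenizer.max_tokens)      # 2048
print(tokenizer.stop_sequences)  # [] -- no stop sequences by default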

DEFAULT_CHARACTERS_PER_TOKEN = 6 class-attribute instance-attribute

DEFAULT_MAX_TOKENS = 2048 class-attribute instance-attribute

DEFAULT_MODEL = 'meta.llama2-13b-chat-v1' class-attribute instance-attribute

characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True) class-attribute instance-attribute

max_tokens: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(factory=list, kw_only=True) class-attribute instance-attribute

BedrockTitanTokenizer

Bases: SimpleTokenizer

Source code in griptape/griptape/tokenizers/bedrock_titan_tokenizer.py
@define(frozen=True)
class BedrockTitanTokenizer(SimpleTokenizer):
    DEFAULT_MODEL = "amazon.titan-text-express-v1"
    DEFAULT_CHARACTERS_PER_TOKEN = 6  # https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html#model-customization-prepare-finetuning
    DEFAULT_MAX_TOKENS = 4096

    characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True)
    max_tokens: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True)
    stop_sequences: list[str] = field(default=Factory(lambda: ["User:"]), kw_only=True)
    model: str = field(kw_only=True)
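
Here stop_sequences defaults to ["User:"], and counting again uses the inherited character heuristic. For example:

from griptape.tokenizers import BedrockTitanTokenizer

tokenizer = BedrockTitanTokenizer(model="amazon.titan-text-express-v1")
print(tokenizer.stop_sequences)               # ['User:']
print(tokenizer.count_tokens_left("x" * 60))  # 4096 - 10 = 4086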

DEFAULT_CHARACTERS_PER_TOKEN = 6 class-attribute instance-attribute

DEFAULT_MAX_TOKENS = 4096 class-attribute instance-attribute

DEFAULT_MODEL = 'amazon.titan-text-express-v1' class-attribute instance-attribute

characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True) class-attribute instance-attribute

max_tokens: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(default=Factory(lambda: ['User:']), kw_only=True) class-attribute instance-attribute

CohereTokenizer

Bases: BaseTokenizer

Source code in griptape/griptape/tokenizers/cohere_tokenizer.py
@define(frozen=True)
class CohereTokenizer(BaseTokenizer):
    DEFAULT_MODEL = "command"
    DEFAULT_MAX_TOKENS = 2048

    model: str = field(kw_only=True)
    client: Client = field(kw_only=True)
    max_tokens: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        if isinstance(text, str):
            return len(self.client.tokenize(text=text).tokens)
        else:
            raise ValueError("Text must be a string.")
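
A usage sketch (assumes the cohere package is installed; the key below is a hypothetical placeholder, and tokenize is a remote call to the Cohere API):

import cohere

from griptape.tokenizers import CohereTokenizer

client = cohere.Client("YOUR_API_KEY")  # hypothetical placeholder key
tokenizer = CohereTokenizer(model="command", client=client)
print(tokenizer.count_tokens("Hello, world!"))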

DEFAULT_MAX_TOKENS = 2048 class-attribute instance-attribute

DEFAULT_MODEL = 'command' class-attribute instance-attribute

client: Client = field(kw_only=True) class-attribute instance-attribute

max_tokens: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/griptape/tokenizers/cohere_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return len(self.client.tokenize(text=text).tokens)
    else:
        raise ValueError("Text must be a string.")

HuggingFaceTokenizer

Bases: BaseTokenizer

Source code in griptape/griptape/tokenizers/huggingface_tokenizer.py
@define(frozen=True)
class HuggingFaceTokenizer(BaseTokenizer):
    tokenizer: PreTrainedTokenizerBase = field(kw_only=True)
    max_tokens: int = field(
        default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True
    )

    def count_tokens(self, text: str | list) -> int:
        if isinstance(text, str):
            return len(self.tokenizer.encode(text))
        else:
            raise ValueError("Text must be a string.")
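
A usage sketch (assumes the transformers package is installed; the first call downloads the gpt2 tokenizer files from the Hugging Face Hub):

from transformers import AutoTokenizer

from griptape.tokenizers import HuggingFaceTokenizer

tokenizer = HuggingFaceTokenizer(tokenizer=AutoTokenizer.from_pretrained("gpt2"))
print(tokenizer.max_tokens)  # model_max_length of the wrapped tokenizer (1024 for gpt2)
print(tokenizer.count_tokens("Hello, world!"))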

max_tokens: int = field(default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True) class-attribute instance-attribute

tokenizer: PreTrainedTokenizerBase = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/griptape/tokenizers/huggingface_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return len(self.tokenizer.encode(text))
    else:
        raise ValueError("Text must be a string.")

OpenAiTokenizer

Bases: BaseTokenizer

Source code in griptape/griptape/tokenizers/openai_tokenizer.py
@define(frozen=True)
class OpenAiTokenizer(BaseTokenizer):
    DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = "text-davinci-003"
    DEFAULT_OPENAI_GPT_3_CHAT_MODEL = "gpt-3.5-turbo"
    DEFAULT_OPENAI_GPT_4_MODEL = "gpt-4"
    DEFAULT_ENCODING = "cl100k_base"
    DEFAULT_MAX_TOKENS = 2049
    TOKEN_OFFSET = 8

    # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
    MODEL_PREFIXES_TO_MAX_TOKENS = {
        "gpt-4-1106": 128000,
        "gpt-4-32k": 32768,
        "gpt-4": 8192,
        "gpt-3.5-turbo-16k": 16384,
        "gpt-3.5-turbo": 4096,
        "gpt-35-turbo-16k": 16384,
        "gpt-35-turbo": 4096,
        "text-davinci-003": 4097,
        "text-davinci-002": 4097,
        "code-davinci-002": 8001,
        "text-embedding-ada-002": 8191,
        "text-embedding-ada-001": 2046,
    }

    EMBEDDING_MODELS = ["text-embedding-ada-002", "text-embedding-ada-001"]

    model: str = field(kw_only=True)
    max_tokens: int = field(kw_only=True, default=Factory(lambda self: self.default_max_tokens(), takes_self=True))

    @property
    def encoding(self) -> tiktoken.Encoding:
        try:
            return tiktoken.encoding_for_model(self.model)
        except KeyError:
            return tiktoken.get_encoding(self.DEFAULT_ENCODING)

    def default_max_tokens(self) -> int:
        tokens = next(v for k, v in self.MODEL_PREFIXES_TO_MAX_TOKENS.items() if self.model.startswith(k))
        offset = 0 if self.model in self.EMBEDDING_MODELS else self.TOKEN_OFFSET

        return (tokens if tokens else self.DEFAULT_MAX_TOKENS) - offset

    def count_tokens(self, text: str | list[dict], model: str | None = None) -> int:
        """
        Handles the special case of ChatML. Implementation adapted from the official OpenAI notebook:
        https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
        """
        if isinstance(text, list):
            model = model if model else self.model

            try:
                encoding = tiktoken.encoding_for_model(model)
            except KeyError:
                logging.warning("model not found. Using cl100k_base encoding.")

                encoding = tiktoken.get_encoding("cl100k_base")

            if model in {
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-16k-0613",
                "gpt-4-0314",
                "gpt-4-32k-0314",
                "gpt-4-0613",
                "gpt-4-32k-0613",
            }:
                tokens_per_message = 3
                tokens_per_name = 1
            elif model == "gpt-3.5-turbo-0301":
                # every message follows <|start|>{role/name}\n{content}<|end|>\n
                tokens_per_message = 4
                # if there's a name, the role is omitted
                tokens_per_name = -1
            elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
                logging.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
                return self.count_tokens(text, model="gpt-3.5-turbo-0613")
            elif "gpt-4" in model:
                logging.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
                return self.count_tokens(text, model="gpt-4-0613")
            else:
                raise NotImplementedError(
                    f"""token_count() is not implemented for model {model}. 
                    See https://github.com/openai/openai-python/blob/main/chatml.md for 
                    information on how messages are converted to tokens."""
                )

            num_tokens = 0

            for message in text:
                num_tokens += tokens_per_message
                for key, value in message.items():
                    num_tokens += len(encoding.encode(value))
                    if key == "name":
                        num_tokens += tokens_per_name

            # every reply is primed with <|start|>assistant<|message|>
            num_tokens += 3

            return num_tokens
        else:
            return len(self.encoding.encode(text, allowed_special=set(self.stop_sequences)))
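
A usage sketch (assumes tiktoken is installed; encodings are fetched on first use). Plain strings are counted directly with tiktoken, while lists of ChatML-style message dicts get the per-message accounting shown above:

from griptape.tokenizers import OpenAiTokenizer

tokenizer = OpenAiTokenizer(model="gpt-4")
print(tokenizer.max_tokens)  # 8192 minus TOKEN_OFFSET (8) = 8184

print(tokenizer.count_tokens("Hello, world!"))                          # plain tiktoken count
print(tokenizer.count_tokens([{"role": "user", "content": "Hello!"}]))  # ChatML-aware count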

DEFAULT_ENCODING = 'cl100k_base' class-attribute instance-attribute

DEFAULT_MAX_TOKENS = 2049 class-attribute instance-attribute

DEFAULT_OPENAI_GPT_3_CHAT_MODEL = 'gpt-3.5-turbo' class-attribute instance-attribute

DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = 'text-davinci-003' class-attribute instance-attribute

DEFAULT_OPENAI_GPT_4_MODEL = 'gpt-4' class-attribute instance-attribute

EMBEDDING_MODELS = ['text-embedding-ada-002', 'text-embedding-ada-001'] class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_TOKENS = {'gpt-4-1106': 128000, 'gpt-4-32k': 32768, 'gpt-4': 8192, 'gpt-3.5-turbo-16k': 16384, 'gpt-3.5-turbo': 4096, 'gpt-35-turbo-16k': 16384, 'gpt-35-turbo': 4096, 'text-davinci-003': 4097, 'text-davinci-002': 4097, 'code-davinci-002': 8001, 'text-embedding-ada-002': 8191, 'text-embedding-ada-001': 2046} class-attribute instance-attribute

TOKEN_OFFSET = 8 class-attribute instance-attribute

encoding: tiktoken.Encoding property

max_tokens: int = field(kw_only=True, default=Factory(lambda self: self.default_max_tokens(), takes_self=True)) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

count_tokens(text, model=None)

Handles the special case of ChatML. Implementation adapted from the official OpenAI notebook: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
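
For gpt-4-0613, for example, the list [{"role": "user", "content": "Hello!"}] is counted as 3 tokens for the message wrapper, plus the encoded role and content values, plus 3 tokens that prime the assistant reply.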

Source code in griptape/griptape/tokenizers/openai_tokenizer.py
def count_tokens(self, text: str | list[dict], model: str | None = None) -> int:
    """
    Handles the special case of ChatML. Implementation adapted from the official OpenAI notebook:
    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    """
    if isinstance(text, list):
        model = model if model else self.model

        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            logging.warning("model not found. Using cl100k_base encoding.")

            encoding = tiktoken.get_encoding("cl100k_base")

        if model in {
            "gpt-3.5-turbo-0613",
            "gpt-3.5-turbo-16k-0613",
            "gpt-4-0314",
            "gpt-4-32k-0314",
            "gpt-4-0613",
            "gpt-4-32k-0613",
        }:
            tokens_per_message = 3
            tokens_per_name = 1
        elif model == "gpt-3.5-turbo-0301":
            # every message follows <|start|>{role/name}\n{content}<|end|>\n
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
        elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
            logging.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
            return self.count_tokens(text, model="gpt-3.5-turbo-0613")
        elif "gpt-4" in model:
            logging.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
            return self.count_tokens(text, model="gpt-4-0613")
        else:
            raise NotImplementedError(
                f"""token_count() is not implemented for model {model}. 
                See https://github.com/openai/openai-python/blob/main/chatml.md for 
                information on how messages are converted to tokens."""
            )

        num_tokens = 0

        for message in text:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name

        # every reply is primed with <|start|>assistant<|message|>
        num_tokens += 3

        return num_tokens
    else:
        return len(self.encoding.encode(text, allowed_special=set(self.stop_sequences)))

default_max_tokens()

Source code in griptape/griptape/tokenizers/openai_tokenizer.py
def default_max_tokens(self) -> int:
    tokens = next(v for k, v in self.MODEL_PREFIXES_TO_MAX_TOKENS.items() if self.model.startswith(k))
    offset = 0 if self.model in self.EMBEDDING_MODELS else self.TOKEN_OFFSET

    return (tokens if tokens else self.DEFAULT_MAX_TOKENS) - offset

SimpleTokenizer

Bases: BaseTokenizer

Source code in griptape/griptape/tokenizers/simple_tokenizer.py
@define(frozen=True)
class SimpleTokenizer(BaseTokenizer):
    characters_per_token: int = field(kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        if isinstance(text, str):
            num_tokens = (len(text) + self.characters_per_token - 1) // self.characters_per_token

            return num_tokens
        else:
            raise ValueError("Text must be a string.")
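
The count is a ceiling division, so a trailing partial chunk of characters still counts as one token. For example:

from griptape.tokenizers import SimpleTokenizer

tokenizer = SimpleTokenizer(characters_per_token=4, max_tokens=1024)
print(tokenizer.count_tokens("Hello, world!"))  # ceil(13 / 4) = 4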

characters_per_token: int = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/griptape/tokenizers/simple_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        num_tokens = (len(text) + self.characters_per_token - 1) // self.characters_per_token

        return num_tokens
    else:
        raise ValueError("Text must be a string.")