Skip to content

Tokenizers

__all__ = ['BaseTokenizer', 'OpenAiTokenizer', 'CohereTokenizer', 'HuggingFaceTokenizer', 'AnthropicTokenizer', 'BedrockTitanTokenizer', 'BedrockCohereTokenizer', 'BedrockJurassicTokenizer', 'BedrockClaudeTokenizer', 'BedrockLlamaTokenizer', 'GoogleTokenizer', 'VoyageAiTokenizer', 'SimpleTokenizer', 'DummyTokenizer'] module-attribute

AnthropicTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/anthropic_tokenizer.py
@define()
class AnthropicTokenizer(BaseTokenizer):
    """Tokenizer that delegates token counting to the Anthropic client."""

    # Prefix tables used by BaseTokenizer's default-limit lookup; the more
    # specific prefixes must precede the generic "claude" entry.
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"claude-3": 200000, "claude-2.1": 200000, "claude": 100000}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"claude": 4096}

    client: Anthropic = field(
        default=Factory(lambda: import_optional_dependency("anthropic").Anthropic()), kw_only=True
    )

    def count_tokens(self, text: str | list) -> int:
        """Count tokens in ``text`` via the Anthropic client.

        Raises:
            ValueError: if ``text`` is not a string.
        """
        if not isinstance(text, str):
            raise ValueError("Text must be a string.")

        return self.client.count_tokens(text)

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'claude-3': 200000, 'claude-2.1': 200000, 'claude': 100000} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'claude': 4096} class-attribute instance-attribute

client: Anthropic = field(default=Factory(lambda: import_optional_dependency('anthropic').Anthropic()), kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/anthropic_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return self.client.count_tokens(text)
    else:
        raise ValueError("Text must be a string.")

BaseTokenizer

Bases: ABC

Source code in griptape/tokenizers/base_tokenizer.py
@define()
class BaseTokenizer(ABC):
    """Abstract base for tokenizers.

    Subclasses populate the prefix tables and implement ``count_tokens``.
    Unset ``max_input_tokens`` / ``max_output_tokens`` are resolved after
    init by matching ``model`` against the table prefixes in insertion order.
    """

    # Maps model-name prefixes to token limits. Insertion order matters:
    # more specific prefixes must come before generic ones.
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {}

    model: str = field(kw_only=True)
    stop_sequences: list[str] = field(default=Factory(lambda: [utils.constants.RESPONSE_STOP_SEQUENCE]), kw_only=True)
    max_input_tokens: int = field(kw_only=True, default=None)
    max_output_tokens: int = field(kw_only=True, default=None)

    def __attrs_post_init__(self) -> None:
        # Fill in any unset limit from the per-model defaults.
        if self.max_input_tokens is None:
            self.max_input_tokens = self._default_max_input_tokens()

        if self.max_output_tokens is None:
            self.max_output_tokens = self._default_max_output_tokens()

    def count_input_tokens_left(self, text: str | list) -> int:
        """Return the remaining input-token budget for ``text``, floored at 0."""
        return max(self.max_input_tokens - self.count_tokens(text), 0)

    def count_output_tokens_left(self, text: str | list) -> int:
        """Return the remaining output-token budget for ``text``, floored at 0."""
        return max(self.max_output_tokens - self.count_tokens(text), 0)

    @abstractmethod
    def count_tokens(self, text: str | list[dict]) -> int:
        ...

    def _default_max_input_tokens(self) -> int:
        """Look up the max input tokens for ``self.model`` by prefix.

        Raises:
            ValueError: if no prefix in the table matches the model name.
        """
        tokens = next((v for k, v in self.MODEL_PREFIXES_TO_MAX_INPUT_TOKENS.items() if self.model.startswith(k)), None)

        if tokens is None:
            # Message wording kept consistent with _default_max_output_tokens.
            raise ValueError(f"Unknown model for default max input tokens: {self.model}")
        else:
            return tokens

    def _default_max_output_tokens(self) -> int:
        """Look up the max output tokens for ``self.model`` by prefix.

        Raises:
            ValueError: if no prefix in the table matches the model name.
        """
        tokens = next(
            (v for k, v in self.MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS.items() if self.model.startswith(k)), None
        )

        if tokens is None:
            raise ValueError(f"Unknown model for default max output tokens: {self.model}")
        else:
            return tokens

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {} class-attribute instance-attribute

max_input_tokens: int = field(kw_only=True, default=None) class-attribute instance-attribute

max_output_tokens: int = field(kw_only=True, default=None) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(default=Factory(lambda: [utils.constants.RESPONSE_STOP_SEQUENCE]), kw_only=True) class-attribute instance-attribute

__attrs_post_init__()

Source code in griptape/tokenizers/base_tokenizer.py
def __attrs_post_init__(self) -> None:
    """Populate any unset token limits from the model-specific defaults."""
    if self.max_input_tokens is None:
        self.max_input_tokens = self._default_max_input_tokens()

    if self.max_output_tokens is None:
        self.max_output_tokens = self._default_max_output_tokens()

count_input_tokens_left(text)

Source code in griptape/tokenizers/base_tokenizer.py
def count_input_tokens_left(self, text: str | list) -> int:
    diff = self.max_input_tokens - self.count_tokens(text)

    if diff > 0:
        return diff
    else:
        return 0

count_output_tokens_left(text)

Source code in griptape/tokenizers/base_tokenizer.py
def count_output_tokens_left(self, text: str | list) -> int:
    diff = self.max_output_tokens - self.count_tokens(text)

    if diff > 0:
        return diff
    else:
        return 0

count_tokens(text) abstractmethod

Source code in griptape/tokenizers/base_tokenizer.py
@abstractmethod
def count_tokens(self, text: str | list[dict]) -> int:
    ...

BedrockClaudeTokenizer

Bases: AnthropicTokenizer

Source code in griptape/tokenizers/bedrock_claude_tokenizer.py
@define()
class BedrockClaudeTokenizer(AnthropicTokenizer):
    """Anthropic tokenizer configured with Bedrock-hosted Claude model ids."""

    # Insertion order matters: the specific prefixes must precede the
    # generic "anthropic.claude" fallback for prefix matching.
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {
        "anthropic.claude-3": 200000,
        "anthropic.claude-v2:1": 200000,
        "anthropic.claude": 100000,
    }
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"anthropic.claude": 4096}

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'anthropic.claude-3': 200000, 'anthropic.claude-v2:1': 200000, 'anthropic.claude': 100000} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'anthropic.claude': 4096} class-attribute instance-attribute

BedrockCohereTokenizer

Bases: SimpleTokenizer

Source code in griptape/tokenizers/bedrock_cohere_tokenizer.py
@define()
class BedrockCohereTokenizer(SimpleTokenizer):
    """Character-count tokenizer tuned for Cohere models on Bedrock."""

    # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-embed.html
    DEFAULT_CHARACTERS_PER_TOKEN = 4
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"cohere": 1024}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"cohere": 4096}

    # Redeclared so callers can pass model (SimpleTokenizer marks it init=False).
    model: str = field(kw_only=True)
    characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True)

DEFAULT_CHARACTERS_PER_TOKEN = 4 class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'cohere': 1024} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'cohere': 4096} class-attribute instance-attribute

characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

BedrockJurassicTokenizer

Bases: SimpleTokenizer

Source code in griptape/tokenizers/bedrock_jurassic_tokenizer.py
@define()
class BedrockJurassicTokenizer(SimpleTokenizer):
    """Character-count tokenizer tuned for AI21 Jurassic models on Bedrock."""

    DEFAULT_CHARACTERS_PER_TOKEN = 6  # https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html#model-customization-prepare-finetuning
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"ai21": 8192}
    # Insertion order matters: specific j2 model ids precede the "ai21" fallback.
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {
        "ai21.j2-mid-v1": 8191,
        "ai21.j2-ultra-v1": 8191,
        "ai21.j2-large-v1": 8191,
        "ai21": 2048,
    }

    # Redeclared so callers can pass model (SimpleTokenizer marks it init=False).
    model: str = field(kw_only=True)
    # Factory(takes_self=True) resolves the constant at instance-creation time,
    # so a subclass overriding DEFAULT_CHARACTERS_PER_TOKEN is honored.
    characters_per_token: int = field(
        default=Factory(lambda self: self.DEFAULT_CHARACTERS_PER_TOKEN, takes_self=True), kw_only=True
    )

DEFAULT_CHARACTERS_PER_TOKEN = 6 class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'ai21': 8192} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'ai21.j2-mid-v1': 8191, 'ai21.j2-ultra-v1': 8191, 'ai21.j2-large-v1': 8191, 'ai21': 2048} class-attribute instance-attribute

characters_per_token: int = field(default=Factory(lambda self: self.DEFAULT_CHARACTERS_PER_TOKEN, takes_self=True), kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

BedrockLlamaTokenizer

Bases: SimpleTokenizer

Source code in griptape/tokenizers/bedrock_llama_tokenizer.py
@define()
class BedrockLlamaTokenizer(SimpleTokenizer):
    """Character-count tokenizer tuned for Meta Llama models on Bedrock."""

    DEFAULT_CHARACTERS_PER_TOKEN = 6  # https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html#model-customization-prepare-finetuning
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"meta": 2048}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"meta": 2048}

    # Redeclared so callers can pass model (SimpleTokenizer marks it init=False).
    model: str = field(kw_only=True)
    characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True)
    # Overrides the BaseTokenizer default with no stop sequences at all.
    stop_sequences: list[str] = field(factory=list, kw_only=True)

DEFAULT_CHARACTERS_PER_TOKEN = 6 class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'meta': 2048} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'meta': 2048} class-attribute instance-attribute

characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(factory=list, kw_only=True) class-attribute instance-attribute

BedrockTitanTokenizer

Bases: SimpleTokenizer

Source code in griptape/tokenizers/bedrock_titan_tokenizer.py
@define()
class BedrockTitanTokenizer(SimpleTokenizer):
    """Character-count tokenizer tuned for Amazon Titan models on Bedrock."""

    DEFAULT_CHARACTERS_PER_TOKEN = 6  # https://docs.aws.amazon.com/bedrock/latest/userguide/model-customization-prepare.html#model-customization-prepare-finetuning
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"amazon": 4096}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"amazon": 8000}

    # Redeclared so callers can pass model (SimpleTokenizer marks it init=False).
    model: str = field(kw_only=True)
    characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True)
    # Overrides the BaseTokenizer default stop sequence with "User:".
    stop_sequences: list[str] = field(default=Factory(lambda: ["User:"]), kw_only=True)

DEFAULT_CHARACTERS_PER_TOKEN = 6 class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'amazon': 4096} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'amazon': 8000} class-attribute instance-attribute

characters_per_token: int = field(default=DEFAULT_CHARACTERS_PER_TOKEN, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(default=Factory(lambda: ['User:']), kw_only=True) class-attribute instance-attribute

CohereTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/cohere_tokenizer.py
@define()
class CohereTokenizer(BaseTokenizer):
    """Tokenizer that delegates tokenization to a Cohere client."""

    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"command": 4096}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"command": 4096}

    client: Client = field(kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        """Tokenize ``text`` with the Cohere client and return the token count.

        Raises:
            ValueError: if ``text`` is not a string.
        """
        if not isinstance(text, str):
            raise ValueError("Text must be a string.")

        return len(self.client.tokenize(text=text).tokens)

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'command': 4096} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'command': 4096} class-attribute instance-attribute

client: Client = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/cohere_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return len(self.client.tokenize(text=text).tokens)
    else:
        raise ValueError("Text must be a string.")

DummyTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/dummy_tokenizer.py
@define()
class DummyTokenizer(BaseTokenizer):
    """Placeholder tokenizer: every counting call raises DummyException."""

    model: str = field(init=False)
    max_input_tokens: int = field(default=0, kw_only=True)
    max_output_tokens: int = field(default=0, kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        """Always raises DummyException; this tokenizer cannot count tokens."""
        raise DummyException(__class__.__name__, "count_tokens")

max_input_tokens: int = field(default=0, kw_only=True) class-attribute instance-attribute

max_output_tokens: int = field(default=0, kw_only=True) class-attribute instance-attribute

model: str = field(init=False) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/dummy_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    # Intentionally unimplemented: signals that DummyTokenizer was used where
    # a real tokenizer is required.
    raise DummyException(__class__.__name__, "count_tokens")

GoogleTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/google_tokenizer.py
@define()
class GoogleTokenizer(BaseTokenizer):
    """Tokenizer backed by a google.generativeai GenerativeModel client."""

    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"gemini": 30720}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"gemini": 2048}

    api_key: str = field(kw_only=True, metadata={"serializable": True})
    model_client: GenerativeModel = field(
        default=Factory(lambda self: self._default_model_client(), takes_self=True), kw_only=True
    )

    def count_tokens(self, text: str | list) -> int:
        """Count tokens via the model client; accepts a string or a list.

        Raises:
            ValueError: if ``text`` is neither a string nor a list.
        """
        if not isinstance(text, (str, list)):
            raise ValueError("Text must be a string or a list.")

        return self.model_client.count_tokens(text).total_tokens

    def _default_model_client(self) -> GenerativeModel:
        """Configure google.generativeai with the API key and build a model client."""
        generativeai = import_optional_dependency("google.generativeai")
        generativeai.configure(api_key=self.api_key)

        return generativeai.GenerativeModel(self.model)

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'gemini': 30720} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'gemini': 2048} class-attribute instance-attribute

api_key: str = field(kw_only=True, metadata={'serializable': True}) class-attribute instance-attribute

model_client: GenerativeModel = field(default=Factory(lambda self: self._default_model_client(), takes_self=True), kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/google_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str) or isinstance(text, list):
        return self.model_client.count_tokens(text).total_tokens
    else:
        raise ValueError("Text must be a string or a list.")

HuggingFaceTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/huggingface_tokenizer.py
@define()
class HuggingFaceTokenizer(BaseTokenizer):
    """Adapter around a Hugging Face PreTrainedTokenizerBase instance."""

    tokenizer: PreTrainedTokenizerBase = field(kw_only=True)
    model: str = field(init=False, kw_only=True)
    # Defaults to the wrapped tokenizer's own model_max_length.
    max_input_tokens: int = field(
        default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True
    )
    max_output_tokens: int = field(kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        """Encode ``text`` with the wrapped tokenizer and return the token count.

        Raises:
            ValueError: if ``text`` is not a string.
        """
        if not isinstance(text, str):
            raise ValueError("Text must be a string.")

        return len(self.tokenizer.encode(text))

max_input_tokens: int = field(default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True) class-attribute instance-attribute

max_output_tokens: int = field(kw_only=True) class-attribute instance-attribute

model: str = field(init=False, kw_only=True) class-attribute instance-attribute

tokenizer: PreTrainedTokenizerBase = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/huggingface_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return len(self.tokenizer.encode(text))
    else:
        raise ValueError("Text must be a string.")

OpenAiTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/openai_tokenizer.py
@define()
class OpenAiTokenizer(BaseTokenizer):
    """Tokenizer for OpenAI chat, completion, and embedding models, backed by tiktoken."""

    DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = "gpt-3.5-turbo-instruct"
    DEFAULT_OPENAI_GPT_3_CHAT_MODEL = "gpt-3.5-turbo"
    DEFAULT_OPENAI_GPT_4_MODEL = "gpt-4"
    DEFAULT_ENCODING = "cl100k_base"
    DEFAULT_MAX_TOKENS = 2049
    DEFAULT_MAX_OUTPUT_TOKENS = 4096
    # Safety margin subtracted from the input limit of non-embedding models.
    TOKEN_OFFSET = 8

    # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
    # Insertion order matters: more specific prefixes precede generic ones.
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {
        "gpt-4-1106": 128000,
        "gpt-4-32k": 32768,
        "gpt-4": 8192,
        "gpt-3.5-turbo-16k": 16384,
        "gpt-3.5-turbo": 4096,
        "gpt-35-turbo-16k": 16384,
        "gpt-35-turbo": 4096,
        "text-embedding-ada-002": 8191,
        "text-embedding-ada-001": 2046,
        "text-embedding-3-small": 8191,
        "text-embedding-3-large": 8191,
    }

    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"gpt": 4096}

    # Models exempt from TOKEN_OFFSET in _default_max_input_tokens.
    EMBEDDING_MODELS = [
        "text-embedding-ada-002",
        "text-embedding-ada-001",
        "text-embedding-3-small",
        "text-embedding-3-large",
    ]

    @property
    def encoding(self) -> tiktoken.Encoding:
        """Return the tiktoken encoding for ``self.model``, falling back to the default."""
        try:
            return tiktoken.encoding_for_model(self.model)
        except KeyError:
            return tiktoken.get_encoding(self.DEFAULT_ENCODING)

    def _default_max_input_tokens(self) -> int:
        """Resolve the input limit by model prefix.

        Unlike the base class, falls back to DEFAULT_MAX_TOKENS instead of
        raising, and subtracts TOKEN_OFFSET for non-embedding models.
        """
        tokens = next((v for k, v in self.MODEL_PREFIXES_TO_MAX_INPUT_TOKENS.items() if self.model.startswith(k)), None)
        offset = 0 if self.model in self.EMBEDDING_MODELS else self.TOKEN_OFFSET

        return (tokens if tokens else self.DEFAULT_MAX_TOKENS) - offset

    def _default_max_output_tokens(self) -> int:
        """Resolve the output limit by model prefix, defaulting instead of raising."""
        tokens = next(
            (v for k, v in self.MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS.items() if self.model.startswith(k)), None
        )

        if tokens is None:
            return self.DEFAULT_MAX_OUTPUT_TOKENS
        else:
            return tokens

    def count_tokens(self, text: str | list[dict], model: Optional[str] = None) -> int:
        """
        Count tokens in a plain string or a list of ChatML message dicts.

        Handles the special case of ChatML. Implementation adopted from the official OpenAI notebook:
        https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
        """
        if isinstance(text, list):
            model = model if model else self.model

            try:
                encoding = tiktoken.encoding_for_model(model)
            except KeyError:
                logging.warning("model not found. Using cl100k_base encoding.")

                encoding = tiktoken.get_encoding("cl100k_base")

            # Per-message/name token overheads differ between model snapshots.
            if model in {
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-16k-0613",
                "gpt-4-0314",
                "gpt-4-32k-0314",
                "gpt-4-0613",
                "gpt-4-32k-0613",
            }:
                tokens_per_message = 3
                tokens_per_name = 1
            elif model == "gpt-3.5-turbo-0301":
                # every message follows <|start|>{role/name}\n{content}<|end|>\n
                tokens_per_message = 4
                # if there's a name, the role is omitted
                tokens_per_name = -1
            elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
                logging.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
                return self.count_tokens(text, model="gpt-3.5-turbo-0613")
            elif "gpt-4" in model:
                logging.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
                return self.count_tokens(text, model="gpt-4-0613")
            else:
                # Fixed: the message previously referenced a nonexistent
                # token_count(); this method is count_tokens().
                raise NotImplementedError(
                    f"""count_tokens() is not implemented for model {model}. 
                    See https://github.com/openai/openai-python/blob/main/chatml.md for 
                    information on how messages are converted to tokens."""
                )

            num_tokens = 0

            for message in text:
                num_tokens += tokens_per_message
                for key, value in message.items():
                    num_tokens += len(encoding.encode(value))
                    if key == "name":
                        num_tokens += tokens_per_name

            # every reply is primed with <|start|>assistant<|message|>
            num_tokens += 3

            return num_tokens
        else:
            # Plain strings: pass stop sequences through as allowed special tokens.
            return len(self.encoding.encode(text, allowed_special=set(self.stop_sequences)))

DEFAULT_ENCODING = 'cl100k_base' class-attribute instance-attribute

DEFAULT_MAX_OUTPUT_TOKENS = 4096 class-attribute instance-attribute

DEFAULT_MAX_TOKENS = 2049 class-attribute instance-attribute

DEFAULT_OPENAI_GPT_3_CHAT_MODEL = 'gpt-3.5-turbo' class-attribute instance-attribute

DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = 'gpt-3.5-turbo-instruct' class-attribute instance-attribute

DEFAULT_OPENAI_GPT_4_MODEL = 'gpt-4' class-attribute instance-attribute

EMBEDDING_MODELS = ['text-embedding-ada-002', 'text-embedding-ada-001', 'text-embedding-3-small', 'text-embedding-3-large'] class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'gpt-4-1106': 128000, 'gpt-4-32k': 32768, 'gpt-4': 8192, 'gpt-3.5-turbo-16k': 16384, 'gpt-3.5-turbo': 4096, 'gpt-35-turbo-16k': 16384, 'gpt-35-turbo': 4096, 'text-embedding-ada-002': 8191, 'text-embedding-ada-001': 2046, 'text-embedding-3-small': 8191, 'text-embedding-3-large': 8191} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'gpt': 4096} class-attribute instance-attribute

TOKEN_OFFSET = 8 class-attribute instance-attribute

encoding: tiktoken.Encoding property

count_tokens(text, model=None)

Handles the special case of ChatML. Implementation adopted from the official OpenAI notebook: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

Source code in griptape/tokenizers/openai_tokenizer.py
def count_tokens(self, text: str | list[dict], model: Optional[str] = None) -> int:
    """
    Handles the special case of ChatML. Implementation adopted from the official OpenAI notebook:
    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
    """
    if isinstance(text, list):
        model = model if model else self.model

        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            logging.warning("model not found. Using cl100k_base encoding.")

            encoding = tiktoken.get_encoding("cl100k_base")

        if model in {
            "gpt-3.5-turbo-0613",
            "gpt-3.5-turbo-16k-0613",
            "gpt-4-0314",
            "gpt-4-32k-0314",
            "gpt-4-0613",
            "gpt-4-32k-0613",
        }:
            tokens_per_message = 3
            tokens_per_name = 1
        elif model == "gpt-3.5-turbo-0301":
            # every message follows <|start|>{role/name}\n{content}<|end|>\n
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
        elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
            logging.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
            return self.count_tokens(text, model="gpt-3.5-turbo-0613")
        elif "gpt-4" in model:
            logging.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
            return self.count_tokens(text, model="gpt-4-0613")
        else:
            raise NotImplementedError(
                f"""token_count() is not implemented for model {model}. 
                See https://github.com/openai/openai-python/blob/main/chatml.md for 
                information on how messages are converted to tokens."""
            )

        num_tokens = 0

        for message in text:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name

        # every reply is primed with <|start|>assistant<|message|>
        num_tokens += 3

        return num_tokens
    else:
        return len(self.encoding.encode(text, allowed_special=set(self.stop_sequences)))

SimpleTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/simple_tokenizer.py
@define()
class SimpleTokenizer(BaseTokenizer):
    """Heuristic tokenizer: estimates tokens from the character count alone."""

    model: str = field(kw_only=True, init=False)
    characters_per_token: int = field(kw_only=True)

    def count_tokens(self, text: str | list) -> int:
        """Estimate tokens as ceil(len(text) / characters_per_token).

        Raises:
            ValueError: if ``text`` is not a string.
        """
        if not isinstance(text, str):
            raise ValueError("Text must be a string.")

        # Ceiling division without floats: -(-a // b) == ceil(a / b).
        return -(-len(text) // self.characters_per_token)

characters_per_token: int = field(kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True, init=False) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/simple_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        num_tokens = (len(text) + self.characters_per_token - 1) // self.characters_per_token

        return num_tokens
    else:
        raise ValueError("Text must be a string.")

VoyageAiTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/voyageai_tokenizer.py
@define()
class VoyageAiTokenizer(BaseTokenizer):
    """Tokenizer that delegates token counting to the VoyageAI client."""

    # Insertion order matters: specific model names precede shorter prefixes.
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {
        "voyage-large-2": 16000,
        "voyage-code-2": 16000,
        "voyage-2": 4000,
        "voyage-lite-02-instruct": 4000,
    }
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"voyage": 0}

    api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False})
    client: Client = field(
        default=Factory(
            lambda self: import_optional_dependency("voyageai").Client(api_key=self.api_key), takes_self=True
        ),
        kw_only=True,
    )

    def count_tokens(self, text: str | list) -> int:
        """Count tokens for a single string via the VoyageAI client.

        Raises:
            ValueError: if ``text`` is not a string.
        """
        if not isinstance(text, str):
            raise ValueError("Text must be a str.")

        return self.client.count_tokens([text])

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'voyage-large-2': 16000, 'voyage-code-2': 16000, 'voyage-2': 4000, 'voyage-lite-02-instruct': 4000} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'voyage': 0} class-attribute instance-attribute

api_key: Optional[str] = field(default=None, kw_only=True, metadata={'serializable': False}) class-attribute instance-attribute

client: Client = field(default=Factory(lambda self: import_optional_dependency('voyageai').Client(api_key=self.api_key), takes_self=True), kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/voyageai_tokenizer.py
def count_tokens(self, text: str | list) -> int:
    if isinstance(text, str):
        return self.client.count_tokens([text])
    else:
        raise ValueError("Text must be a str.")