tokenizers

__all__ = ['BaseTokenizer', 'OpenAiTokenizer', 'CohereTokenizer', 'HuggingFaceTokenizer', 'AnthropicTokenizer', 'GoogleTokenizer', 'VoyageAiTokenizer', 'SimpleTokenizer', 'DummyTokenizer', 'AmazonBedrockTokenizer'] module-attribute

AmazonBedrockTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/amazon_bedrock_tokenizer.py
@define()
class AmazonBedrockTokenizer(BaseTokenizer):
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {
        "anthropic.claude-3": 200000,
        "anthropic.claude-v2:1": 200000,
        "anthropic.claude": 100000,
        "cohere.command-r": 128000,
        "cohere.embed": 512,
        "cohere.command": 4000,
        "cohere": 1024,
        "ai21": 8192,
        "meta-llama3": 8000,
        "meta-llama2": 4096,
        "mistral": 32000,
        "amazon": 4096,
    }
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {
        "anthropic.claude": 4096,
        "cohere": 4096,
        "ai21.j2": 8191,
        "meta": 2048,
        "amazon.titan-text-lite": 4096,
        "amazon.titan-text-express": 8192,
        "amazon.titan-text-premier": 3072,
        "amazon": 4096,
        "mistral": 8192,
    }

    model: str = field(kw_only=True)
    characters_per_token: int = field(default=4, kw_only=True)

    def count_tokens(self, text: str) -> int:
        return (len(text) + self.characters_per_token - 1) // self.characters_per_token

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'anthropic.claude-3': 200000, 'anthropic.claude-v2:1': 200000, 'anthropic.claude': 100000, 'cohere.command-r': 128000, 'cohere.embed': 512, 'cohere.command': 4000, 'cohere': 1024, 'ai21': 8192, 'meta-llama3': 8000, 'meta-llama2': 4096, 'mistral': 32000, 'amazon': 4096} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'anthropic.claude': 4096, 'cohere': 4096, 'ai21.j2': 8191, 'meta': 2048, 'amazon.titan-text-lite': 4096, 'amazon.titan-text-express': 8192, 'amazon.titan-text-premier': 3072, 'amazon': 4096, 'mistral': 8192} class-attribute instance-attribute

characters_per_token: int = field(default=4, kw_only=True) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/amazon_bedrock_tokenizer.py
def count_tokens(self, text: str) -> int:
    return (len(text) + self.characters_per_token - 1) // self.characters_per_token
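
A minimal usage sketch (the model ID below is illustrative; any Bedrock model ID works). Token counts are a ceiling-division approximation over characters, and the limits resolve from the first matching prefix in the dictionaries above:

from griptape.tokenizers import AmazonBedrockTokenizer

# Illustrative model ID; substitute any Bedrock model ID.
tokenizer = AmazonBedrockTokenizer(model="anthropic.claude-3-sonnet-20240229-v1:0")

# count_tokens approximates ceil(len(text) / characters_per_token).
print(tokenizer.count_tokens("Hello, world!"))  # ceil(13 / 4) == 4

# Limits resolve from the first matching model prefix.
print(tokenizer.max_input_tokens)  # 200000 via the "anthropic.claude-3" prefix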

AnthropicTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/anthropic_tokenizer.py
@define()
class AnthropicTokenizer(BaseTokenizer):
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"claude-3": 200000, "claude-2.1": 200000, "claude": 100000}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"claude": 4096}

    client: Anthropic = field(
        default=Factory(lambda: import_optional_dependency("anthropic").Anthropic()),
        kw_only=True,
    )

    def count_tokens(self, text: str) -> int:
        return self.client.count_tokens(text)

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'claude-3': 200000, 'claude-2.1': 200000, 'claude': 100000} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'claude': 4096} class-attribute instance-attribute

client: Anthropic = field(default=Factory(lambda: import_optional_dependency('anthropic').Anthropic()), kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/anthropic_tokenizer.py
def count_tokens(self, text: str) -> int:
    return self.client.count_tokens(text)
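
A usage sketch, assuming the anthropic optional dependency is installed and ANTHROPIC_API_KEY is set in the environment (the default Anthropic() client reads it from there):

from griptape.tokenizers import AnthropicTokenizer

tokenizer = AnthropicTokenizer(model="claude-3-opus-20240229")

# Counting is delegated to the Anthropic client's count_tokens.
print(tokenizer.count_tokens("Hello, world!"))
print(tokenizer.max_input_tokens)  # 200000 via the "claude-3" prefix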

BaseTokenizer

Bases: ABC

Source code in griptape/tokenizers/base_tokenizer.py
@define()
class BaseTokenizer(ABC):
    DEFAULT_MAX_INPUT_TOKENS = 4096
    DEFAULT_MAX_OUTPUT_TOKENS = 1000
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {}

    model: str = field(kw_only=True)
    stop_sequences: list[str] = field(default=Factory(list), kw_only=True)
    max_input_tokens: int = field(kw_only=True, default=None)
    max_output_tokens: int = field(kw_only=True, default=None)

    def __attrs_post_init__(self) -> None:
        if hasattr(self, "model"):
            if self.max_input_tokens is None:
                self.max_input_tokens = self._default_max_input_tokens()

            if self.max_output_tokens is None:
                self.max_output_tokens = self._default_max_output_tokens()

    def count_input_tokens_left(self, text: str) -> int:
        diff = self.max_input_tokens - self.count_tokens(text)

        if diff > 0:
            return diff
        else:
            return 0

    def count_output_tokens_left(self, text: str) -> int:
        diff = self.max_output_tokens - self.count_tokens(text)

        if diff > 0:
            return diff
        else:
            return 0

    @abstractmethod
    def count_tokens(self, text: str) -> int: ...

    def _default_max_input_tokens(self) -> int:
        tokens = next((v for k, v in self.MODEL_PREFIXES_TO_MAX_INPUT_TOKENS.items() if self.model.startswith(k)), None)

        if tokens is None:
            logging.warning(
                "Model %s not found in MODEL_PREFIXES_TO_MAX_INPUT_TOKENS, using default value of %s.",
                self.model,
                self.DEFAULT_MAX_INPUT_TOKENS,
            )
            return self.DEFAULT_MAX_INPUT_TOKENS
        else:
            return tokens

    def _default_max_output_tokens(self) -> int:
        tokens = next(
            (v for k, v in self.MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS.items() if self.model.startswith(k)),
            None,
        )

        if tokens is None:
            logging.warning(
                "Model %s not found in MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS, using default value of %s.",
                self.model,
                self.DEFAULT_MAX_OUTPUT_TOKENS,
            )
            return self.DEFAULT_MAX_OUTPUT_TOKENS
        else:
            return tokens

DEFAULT_MAX_INPUT_TOKENS = 4096 class-attribute instance-attribute

DEFAULT_MAX_OUTPUT_TOKENS = 1000 class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {} class-attribute instance-attribute

max_input_tokens: int = field(kw_only=True, default=None) class-attribute instance-attribute

max_output_tokens: int = field(kw_only=True, default=None) class-attribute instance-attribute

model: str = field(kw_only=True) class-attribute instance-attribute

stop_sequences: list[str] = field(default=Factory(list), kw_only=True) class-attribute instance-attribute

__attrs_post_init__()

Source code in griptape/tokenizers/base_tokenizer.py
def __attrs_post_init__(self) -> None:
    if hasattr(self, "model"):
        if self.max_input_tokens is None:
            self.max_input_tokens = self._default_max_input_tokens()

        if self.max_output_tokens is None:
            self.max_output_tokens = self._default_max_output_tokens()

count_input_tokens_left(text)

Source code in griptape/tokenizers/base_tokenizer.py
def count_input_tokens_left(self, text: str) -> int:
    diff = self.max_input_tokens - self.count_tokens(text)

    if diff > 0:
        return diff
    else:
        return 0

count_output_tokens_left(text)

Source code in griptape/tokenizers/base_tokenizer.py
def count_output_tokens_left(self, text: str) -> int:
    diff = self.max_output_tokens - self.count_tokens(text)

    if diff > 0:
        return diff
    else:
        return 0

count_tokens(text) abstractmethod

Source code in griptape/tokenizers/base_tokenizer.py
@abstractmethod
def count_tokens(self, text: str) -> int: ...
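
A sketch of a custom subclass (the WhitespaceTokenizer name and word-splitting heuristic are illustrative, not part of the library). Only count_tokens must be implemented; the limit defaults and token-budget helpers come from the base class:

from attrs import define

from griptape.tokenizers import BaseTokenizer


@define()
class WhitespaceTokenizer(BaseTokenizer):
    # Illustrative only: treats whitespace-separated words as tokens.
    def count_tokens(self, text: str) -> int:
        return len(text.split())


tokenizer = WhitespaceTokenizer(model="my-model")

# No prefix tables are defined on the subclass, so the class falls
# back to the defaults (and logs a warning for the unknown model).
print(tokenizer.max_input_tokens)   # 4096 (DEFAULT_MAX_INPUT_TOKENS)
print(tokenizer.max_output_tokens)  # 1000 (DEFAULT_MAX_OUTPUT_TOKENS)
print(tokenizer.count_input_tokens_left("one two three"))  # 4096 - 3 == 4093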

CohereTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/cohere_tokenizer.py
@define()
class CohereTokenizer(BaseTokenizer):
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"command-r": 128000, "command": 4096, "embed": 512}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"command": 4096, "embed": 512}

    client: Client = field(kw_only=True)

    def count_tokens(self, text: str) -> int:
        return len(self.client.tokenize(text=text, model=self.model).tokens)

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'command-r': 128000, 'command': 4096, 'embed': 512} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'command': 4096, 'embed': 512} class-attribute instance-attribute

client: Client = field(kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/cohere_tokenizer.py
def count_tokens(self, text: str) -> int:
    return len(self.client.tokenize(text=text, model=self.model).tokens)
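
A usage sketch, assuming a valid Cohere API key. Unlike the other tokenizers, the client has no default and must be supplied; counting calls Cohere's tokenize endpoint and measures the returned tokens:

import cohere

from griptape.tokenizers import CohereTokenizer

client = cohere.Client("YOUR_API_KEY")  # placeholder key
tokenizer = CohereTokenizer(model="command", client=client)
print(tokenizer.count_tokens("Hello, world!"))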

DummyTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/dummy_tokenizer.py
@define
class DummyTokenizer(BaseTokenizer):
    model: Optional[str] = field(default=None, kw_only=True)
    max_input_tokens: int = field(init=False, default=0, kw_only=True)
    max_output_tokens: int = field(init=False, default=0, kw_only=True)

    def count_tokens(self, text: str) -> int:
        raise DummyError(__class__.__name__, "count_tokens")

max_input_tokens: int = field(init=False, default=0, kw_only=True) class-attribute instance-attribute

max_output_tokens: int = field(init=False, default=0, kw_only=True) class-attribute instance-attribute

model: Optional[str] = field(default=None, kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/dummy_tokenizer.py
def count_tokens(self, text: str) -> int:
    raise DummyError(__class__.__name__, "count_tokens")
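
DummyTokenizer is a placeholder for configurations without a real tokenizer: its limits are zero and any attempt to count tokens raises. A minimal sketch:

from griptape.tokenizers import DummyTokenizer

tokenizer = DummyTokenizer()
print(tokenizer.max_input_tokens)  # 0

try:
    tokenizer.count_tokens("anything")
except Exception as e:
    print(type(e).__name__)  # DummyError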

GoogleTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/google_tokenizer.py
@define()
class GoogleTokenizer(BaseTokenizer):
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {"gemini": 30720}
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"gemini": 2048}

    api_key: str = field(kw_only=True, metadata={"serializable": True})
    _client: GenerativeModel = field(default=None, kw_only=True, alias="client", metadata={"serializable": False})

    @lazy_property()
    def client(self) -> GenerativeModel:
        genai = import_optional_dependency("google.generativeai")
        genai.configure(api_key=self.api_key)

        return genai.GenerativeModel(self.model)

    def count_tokens(self, text: str) -> int:
        return self.client.count_tokens(text).total_tokens

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'gemini': 30720} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'gemini': 2048} class-attribute instance-attribute

api_key: str = field(kw_only=True, metadata={'serializable': True}) class-attribute instance-attribute

client()

Source code in griptape/tokenizers/google_tokenizer.py
@lazy_property()
def client(self) -> GenerativeModel:
    genai = import_optional_dependency("google.generativeai")
    genai.configure(api_key=self.api_key)

    return genai.GenerativeModel(self.model)

count_tokens(text)

Source code in griptape/tokenizers/google_tokenizer.py
def count_tokens(self, text: str) -> int:
    return self.client.count_tokens(text).total_tokens
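
A usage sketch, assuming the google.generativeai optional dependency and a valid API key. The client is built lazily on first access, and counting is delegated to the Gemini count_tokens endpoint:

from griptape.tokenizers import GoogleTokenizer

tokenizer = GoogleTokenizer(model="gemini-pro", api_key="YOUR_API_KEY")  # placeholder key
print(tokenizer.count_tokens("Hello, world!"))
print(tokenizer.max_input_tokens)  # 30720 via the "gemini" prefix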

HuggingFaceTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/huggingface_tokenizer.py
@define()
class HuggingFaceTokenizer(BaseTokenizer):
    tokenizer: PreTrainedTokenizerBase = field(
        default=Factory(
            lambda self: import_optional_dependency("transformers").AutoTokenizer.from_pretrained(self.model),
            takes_self=True,
        ),
        kw_only=True,
    )
    max_input_tokens: int = field(
        default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True),
        kw_only=True,
    )
    max_output_tokens: int = field(default=4096, kw_only=True)

    def count_tokens(self, text: str) -> int:
        return len(self.tokenizer.encode(text))

max_input_tokens: int = field(default=Factory(lambda self: self.tokenizer.model_max_length, takes_self=True), kw_only=True) class-attribute instance-attribute

max_output_tokens: int = field(default=4096, kw_only=True) class-attribute instance-attribute

tokenizer: PreTrainedTokenizerBase = field(default=Factory(lambda self: import_optional_dependency('transformers').AutoTokenizer.from_pretrained(self.model), takes_self=True), kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/huggingface_tokenizer.py
def count_tokens(self, text: str) -> int:
    return len(self.tokenizer.encode(text))
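
A usage sketch, assuming the transformers optional dependency. The underlying tokenizer is resolved from the model name via AutoTokenizer.from_pretrained, and max_input_tokens defaults to that tokenizer's model_max_length:

from griptape.tokenizers import HuggingFaceTokenizer

tokenizer = HuggingFaceTokenizer(model="gpt2")
print(tokenizer.count_tokens("Hello, world!"))
print(tokenizer.max_input_tokens)  # tokenizer.model_max_length (1024 for gpt2)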

OpenAiTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/openai_tokenizer.py
@define()
class OpenAiTokenizer(BaseTokenizer):
    DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = "gpt-3.5-turbo-instruct"
    DEFAULT_OPENAI_GPT_3_CHAT_MODEL = "gpt-3.5-turbo"
    DEFAULT_OPENAI_GPT_4_MODEL = "gpt-4o"
    DEFAULT_ENCODING = "cl100k_base"
    DEFAULT_MAX_TOKENS = 2049
    DEFAULT_MAX_OUTPUT_TOKENS = 4096
    TOKEN_OFFSET = 8

    # https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {
        "gpt-4o": 128000,
        "gpt-4-1106": 128000,
        "gpt-4-32k": 32768,
        "gpt-4": 8192,
        "gpt-3.5-turbo-16k": 16384,
        "gpt-3.5-turbo": 4096,
        "gpt-35-turbo-16k": 16384,
        "gpt-35-turbo": 4096,
        "text-embedding-ada-002": 8191,
        "text-embedding-ada-001": 2046,
        "text-embedding-3-small": 8191,
        "text-embedding-3-large": 8191,
    }

    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"gpt": 4096}

    EMBEDDING_MODELS = [
        "text-embedding-ada-002",
        "text-embedding-ada-001",
        "text-embedding-3-small",
        "text-embedding-3-large",
    ]

    max_input_tokens: int = field(
        kw_only=True,
        default=Factory(lambda self: self._default_max_input_tokens(), takes_self=True),
    )
    max_output_tokens: int = field(
        kw_only=True,
        default=Factory(lambda self: self._default_max_output_tokens(), takes_self=True),
    )

    @property
    def encoding(self) -> tiktoken.Encoding:
        try:
            return tiktoken.encoding_for_model(self.model)
        except KeyError:
            return tiktoken.get_encoding(self.DEFAULT_ENCODING)

    def _default_max_input_tokens(self) -> int:
        tokens = next((v for k, v in self.MODEL_PREFIXES_TO_MAX_INPUT_TOKENS.items() if self.model.startswith(k)), None)
        offset = 0 if self.model in self.EMBEDDING_MODELS else self.TOKEN_OFFSET

        return (tokens or self.DEFAULT_MAX_TOKENS) - offset

    def _default_max_output_tokens(self) -> int:
        tokens = next(
            (v for k, v in self.MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS.items() if self.model.startswith(k)),
            None,
        )

        if tokens is None:
            return self.DEFAULT_MAX_OUTPUT_TOKENS
        else:
            return tokens

    def count_tokens(self, text: str | list[dict], model: Optional[str] = None) -> int:  # noqa: C901
        """Handles the special case of ChatML.

        Implementation adopted from the official OpenAI notebook:
        https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb.
        """
        if isinstance(text, list):
            model = model or self.model

            try:
                encoding = tiktoken.encoding_for_model(model)
            except KeyError:
                logging.warning("model not found. Using cl100k_base encoding.")

                encoding = tiktoken.get_encoding("cl100k_base")

            if model in {
                "gpt-3.5-turbo-0613",
                "gpt-3.5-turbo-16k-0613",
                "gpt-4-0314",
                "gpt-4-32k-0314",
                "gpt-4-0613",
                "gpt-4-32k-0613",
                "gpt-4o-2024-05-13",
            }:
                tokens_per_message = 3
                tokens_per_name = 1
            elif model == "gpt-3.5-turbo-0301":
                # every message follows <|start|>{role/name}\n{content}<|end|>\n
                tokens_per_message = 4
                # if there's a name, the role is omitted
                tokens_per_name = -1
            elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
                logging.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
                return self.count_tokens(text, model="gpt-3.5-turbo-0613")
            elif "gpt-4o" in model:
                logging.info("gpt-4o may update over time. Returning num tokens assuming gpt-4o-2024-05-13.")
                return self.count_tokens(text, model="gpt-4o-2024-05-13")
            elif "gpt-4" in model:
                logging.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
                return self.count_tokens(text, model="gpt-4-0613")
            else:
                raise NotImplementedError(
                    f"token_count() is not implemented for model {model}. "
                    "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                    "information on how messages are converted to tokens."
                )

            num_tokens = 0

            for message in text:
                num_tokens += tokens_per_message
                for key, value in message.items():
                    num_tokens += len(encoding.encode(value))
                    if key == "name":
                        num_tokens += tokens_per_name

            # every reply is primed with <|start|>assistant<|message|>
            num_tokens += 3

            return num_tokens
        else:
            return len(self.encoding.encode(text, allowed_special=set(self.stop_sequences)))

DEFAULT_ENCODING = 'cl100k_base' class-attribute instance-attribute

DEFAULT_MAX_OUTPUT_TOKENS = 4096 class-attribute instance-attribute

DEFAULT_MAX_TOKENS = 2049 class-attribute instance-attribute

DEFAULT_OPENAI_GPT_3_CHAT_MODEL = 'gpt-3.5-turbo' class-attribute instance-attribute

DEFAULT_OPENAI_GPT_3_COMPLETION_MODEL = 'gpt-3.5-turbo-instruct' class-attribute instance-attribute

DEFAULT_OPENAI_GPT_4_MODEL = 'gpt-4o' class-attribute instance-attribute

EMBEDDING_MODELS = ['text-embedding-ada-002', 'text-embedding-ada-001', 'text-embedding-3-small', 'text-embedding-3-large'] class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'gpt-4o': 128000, 'gpt-4-1106': 128000, 'gpt-4-32k': 32768, 'gpt-4': 8192, 'gpt-3.5-turbo-16k': 16384, 'gpt-3.5-turbo': 4096, 'gpt-35-turbo-16k': 16384, 'gpt-35-turbo': 4096, 'text-embedding-ada-002': 8191, 'text-embedding-ada-001': 2046, 'text-embedding-3-small': 8191, 'text-embedding-3-large': 8191} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'gpt': 4096} class-attribute instance-attribute

TOKEN_OFFSET = 8 class-attribute instance-attribute

encoding: tiktoken.Encoding property

max_input_tokens: int = field(kw_only=True, default=Factory(lambda self: self._default_max_input_tokens(), takes_self=True)) class-attribute instance-attribute

max_output_tokens: int = field(kw_only=True, default=Factory(lambda self: self._default_max_output_tokens(), takes_self=True)) class-attribute instance-attribute

count_tokens(text, model=None)

Handles the special case of ChatML.

Implementation adopted from the official OpenAI notebook: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb.

Source code in griptape/tokenizers/openai_tokenizer.py
def count_tokens(self, text: str | list[dict], model: Optional[str] = None) -> int:  # noqa: C901
    """Handles the special case of ChatML.

    Implementation adopted from the official OpenAI notebook:
    https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb.
    """
    if isinstance(text, list):
        model = model or self.model

        try:
            encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            logging.warning("model not found. Using cl100k_base encoding.")

            encoding = tiktoken.get_encoding("cl100k_base")

        if model in {
            "gpt-3.5-turbo-0613",
            "gpt-3.5-turbo-16k-0613",
            "gpt-4-0314",
            "gpt-4-32k-0314",
            "gpt-4-0613",
            "gpt-4-32k-0613",
            "gpt-4o-2024-05-13",
        }:
            tokens_per_message = 3
            tokens_per_name = 1
        elif model == "gpt-3.5-turbo-0301":
            # every message follows <|start|>{role/name}\n{content}<|end|>\n
            tokens_per_message = 4
            # if there's a name, the role is omitted
            tokens_per_name = -1
        elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
            logging.info("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
            return self.count_tokens(text, model="gpt-3.5-turbo-0613")
        elif "gpt-4o" in model:
            logging.info("gpt-4o may update over time. Returning num tokens assuming gpt-4o-2024-05-13.")
            return self.count_tokens(text, model="gpt-4o-2024-05-13")
        elif "gpt-4" in model:
            logging.info("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
            return self.count_tokens(text, model="gpt-4-0613")
        else:
            raise NotImplementedError(
                f"token_count() is not implemented for model {model}. "
                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                "information on how messages are converted to tokens."
            )

        num_tokens = 0

        for message in text:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name

        # every reply is primed with <|start|>assistant<|message|>
        num_tokens += 3

        return num_tokens
    else:
        return len(self.encoding.encode(text, allowed_special=set(self.stop_sequences)))
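
A usage sketch showing both accepted input shapes (exact counts depend on the tiktoken vocabulary for the model):

from griptape.tokenizers import OpenAiTokenizer

tokenizer = OpenAiTokenizer(model="gpt-4o")

# Plain strings are encoded directly with tiktoken.
print(tokenizer.count_tokens("Hello, world!"))

# Lists of ChatML-style message dicts include the per-message and
# reply-priming overhead described in the OpenAI cookbook.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(tokenizer.count_tokens(messages))

# Non-embedding models subtract TOKEN_OFFSET (8) from the model limit.
print(tokenizer.max_input_tokens)  # 128000 - 8 == 127992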

SimpleTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/simple_tokenizer.py
@define()
class SimpleTokenizer(BaseTokenizer):
    model: str = field(init=False, kw_only=True)
    characters_per_token: int = field(kw_only=True)

    def count_tokens(self, text: str) -> int:
        return (len(text) + self.characters_per_token - 1) // self.characters_per_token

characters_per_token: int = field(kw_only=True) class-attribute instance-attribute

model: str = field(init=False, kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/simple_tokenizer.py
def count_tokens(self, text: str) -> int:
    return (len(text) + self.characters_per_token - 1) // self.characters_per_token
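
A minimal sketch: SimpleTokenizer takes no model at init, and counting is a pure ceiling division over characters:

from griptape.tokenizers import SimpleTokenizer

tokenizer = SimpleTokenizer(characters_per_token=4)
print(tokenizer.count_tokens("Hello, world!"))  # ceil(13 / 4) == 4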

VoyageAiTokenizer

Bases: BaseTokenizer

Source code in griptape/tokenizers/voyageai_tokenizer.py
@define()
class VoyageAiTokenizer(BaseTokenizer):
    MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {
        "voyage-large-2": 16000,
        "voyage-code-2": 16000,
        "voyage-2": 4000,
        "voyage-lite-02-instruct": 4000,
    }
    MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {"voyage": 0}

    api_key: Optional[str] = field(default=None, kw_only=True, metadata={"serializable": False})
    client: Client = field(
        default=Factory(
            lambda self: import_optional_dependency("voyageai").Client(api_key=self.api_key),
            takes_self=True,
        ),
        kw_only=True,
    )

    def count_tokens(self, text: str) -> int:
        return self.client.count_tokens([text])

MODEL_PREFIXES_TO_MAX_INPUT_TOKENS = {'voyage-large-2': 16000, 'voyage-code-2': 16000, 'voyage-2': 4000, 'voyage-lite-02-instruct': 4000} class-attribute instance-attribute

MODEL_PREFIXES_TO_MAX_OUTPUT_TOKENS = {'voyage': 0} class-attribute instance-attribute

api_key: Optional[str] = field(default=None, kw_only=True, metadata={'serializable': False}) class-attribute instance-attribute

client: Client = field(default=Factory(lambda self: import_optional_dependency('voyageai').Client(api_key=self.api_key), takes_self=True), kw_only=True) class-attribute instance-attribute

count_tokens(text)

Source code in griptape/tokenizers/voyageai_tokenizer.py
def count_tokens(self, text: str) -> int:
    return self.client.count_tokens([text])
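
A usage sketch, assuming the voyageai optional dependency and a valid API key. The VoyageAI client's count_tokens expects a list of texts, so the single string is wrapped before delegation:

from griptape.tokenizers import VoyageAiTokenizer

tokenizer = VoyageAiTokenizer(model="voyage-large-2", api_key="YOUR_API_KEY")  # placeholder key
print(tokenizer.count_tokens("Hello, world!"))
print(tokenizer.max_input_tokens)  # 16000 via the "voyage-large-2" prefix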