Drivers
__all__ = ['BasePromptDriver', 'OpenAiChatPromptDriver', 'OpenAiCompletionPromptDriver', 'AzureOpenAiChatPromptDriver', 'AzureOpenAiCompletionPromptDriver', 'CoherePromptDriver', 'HuggingFacePipelinePromptDriver', 'HuggingFaceHubPromptDriver', 'AnthropicPromptDriver', 'AmazonSageMakerPromptDriver', 'AmazonBedrockPromptDriver', 'BaseMultiModelPromptDriver', 'BaseConversationMemoryDriver', 'LocalConversationMemoryDriver', 'AmazonDynamoDbConversationMemoryDriver', 'BaseEmbeddingDriver', 'OpenAiEmbeddingDriver', 'AzureOpenAiEmbeddingDriver', 'BedrockTitanEmbeddingDriver', 'BaseVectorStoreDriver', 'LocalVectorStoreDriver', 'PineconeVectorStoreDriver', 'MarqoVectorStoreDriver', 'MongoDbAtlasVectorStoreDriver', 'RedisVectorStoreDriver', 'OpenSearchVectorStoreDriver', 'AmazonOpenSearchVectorStoreDriver', 'PgVectorVectorStoreDriver', 'BaseSqlDriver', 'AmazonRedshiftSqlDriver', 'SnowflakeSqlDriver', 'SqlDriver', 'BasePromptModelDriver', 'SageMakerLlamaPromptModelDriver', 'SageMakerFalconPromptModelDriver', 'BedrockTitanPromptModelDriver', 'BedrockClaudePromptModelDriver', 'BedrockJurassicPromptModelDriver']
module-attribute
AmazonBedrockPromptDriver
Bases: BaseMultiModelPromptDriver
Source code in griptape/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py
bedrock_client: Any = field(default=Factory(lambda : self.session.client('bedrock-runtime'), takes_self=True), kw_only=True)
class-attribute
instance-attribute
session: boto3.Session = field(default=Factory(lambda : import_optional_dependency('boto3').Session()), kw_only=True)
class-attribute
instance-attribute
try_run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py
try_stream(prompt_stack)
Source code in griptape/griptape/drivers/prompt/amazon_bedrock_prompt_driver.py
AmazonDynamoDbConversationMemoryDriver
Bases: BaseConversationMemoryDriver
Source code in griptape/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py
partition_key: str = field(kw_only=True)
class-attribute
instance-attribute
partition_key_value: str = field(kw_only=True)
class-attribute
instance-attribute
session: boto3.Session = field(default=Factory(lambda : import_optional_dependency('boto3').Session()), kw_only=True)
class-attribute
instance-attribute
table: Any = field(init=False)
class-attribute
instance-attribute
table_name: str = field(kw_only=True)
class-attribute
instance-attribute
value_attribute_key: str = field(kw_only=True)
class-attribute
instance-attribute
__attrs_post_init__()
load()
Source code in griptape/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py
store(memory)
Source code in griptape/griptape/drivers/memory/conversation/amazon_dynamodb_conversation_memory_driver.py
AmazonOpenSearchVectorStoreDriver
Bases: OpenSearchVectorStoreDriver
A Vector Store Driver for Amazon OpenSearch.
Attributes:
Name | Type | Description |
---|---|---|
session |
Session
|
The boto3 session to use. |
http_auth |
Optional[str | Tuple[str, str]]
|
The HTTP authentication credentials to use. Defaults to using credentials in the boto3 session. |
client |
Optional[OpenSearch]
|
An optional OpenSearch client to use. Defaults to a new client using the host, port, http_auth, use_ssl, and verify_certs attributes. |
Source code in griptape/griptape/drivers/vector/amazon_opensearch_vector_store_driver.py
client: Optional[OpenSearch] = field(default=Factory(lambda : import_optional_dependency('opensearchpy').OpenSearch(hosts=[{'host': self.host, 'port': self.port}], http_auth=self.http_auth, use_ssl=self.use_ssl, verify_certs=self.verify_certs, connection_class=import_optional_dependency('opensearchpy').RequestsHttpConnection), takes_self=True))
class-attribute
instance-attribute
http_auth: Optional[str | Tuple[str, str]] = field(default=Factory(lambda : import_optional_dependency('requests_aws4auth').AWS4Auth(self.session.get_credentials().access_key, self.session.get_credentials().secret_key, self.session.region_name, 'es'), takes_self=True))
class-attribute
instance-attribute
session: Session = field(kw_only=True)
class-attribute
instance-attribute
AmazonRedshiftSqlDriver
Bases: BaseSqlDriver
Source code in griptape/griptape/drivers/sql/amazon_redshift_sql_driver.py
client: Any = field(default=Factory(lambda : self.session.client('redshift-data'), takes_self=True), kw_only=True)
class-attribute
instance-attribute
cluster_identifier: Optional[str] = field(default=None, kw_only=True)
class-attribute
instance-attribute
database: str = field(kw_only=True)
class-attribute
instance-attribute
database_credentials_secret_arn: Optional[str] = field(default=None, kw_only=True)
class-attribute
instance-attribute
db_user: Optional[str] = field(default=None, kw_only=True)
class-attribute
instance-attribute
session: boto3.Session = field(kw_only=True)
class-attribute
instance-attribute
wait_for_query_completion_sec: float = field(default=0.3, kw_only=True)
class-attribute
instance-attribute
workgroup_name: Optional[str] = field(default=None, kw_only=True)
class-attribute
instance-attribute
execute_query(query)
execute_query_raw(query)
Source code in griptape/griptape/drivers/sql/amazon_redshift_sql_driver.py
get_table_schema(table, schema=None)
Source code in griptape/griptape/drivers/sql/amazon_redshift_sql_driver.py
validate_params(_, workgroup_name)
Source code in griptape/griptape/drivers/sql/amazon_redshift_sql_driver.py
AmazonSageMakerPromptDriver
Bases: BaseMultiModelPromptDriver
Source code in griptape/griptape/drivers/prompt/amazon_sagemaker_prompt_driver.py
custom_attributes: str = field(default='accept_eula=true', kw_only=True)
class-attribute
instance-attribute
sagemaker_client: Any = field(default=Factory(lambda : self.session.client('sagemaker-runtime'), takes_self=True), kw_only=True)
class-attribute
instance-attribute
session: boto3.Session = field(default=Factory(lambda : import_optional_dependency('boto3').Session()), kw_only=True)
class-attribute
instance-attribute
stream: bool = field(default=False, kw_only=True)
class-attribute
instance-attribute
try_run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/amazon_sagemaker_prompt_driver.py
try_stream(_)
AnthropicPromptDriver
Bases: BasePromptDriver
Attributes:
Name | Type | Description |
---|---|---|
api_key |
str
|
Anthropic API key. |
model |
str
|
Anthropic model name. |
tokenizer |
AnthropicTokenizer
|
Custom |
Source code in griptape/griptape/drivers/prompt/anthropic_prompt_driver.py
api_key: str = field(kw_only=True)
class-attribute
instance-attribute
model: str = field(kw_only=True)
class-attribute
instance-attribute
tokenizer: AnthropicTokenizer = field(default=Factory(lambda : AnthropicTokenizer(model=self.model), takes_self=True), kw_only=True)
class-attribute
instance-attribute
default_prompt_stack_to_string_converter(prompt_stack)
Source code in griptape/griptape/drivers/prompt/anthropic_prompt_driver.py
try_run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/anthropic_prompt_driver.py
try_stream(prompt_stack)
Source code in griptape/griptape/drivers/prompt/anthropic_prompt_driver.py
AzureOpenAiChatPromptDriver
Bases: OpenAiChatPromptDriver
Attributes:
Name | Type | Description |
---|---|---|
azure_deployment |
str
|
An Azure OpenAi deployment id. |
azure_endpoint |
str
|
An Azure OpenAi endpoint. |
azure_ad_token |
Optional[str]
|
An optional Azure Active Directory token. |
azure_ad_token_provider |
Optional[str]
|
An optional Azure Active Directory token provider. |
api_version |
str
|
An Azure OpenAi API version. |
client |
AzureOpenAI
|
An |
Source code in griptape/griptape/drivers/prompt/azure_openai_chat_prompt_driver.py
api_version: str = field(default='2023-05-15', kw_only=True)
class-attribute
instance-attribute
azure_ad_token: Optional[str] = field(kw_only=True, default=None)
class-attribute
instance-attribute
azure_ad_token_provider: Optional[str] = field(kw_only=True, default=None)
class-attribute
instance-attribute
azure_deployment: str = field(kw_only=True)
class-attribute
instance-attribute
azure_endpoint: str = field(kw_only=True)
class-attribute
instance-attribute
client: openai.AzureOpenAI = field(default=Factory(lambda : openai.AzureOpenAI(organization=self.organization, api_key=self.api_key, api_version=self.api_version, azure_endpoint=self.azure_endpoint, azure_deployment=self.azure_deployment, azure_ad_token=self.azure_ad_token, azure_ad_token_provider=self.azure_ad_token_provider), takes_self=True))
class-attribute
instance-attribute
AzureOpenAiCompletionPromptDriver
Bases: OpenAiCompletionPromptDriver
Attributes:
Name | Type | Description |
---|---|---|
azure_deployment |
str
|
An Azure OpenAi deployment id. |
azure_endpoint |
str
|
An Azure OpenAi endpoint. |
azure_ad_token |
Optional[str]
|
An optional Azure Active Directory token. |
azure_ad_token_provider |
Optional[str]
|
An optional Azure Active Directory token provider. |
api_version |
str
|
An Azure OpenAi API version. |
client |
AzureOpenAI
|
An |
Source code in griptape/griptape/drivers/prompt/azure_openai_completion_prompt_driver.py
api_version: str = field(default='2023-05-15', kw_only=True)
class-attribute
instance-attribute
azure_ad_token: Optional[str] = field(kw_only=True, default=None)
class-attribute
instance-attribute
azure_ad_token_provider: Optional[str] = field(kw_only=True, default=None)
class-attribute
instance-attribute
azure_deployment: str = field(kw_only=True)
class-attribute
instance-attribute
azure_endpoint: str = field(kw_only=True)
class-attribute
instance-attribute
client: openai.AzureOpenAI = field(default=Factory(lambda : openai.AzureOpenAI(organization=self.organization, api_key=self.api_key, api_version=self.api_version, azure_endpoint=self.azure_endpoint, azure_deployment=self.azure_deployment), takes_self=True))
class-attribute
instance-attribute
AzureOpenAiEmbeddingDriver
Bases: OpenAiEmbeddingDriver
Attributes:
Name | Type | Description |
---|---|---|
azure_deployment |
str
|
An Azure OpenAi deployment id. |
azure_endpoint |
str
|
An Azure OpenAi endpoint. |
azure_ad_token |
Optional[str]
|
An optional Azure Active Directory token. |
azure_ad_token_provider |
Optional[str]
|
An optional Azure Active Directory token provider. |
api_version |
str
|
An Azure OpenAi API version. |
tokenizer |
OpenAiTokenizer
|
An |
client |
AzureOpenAI
|
An |
Source code in griptape/griptape/drivers/embedding/azure_openai_embedding_driver.py
api_version: str = field(default='2023-05-15', kw_only=True)
class-attribute
instance-attribute
azure_ad_token: Optional[str] = field(kw_only=True, default=None)
class-attribute
instance-attribute
azure_ad_token_provider: Optional[str] = field(kw_only=True, default=None)
class-attribute
instance-attribute
azure_deployment: str = field(kw_only=True)
class-attribute
instance-attribute
azure_endpoint: str = field(kw_only=True)
class-attribute
instance-attribute
client: openai.AzureOpenAI = field(default=Factory(lambda : openai.AzureOpenAI(organization=self.organization, api_key=self.api_key, api_version=self.api_version, azure_endpoint=self.azure_endpoint, azure_deployment=self.azure_deployment, azure_ad_token=self.azure_ad_token, azure_ad_token_provider=self.azure_ad_token_provider), takes_self=True))
class-attribute
instance-attribute
tokenizer: OpenAiTokenizer = field(default=Factory(lambda : OpenAiTokenizer(model=self.model), takes_self=True), kw_only=True)
class-attribute
instance-attribute
BaseConversationMemoryDriver
Bases: ABC
Source code in griptape/griptape/drivers/memory/conversation/base_conversation_memory_driver.py
load(*args, **kwargs)
abstractmethod
BaseEmbeddingDriver
Bases: ExponentialBackoffMixin
, ABC
Attributes:
Name | Type | Description |
---|---|---|
dimensions |
int
|
Vector dimensions. |
Source code in griptape/griptape/drivers/embedding/base_embedding_driver.py
chunker: BaseChunker = field(init=False)
class-attribute
instance-attribute
dimensions: int = field(kw_only=True)
class-attribute
instance-attribute
tokenizer: BaseTokenizer = field(kw_only=True)
class-attribute
instance-attribute
__attrs_post_init__()
embed_string(string)
Source code in griptape/griptape/drivers/embedding/base_embedding_driver.py
embed_text_artifact(artifact)
BaseMultiModelPromptDriver
Bases: BasePromptDriver
, ABC
Prompt Driver for platforms like Amazon SageMaker, and Amazon Bedrock that host many LLM models.
Instances of this Prompt Driver require a Prompt Model Driver which is used to convert the prompt stack into a model input and parameters, and to process the model output.
Attributes:
Name | Type | Description |
---|---|---|
model |
Name of the model to use. |
|
tokenizer |
Optional[BaseTokenizer]
|
Tokenizer to use. Defaults to the Tokenizer of the Prompt Model Driver. |
prompt_model_driver |
BasePromptModelDriver
|
Prompt Model Driver to use. |
Source code in griptape/griptape/drivers/prompt/base_multi_model_prompt_driver.py
prompt_model_driver: BasePromptModelDriver = field(kw_only=True)
class-attribute
instance-attribute
stream: bool = field(default=False, kw_only=True)
class-attribute
instance-attribute
tokenizer: Optional[BaseTokenizer] = field(default=None, kw_only=True)
class-attribute
instance-attribute
__attrs_post_init__()
validate_stream(_, stream)
Source code in griptape/griptape/drivers/prompt/base_multi_model_prompt_driver.py
BasePromptDriver
Bases: ExponentialBackoffMixin
, ABC
Base class for Prompt Drivers.
Attributes:
Name | Type | Description |
---|---|---|
temperature |
float
|
The temperature to use for the completion. |
max_tokens |
Optional[int]
|
The maximum number of tokens to generate. If not specified, the value will be automatically generated based by the tokenizer. |
structure |
Optional[Structure]
|
An optional |
prompt_stack_to_string |
Callable[[PromptStack], str]
|
A function that converts a |
ignored_exception_types |
Tuple[Type[Exception], ...]
|
A tuple of exception types to ignore. |
model |
str
|
The model name. |
tokenizer |
BaseTokenizer
|
An instance of |
stream |
bool
|
Whether to stream the completion or not. |
Source code in griptape/griptape/drivers/prompt/base_prompt_driver.py
ignored_exception_types: Tuple[Type[Exception], ...] = field(default=Factory(lambda : ImportError), kw_only=True)
class-attribute
instance-attribute
max_tokens: Optional[int] = field(default=None, kw_only=True)
class-attribute
instance-attribute
model: str
instance-attribute
prompt_stack_to_string: Callable[[PromptStack], str] = field(default=Factory(lambda : self.default_prompt_stack_to_string_converter, takes_self=True), kw_only=True)
class-attribute
instance-attribute
stream: bool = field(default=False, kw_only=True)
class-attribute
instance-attribute
structure: Optional[Structure] = field(default=None, kw_only=True)
class-attribute
instance-attribute
temperature: float = field(default=0.1, kw_only=True)
class-attribute
instance-attribute
tokenizer: BaseTokenizer
instance-attribute
after_run(result)
before_run(prompt_stack)
default_prompt_stack_to_string_converter(prompt_stack)
Source code in griptape/griptape/drivers/prompt/base_prompt_driver.py
max_output_tokens(text)
run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/base_prompt_driver.py
token_count(prompt_stack)
try_run(prompt_stack)
abstractmethod
BasePromptModelDriver
Bases: ABC
Source code in griptape/griptape/drivers/prompt_model/base_prompt_model_driver.py
max_tokens: int = field(default=600, kw_only=True)
class-attribute
instance-attribute
prompt_driver: Optional[BasePromptDriver] = field(default=None, kw_only=True)
class-attribute
instance-attribute
supports_streaming: bool = field(default=True, kw_only=True)
class-attribute
instance-attribute
tokenizer: BaseTokenizer
abstractmethod
property
process_output(output)
abstractmethod
prompt_stack_to_model_input(prompt_stack)
abstractmethod
BaseSqlDriver
Bases: ABC
Source code in griptape/griptape/drivers/sql/base_sql_driver.py
RowResult
dataclass
execute_query(query)
abstractmethod
execute_query_raw(query)
abstractmethod
BaseVectorStoreDriver
Bases: ABC
Source code in griptape/griptape/drivers/vector/base_vector_store_driver.py
DEFAULT_QUERY_COUNT = 5
class-attribute
instance-attribute
embedding_driver: BaseEmbeddingDriver = field(kw_only=True)
class-attribute
instance-attribute
futures_executor: futures.Executor = field(default=Factory(lambda : futures.ThreadPoolExecutor()), kw_only=True)
class-attribute
instance-attribute
Entry
dataclass
Source code in griptape/griptape/drivers/vector/base_vector_store_driver.py
id: str
instance-attribute
meta: Optional[dict] = None
class-attribute
instance-attribute
namespace: Optional[str] = None
class-attribute
instance-attribute
vector: list[float]
instance-attribute
QueryResult
dataclass
Source code in griptape/griptape/drivers/vector/base_vector_store_driver.py
id: str
instance-attribute
meta: Optional[dict] = None
class-attribute
instance-attribute
namespace: Optional[str] = None
class-attribute
instance-attribute
score: float
instance-attribute
vector: list[float]
instance-attribute
load_entries(namespace=None)
abstractmethod
load_entry(vector_id, namespace=None)
abstractmethod
query(query, count=None, namespace=None, include_vectors=False, **kwargs)
abstractmethod
upsert_text(string, vector_id=None, namespace=None, meta=None, **kwargs)
Source code in griptape/griptape/drivers/vector/base_vector_store_driver.py
upsert_text_artifact(artifact, namespace=None, meta=None, **kwargs)
Source code in griptape/griptape/drivers/vector/base_vector_store_driver.py
upsert_text_artifacts(artifacts, meta=None, **kwargs)
Source code in griptape/griptape/drivers/vector/base_vector_store_driver.py
upsert_vector(vector, vector_id=None, namespace=None, meta=None, **kwargs)
abstractmethod
BedrockClaudePromptModelDriver
Bases: BasePromptModelDriver
Source code in griptape/griptape/drivers/prompt_model/bedrock_claude_prompt_model_driver.py
prompt_driver: Optional[AmazonBedrockPromptDriver] = field(default=None, kw_only=True)
class-attribute
instance-attribute
tokenizer: BedrockClaudeTokenizer
property
Returns the tokenizer for this driver.
We need to pass the session
field from the Prompt Driver to the
Tokenizer. However, the Prompt Driver is not initialized until after
the Prompt Model Driver is initialized. To resolve this, we make the tokenizer
field a @property that is only initialized when it is first accessed.
This ensures that by the time we need to initialize the Tokenizer, the
Prompt Driver has already been initialized.
See this thread for more information: https://github.com/griptape-ai/griptape/issues/244
Returns:
Name | Type | Description |
---|---|---|
BedrockClaudeTokenizer |
BedrockClaudeTokenizer
|
The tokenizer for this driver. |
top_k: int = field(default=250, kw_only=True)
class-attribute
instance-attribute
top_p: float = field(default=0.999, kw_only=True)
class-attribute
instance-attribute
process_output(response_body)
prompt_stack_to_model_input(prompt_stack)
Source code in griptape/griptape/drivers/prompt_model/bedrock_claude_prompt_model_driver.py
prompt_stack_to_model_params(prompt_stack)
Source code in griptape/griptape/drivers/prompt_model/bedrock_claude_prompt_model_driver.py
BedrockJurassicPromptModelDriver
Bases: BasePromptModelDriver
Source code in griptape/griptape/drivers/prompt_model/bedrock_jurassic_prompt_model_driver.py
prompt_driver: Optional[AmazonBedrockPromptDriver] = field(default=None, kw_only=True)
class-attribute
instance-attribute
supports_streaming: bool = field(default=False, kw_only=True)
class-attribute
instance-attribute
tokenizer: BedrockJurassicTokenizer
property
Returns the tokenizer for this driver.
We need to pass the session
field from the Prompt Driver to the
Tokenizer. However, the Prompt Driver is not initialized until after
the Prompt Model Driver is initialized. To resolve this, we make the tokenizer
field a @property that is only initialized when it is first accessed.
This ensures that by the time we need to initialize the Tokenizer, the
Prompt Driver has already been initialized.
See this thread for more information: https://github.com/griptape-ai/griptape/issues/244
Returns:
Name | Type | Description |
---|---|---|
BedrockJurassicTokenizer |
BedrockJurassicTokenizer
|
The tokenizer for this driver. |
top_p: float = field(default=0.9, kw_only=True)
class-attribute
instance-attribute
process_output(response_body)
prompt_stack_to_model_input(prompt_stack)
Source code in griptape/griptape/drivers/prompt_model/bedrock_jurassic_prompt_model_driver.py
prompt_stack_to_model_params(prompt_stack)
Source code in griptape/griptape/drivers/prompt_model/bedrock_jurassic_prompt_model_driver.py
BedrockTitanEmbeddingDriver
Bases: BaseEmbeddingDriver
Attributes:
Name | Type | Description |
---|---|---|
model |
str
|
Embedding model name. Defaults to DEFAULT_MODEL. |
dimensions |
int
|
Vector dimensions. Defaults to DEFAULT_MAX_TOKENS. |
tokenizer |
BedrockTitanTokenizer
|
Optionally provide custom |
session |
Session
|
Optionally provide custom |
bedrock_client |
Any
|
Optionally provide custom |
Source code in griptape/griptape/drivers/embedding/bedrock_titan_embedding_driver.py
DEFAULT_MAX_TOKENS = 1536
class-attribute
instance-attribute
DEFAULT_MODEL = 'amazon.titan-embed-text-v1'
class-attribute
instance-attribute
bedrock_client: Any = field(default=Factory(lambda : self.session.client('bedrock-runtime'), takes_self=True), kw_only=True)
class-attribute
instance-attribute
dimensions: int = field(default=DEFAULT_MAX_TOKENS, kw_only=True)
class-attribute
instance-attribute
model: str = field(default=DEFAULT_MODEL, kw_only=True)
class-attribute
instance-attribute
session: boto3.Session = field(default=Factory(lambda : import_optional_dependency('boto3').Session()), kw_only=True)
class-attribute
instance-attribute
tokenizer: BedrockTitanTokenizer = field(default=Factory(lambda : BedrockTitanTokenizer(model=self.model, session=self.session), takes_self=True), kw_only=True)
class-attribute
instance-attribute
try_embed_chunk(chunk)
Source code in griptape/griptape/drivers/embedding/bedrock_titan_embedding_driver.py
BedrockTitanPromptModelDriver
Bases: BasePromptModelDriver
Source code in griptape/griptape/drivers/prompt_model/bedrock_titan_prompt_model_driver.py
prompt_driver: Optional[AmazonBedrockPromptDriver] = field(default=None, kw_only=True)
class-attribute
instance-attribute
tokenizer: BedrockTitanTokenizer
property
Returns the tokenizer for this driver.
We need to pass the session
field from the Prompt Driver to the
Tokenizer. However, the Prompt Driver is not initialized until after
the Prompt Model Driver is initialized. To resolve this, we make the tokenizer
field a @property that is only initialized when it is first accessed.
This ensures that by the time we need to initialize the Tokenizer, the
Prompt Driver has already been initialized.
See this thread for more information: https://github.com/griptape-ai/griptape/issues/244
Returns:
Name | Type | Description |
---|---|---|
BedrockTitanTokenizer |
BedrockTitanTokenizer
|
The tokenizer for this driver. |
top_p: float = field(default=0.9, kw_only=True)
class-attribute
instance-attribute
process_output(response_body)
Source code in griptape/griptape/drivers/prompt_model/bedrock_titan_prompt_model_driver.py
prompt_stack_to_model_input(prompt_stack)
Source code in griptape/griptape/drivers/prompt_model/bedrock_titan_prompt_model_driver.py
prompt_stack_to_model_params(prompt_stack)
Source code in griptape/griptape/drivers/prompt_model/bedrock_titan_prompt_model_driver.py
CoherePromptDriver
Bases: BasePromptDriver
Attributes:
Name | Type | Description |
---|---|---|
api_key |
str
|
Cohere API key. |
model |
str
|
Cohere model name. |
client |
Client
|
Custom |
tokenizer |
CohereTokenizer
|
Custom |
Source code in griptape/griptape/drivers/prompt/cohere_prompt_driver.py
api_key: str = field(kw_only=True)
class-attribute
instance-attribute
client: Client = field(default=Factory(lambda : import_optional_dependency('cohere').Client(self.api_key), takes_self=True), kw_only=True)
class-attribute
instance-attribute
model: str = field(kw_only=True)
class-attribute
instance-attribute
tokenizer: CohereTokenizer = field(default=Factory(lambda : CohereTokenizer(model=self.model, client=self.client), takes_self=True), kw_only=True)
class-attribute
instance-attribute
try_run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/cohere_prompt_driver.py
try_stream(prompt_stack)
HuggingFaceHubPromptDriver
Bases: BasePromptDriver
Attributes:
Name | Type | Description |
---|---|---|
api_token |
str
|
Hugging Face Hub API token. |
use_gpu |
bool
|
Use GPU during model run. |
params |
dict
|
Custom model run parameters. |
model |
str
|
Hugging Face Hub model name. |
client |
InferenceApi
|
Custom |
tokenizer |
HuggingFaceTokenizer
|
Custom |
Source code in griptape/griptape/drivers/prompt/hugging_face_hub_prompt_driver.py
DEFAULT_PARAMS = {'return_full_text': False, 'max_new_tokens': MAX_NEW_TOKENS}
class-attribute
instance-attribute
MAX_NEW_TOKENS = 250
class-attribute
instance-attribute
SUPPORTED_TASKS = ['text2text-generation', 'text-generation']
class-attribute
instance-attribute
api_token: str = field(kw_only=True)
class-attribute
instance-attribute
client: InferenceApi = field(default=Factory(lambda : import_optional_dependency('huggingface_hub').InferenceApi(repo_id=self.model, token=self.api_token, gpu=self.use_gpu), takes_self=True), kw_only=True)
class-attribute
instance-attribute
model: str = field(kw_only=True)
class-attribute
instance-attribute
params: dict = field(factory=dict, kw_only=True)
class-attribute
instance-attribute
stream: bool = field(default=False, kw_only=True)
class-attribute
instance-attribute
tokenizer: HuggingFaceTokenizer = field(default=Factory(lambda : HuggingFaceTokenizer(tokenizer=import_optional_dependency('transformers').AutoTokenizer.from_pretrained(self.model), max_tokens=self.MAX_NEW_TOKENS), takes_self=True), kw_only=True)
class-attribute
instance-attribute
use_gpu: bool = field(default=False, kw_only=True)
class-attribute
instance-attribute
try_run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/hugging_face_hub_prompt_driver.py
try_stream(_)
HuggingFacePipelinePromptDriver
Bases: BasePromptDriver
Attributes:
Name | Type | Description |
---|---|---|
params |
dict
|
Custom model run parameters. |
model |
str
|
Hugging Face Hub model name. |
tokenizer |
HuggingFaceTokenizer
|
Custom |
Source code in griptape/griptape/drivers/prompt/hugging_face_pipeline_prompt_driver.py
DEFAULT_PARAMS = {'return_full_text': False, 'num_return_sequences': 1}
class-attribute
instance-attribute
SUPPORTED_TASKS = ['text2text-generation', 'text-generation']
class-attribute
instance-attribute
model: str = field(kw_only=True)
class-attribute
instance-attribute
params: dict = field(factory=dict, kw_only=True)
class-attribute
instance-attribute
tokenizer: HuggingFaceTokenizer = field(default=Factory(lambda : HuggingFaceTokenizer(tokenizer=import_optional_dependency('transformers').AutoTokenizer.from_pretrained(self.model)), takes_self=True), kw_only=True)
class-attribute
instance-attribute
try_run(prompt_stack)
Source code in griptape/griptape/drivers/prompt/hugging_face_pipeline_prompt_driver.py
LocalConversationMemoryDriver
Bases: BaseConversationMemoryDriver
Source code in griptape/griptape/drivers/memory/conversation/local_conversation_memory_driver.py
file_path: str = field(default='griptape_memory.json', kw_only=True)
class-attribute
instance-attribute
load()
Source code in griptape/griptape/drivers/memory/conversation/local_conversation_memory_driver.py
LocalVectorStoreDriver
Bases: BaseVectorStoreDriver
Source code in griptape/griptape/drivers/vector/local_vector_store_driver.py
entries: dict[str, BaseVectorStoreDriver.Entry] = field(factory=dict, kw_only=True)
class-attribute
instance-attribute
relatedness_fn: Callable = field(default=lambda x, y: dot(x, y) / (norm(x) * norm(y)), kw_only=True)
class-attribute
instance-attribute
load_entries(namespace=None)
load_entry(vector_id, namespace=None)
query(query, count=None, namespace=None, include_vectors=False, **kwargs)
Source code in griptape/griptape/drivers/vector/local_vector_store_driver.py
upsert_vector(vector, vector_id=None, namespace=None, meta=None, **kwargs)
Source code in griptape/griptape/drivers/vector/local_vector_store_driver.py
MarqoVectorStoreDriver
Bases: BaseVectorStoreDriver
A Vector Store Driver for Marqo.
Attributes:
Name | Type | Description |
---|---|---|
api_key |
str
|
The API key for the Marqo API. |
url |
str
|
The URL to the Marqo API. |
mq |
Optional[Client]
|
An optional Marqo client. Defaults to a new client with the given URL and API key. |
index |
str
|
The name of the index to use. |
Source code in griptape/griptape/drivers/vector/marqo_vector_store_driver.py
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 |
|