Skip to content

Engines

__all__ = ['BaseQueryEngine', 'VectorQueryEngine', 'BaseSummaryEngine', 'PromptSummaryEngine', 'BaseExtractionEngine', 'CsvExtractionEngine', 'JsonExtractionEngine', 'BaseImageGenerationEngine', 'PromptImageGenerationEngine', 'VariationImageGenerationEngine', 'InpaintingImageGenerationEngine', 'OutpaintingImageGenerationEngine', 'ImageQueryEngine', 'TextToSpeechEngine', 'AudioTranscriptionEngine'] module-attribute

AudioTranscriptionEngine

Source code in griptape/engines/audio/audio_transcription_engine.py
@define
class AudioTranscriptionEngine:
    """Thin engine that delegates audio transcription to a configured driver."""

    # Driver that performs the actual speech-to-text work.
    audio_transcription_driver: BaseAudioTranscriptionDriver = field(kw_only=True)

    def run(self, audio: AudioArtifact, *args, **kwargs) -> TextArtifact:
        """Transcribe *audio* and return the resulting text artifact."""
        driver = self.audio_transcription_driver
        return driver.try_run(audio)

audio_transcription_driver: BaseAudioTranscriptionDriver = field(kw_only=True) class-attribute instance-attribute

run(audio, *args, **kwargs)

Source code in griptape/engines/audio/audio_transcription_engine.py
def run(self, audio: AudioArtifact, *args, **kwargs) -> TextArtifact:
    """Transcribe *audio* via the configured audio transcription driver."""
    return self.audio_transcription_driver.try_run(audio)

BaseExtractionEngine

Bases: ABC

Source code in griptape/engines/extraction/base_extraction_engine.py
@define
class BaseExtractionEngine(ABC):
    """Common scaffolding for engines that extract structured data from text."""

    # Fraction of the tokenizer's input window budgeted for chunked input text.
    max_token_multiplier: float = field(default=0.5, kw_only=True)
    # Separator used when joining chunk artifacts into a single string.
    chunk_joiner: str = field(default="\n\n", kw_only=True)
    prompt_driver: BasePromptDriver = field(kw_only=True)
    # Default chunker sized from the prompt driver's tokenizer.
    chunker: BaseChunker = field(
        default=Factory(
            lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens),
            takes_self=True,
        ),
        kw_only=True,
    )

    @max_token_multiplier.validator  # pyright: ignore
    def validate_max_token_multiplier(self, _, max_token_multiplier: int) -> None:
        """Reject multipliers outside the half-open interval (0, 1]."""
        if max_token_multiplier <= 0:
            raise ValueError("has to be greater than 0")
        if max_token_multiplier > 1:
            raise ValueError("has to be less than or equal to 1")

    @property
    def max_chunker_tokens(self) -> int:
        """Token budget handed to the chunker."""
        return round(self.max_token_multiplier * self.prompt_driver.tokenizer.max_input_tokens)

    @property
    def min_response_tokens(self) -> int:
        """Tokens kept free for the model's response."""
        max_input = self.prompt_driver.tokenizer.max_input_tokens
        return round(max_input - max_input * self.max_token_multiplier)

    @abstractmethod
    def extract(
        self, text: str | ListArtifact, *, rulesets: Optional[list[Ruleset]] = None, **kwargs
    ) -> ListArtifact | ErrorArtifact: ...

chunk_joiner: str = field(default='\n\n', kw_only=True) class-attribute instance-attribute

chunker: BaseChunker = field(default=Factory(lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens), takes_self=True), kw_only=True) class-attribute instance-attribute

max_chunker_tokens: int property

max_token_multiplier: float = field(default=0.5, kw_only=True) class-attribute instance-attribute

min_response_tokens: int property

prompt_driver: BasePromptDriver = field(kw_only=True) class-attribute instance-attribute

extract(text, *, rulesets=None, **kwargs) abstractmethod

Source code in griptape/engines/extraction/base_extraction_engine.py
# Subclasses implement the concrete extraction strategy (CSV, JSON, ...).
@abstractmethod
def extract(
    self, text: str | ListArtifact, *, rulesets: Optional[list[Ruleset]] = None, **kwargs
) -> ListArtifact | ErrorArtifact: ...

validate_max_token_multiplier(_, max_token_multiplier)

Source code in griptape/engines/extraction/base_extraction_engine.py
@max_token_multiplier.validator  # pyright: ignore
def validate_max_token_multiplier(self, _, max_token_multiplier: int) -> None:
    # Accept only values in the half-open interval (0, 1].
    if max_token_multiplier > 1:
        raise ValueError("has to be less than or equal to 1")
    elif max_token_multiplier <= 0:
        raise ValueError("has to be greater than 0")

BaseImageGenerationEngine

Bases: ABC

Source code in griptape/engines/image/base_image_generation_engine.py
@define
class BaseImageGenerationEngine(ABC):
    """Base class for engines that produce images through a generation driver."""

    image_generation_driver: BaseImageGenerationDriver = field(kw_only=True)

    @abstractmethod
    def run(self, prompts: list[str], *args, rulesets: Optional[list[Ruleset]], **kwargs) -> ImageArtifact: ...

    def _ruleset_to_prompts(self, prompts: Optional[list[str]], rulesets: Optional[list[Ruleset]]) -> list[str]:
        """Return *prompts* extended with every rule value from *rulesets*.

        Always builds a new list: the original implementation used
        ``prompts += [...]``, which mutates the caller's list in place as a
        side effect. Callers rebind the return value, so copying first is a
        backward-compatible fix.
        """
        combined = list(prompts) if prompts else []

        if rulesets:
            for ruleset in rulesets:
                combined.extend(rule.value for rule in ruleset.rules)

        return combined

image_generation_driver: BaseImageGenerationDriver = field(kw_only=True) class-attribute instance-attribute

run(prompts, *args, rulesets, **kwargs) abstractmethod

Source code in griptape/engines/image/base_image_generation_engine.py
# Concrete engines merge rulesets into prompts and call the matching driver method.
@abstractmethod
def run(self, prompts: list[str], *args, rulesets: Optional[list[Ruleset]], **kwargs) -> ImageArtifact: ...

BaseQueryEngine

Bases: ABC

Source code in griptape/engines/query/base_query_engine.py
@define
class BaseQueryEngine(ABC):
    """Interface for engines that answer queries against stored text artifacts."""

    # Answer *query* using artifacts stored under *namespace*.
    @abstractmethod
    def query(
        self,
        query: str,
        namespace: Optional[str] = None,
        *,
        rulesets: Optional[list[Ruleset]] = None,
        top_n: Optional[int] = None,
        filter: Optional[dict] = None,
    ) -> TextArtifact: ...

    # Load all text artifacts stored under *namespace*.
    @abstractmethod
    def load_artifacts(self, namespace: str) -> ListArtifact: ...

    # Store a single text artifact; returns its store identifier.
    @abstractmethod
    def upsert_text_artifact(self, artifact: TextArtifact, namespace: Optional[str] = None) -> str: ...

    # Store a batch of text artifacts under *namespace*.
    @abstractmethod
    def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None: ...

load_artifacts(namespace) abstractmethod

Source code in griptape/engines/query/base_query_engine.py
# Load all text artifacts stored under *namespace*.
@abstractmethod
def load_artifacts(self, namespace: str) -> ListArtifact: ...

query(query, namespace=None, *, rulesets=None, top_n=None, filter=None) abstractmethod

Source code in griptape/engines/query/base_query_engine.py
# Answer *query* using artifacts stored under *namespace*.
@abstractmethod
def query(
    self,
    query: str,
    namespace: Optional[str] = None,
    *,
    rulesets: Optional[list[Ruleset]] = None,
    top_n: Optional[int] = None,
    filter: Optional[dict] = None,
) -> TextArtifact: ...

upsert_text_artifact(artifact, namespace=None) abstractmethod

Source code in griptape/engines/query/base_query_engine.py
# Store a single text artifact; returns its store identifier.
@abstractmethod
def upsert_text_artifact(self, artifact: TextArtifact, namespace: Optional[str] = None) -> str: ...

upsert_text_artifacts(artifacts, namespace) abstractmethod

Source code in griptape/engines/query/base_query_engine.py
# Store a batch of text artifacts under *namespace*.
@abstractmethod
def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None: ...

BaseSummaryEngine

Bases: ABC

Source code in griptape/engines/summary/base_summary_engine.py
@define
class BaseSummaryEngine(ABC):
    """Base class for summarization engines."""

    def summarize_text(self, text: str, *, rulesets: Optional[list[Ruleset]] = None) -> str:
        """Summarize a plain string by wrapping it as a single-item artifact list."""
        artifacts = ListArtifact([TextArtifact(text)])
        summary = self.summarize_artifacts(artifacts, rulesets=rulesets)
        return summary.value

    @abstractmethod
    def summarize_artifacts(
        self, artifacts: ListArtifact, *, rulesets: Optional[list[Ruleset]] = None
    ) -> TextArtifact: ...

summarize_artifacts(artifacts, *, rulesets=None) abstractmethod

Source code in griptape/engines/summary/base_summary_engine.py
# Core summarization hook implemented by concrete engines.
@abstractmethod
def summarize_artifacts(
    self, artifacts: ListArtifact, *, rulesets: Optional[list[Ruleset]] = None
) -> TextArtifact: ...

summarize_text(text, *, rulesets=None)

Source code in griptape/engines/summary/base_summary_engine.py
def summarize_text(self, text: str, *, rulesets: Optional[list[Ruleset]] = None) -> str:
    """Summarize a plain string by delegating to summarize_artifacts."""
    return self.summarize_artifacts(ListArtifact([TextArtifact(text)]), rulesets=rulesets).value

CsvExtractionEngine

Bases: BaseExtractionEngine

Source code in griptape/engines/extraction/csv_extraction_engine.py
@define
class CsvExtractionEngine(BaseExtractionEngine):
    """Extracts CSV rows from text by repeatedly prompting an LLM."""

    template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv_extraction.j2")), kw_only=True)

    def extract(
        self,
        text: str | ListArtifact,
        *,
        rulesets: Optional[list[Ruleset]] = None,
        column_names: Optional[list[str]] = None,
        **kwargs,
    ) -> ListArtifact | ErrorArtifact:
        """Extract rows for *column_names*; any failure becomes an ErrorArtifact."""
        if column_names is None:
            column_names = []

        # Normalize the input to a list of TextArtifacts before recursing.
        if isinstance(text, ListArtifact):
            source_artifacts = cast(list[TextArtifact], text.value)
        else:
            source_artifacts = [TextArtifact(text)]

        try:
            extracted = self._extract_rec(source_artifacts, column_names, [], rulesets=rulesets)

            return ListArtifact(extracted, item_separator="\n")
        except Exception as e:
            return ErrorArtifact(f"error extracting CSV rows: {e}")

    def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArtifact]:
        """Parse model output *text* as CSV, pairing each row's cells with *column_names*."""
        with io.StringIO(text) as buffer:
            return [
                CsvRowArtifact(dict(zip(column_names, [cell.strip() for cell in row])))
                for row in csv.reader(buffer)
            ]

    def _extract_rec(
        self,
        artifacts: list[TextArtifact],
        column_names: list[str],
        rows: list[CsvRowArtifact],
        rulesets: Optional[list[Ruleset]] = None,
    ) -> list[CsvRowArtifact]:
        """Recursively extract rows, re-chunking whenever the prompt won't fit."""
        artifacts_text = self.chunk_joiner.join(a.value for a in artifacts)
        full_text = self.template_generator.render(
            column_names=column_names,
            text=artifacts_text,
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        )

        if self.prompt_driver.tokenizer.count_input_tokens_left(full_text) >= self.min_response_tokens:
            # Everything fits: a single final prompt produces the remaining rows.
            output = self.prompt_driver.run(
                PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
            ).value
            rows.extend(self.text_to_csv_rows(output, column_names))

            return rows

        # Too large: extract from the first chunk, then recurse on the rest.
        chunks = self.chunker.chunk(artifacts_text)
        partial_text = self.template_generator.render(
            column_names=column_names,
            text=chunks[0].value,
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        )
        output = self.prompt_driver.run(
            PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
        ).value
        rows.extend(self.text_to_csv_rows(output, column_names))

        return self._extract_rec(chunks[1:], column_names, rows, rulesets=rulesets)

template_generator: J2 = field(default=Factory(lambda: J2('engines/extraction/csv_extraction.j2')), kw_only=True) class-attribute instance-attribute

extract(text, *, rulesets=None, column_names=None, **kwargs)

Source code in griptape/engines/extraction/csv_extraction_engine.py
def extract(
    self,
    text: str | ListArtifact,
    *,
    rulesets: Optional[list[Ruleset]] = None,
    column_names: Optional[list[str]] = None,
    **kwargs,
) -> ListArtifact | ErrorArtifact:
    """Extract CSV rows from *text*; any failure is returned as an ErrorArtifact."""
    if column_names is None:
        column_names = []
    try:
        return ListArtifact(
            self._extract_rec(
                cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)],
                column_names,
                [],
                rulesets=rulesets,
            ),
            item_separator="\n",
        )
    except Exception as e:
        return ErrorArtifact(f"error extracting CSV rows: {e}")

text_to_csv_rows(text, column_names)

Source code in griptape/engines/extraction/csv_extraction_engine.py
def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArtifact]:
    """Parse *text* as CSV and zip each row's stripped cells with *column_names*."""
    rows = []

    with io.StringIO(text) as f:
        for row in csv.reader(f):
            rows.append(CsvRowArtifact(dict(zip(column_names, [x.strip() for x in row]))))

    return rows

ImageQueryEngine

Source code in griptape/engines/image_query/image_query_engine.py
@define
class ImageQueryEngine:
    """Answers natural-language questions about images via an image query driver."""

    image_query_driver: BaseImageQueryDriver = field(kw_only=True)

    def run(self, query: str, images: list[ImageArtifact]) -> TextArtifact:
        """Ask *query* about *images* and return the driver's text answer."""
        driver = self.image_query_driver
        return driver.query(query, images)

image_query_driver: BaseImageQueryDriver = field(kw_only=True) class-attribute instance-attribute

run(query, images)

Source code in griptape/engines/image_query/image_query_engine.py
def run(self, query: str, images: list[ImageArtifact]) -> TextArtifact:
    """Ask *query* about *images* via the configured image query driver."""
    return self.image_query_driver.query(query, images)

InpaintingImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/engines/image/inpainting_image_generation_engine.py
@define
class InpaintingImageGenerationEngine(BaseImageGenerationEngine):
    """Generates images by inpainting a masked region of an input image."""

    def run(
        self,
        prompts: list[str],
        *args,
        image: ImageArtifact,
        mask: ImageArtifact,
        negative_prompts: Optional[list[str]] = None,
        rulesets: Optional[list[Ruleset]] = None,
        negative_rulesets: Optional[list[Ruleset]] = None,
        **kwargs,
    ) -> ImageArtifact:
        """Inpaint *image* within *mask*, merging ruleset rule values into the prompts."""
        merged_prompts = self._ruleset_to_prompts(prompts, rulesets)
        merged_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_image_inpainting(
            merged_prompts,
            image=image,
            mask=mask,
            negative_prompts=merged_negatives,
        )

run(prompts, *args, image, mask, negative_prompts=None, rulesets=None, negative_rulesets=None, **kwargs)

Source code in griptape/engines/image/inpainting_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    *args,
    image: ImageArtifact,
    mask: ImageArtifact,
    negative_prompts: Optional[list[str]] = None,
    rulesets: Optional[list[Ruleset]] = None,
    negative_rulesets: Optional[list[Ruleset]] = None,
    **kwargs,
) -> ImageArtifact:
    """Inpaint *image* inside *mask*, merging ruleset rules into the prompts."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_image_inpainting(
        prompts, image=image, mask=mask, negative_prompts=negative_prompts
    )

JsonExtractionEngine

Bases: BaseExtractionEngine

Source code in griptape/engines/extraction/json_extraction_engine.py
@define
class JsonExtractionEngine(BaseExtractionEngine):
    """Extracts JSON values matching a schema from text by repeatedly prompting an LLM."""

    template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/json_extraction.j2")), kw_only=True)

    def extract(
        self,
        text: str | ListArtifact,
        *,
        rulesets: Optional[list[Ruleset]] = None,
        template_schema: Optional[list[dict]] = None,
        **kwargs,
    ) -> ListArtifact | ErrorArtifact:
        """Extract artifacts conforming to *template_schema*; failures become an ErrorArtifact."""
        if template_schema is None:
            template_schema = []
        try:
            json_schema = json.dumps(template_schema)

            return ListArtifact(
                self._extract_rec(
                    cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)],
                    json_schema,
                    [],
                    rulesets=rulesets,
                ),
                item_separator="\n",
            )
        except Exception as e:
            return ErrorArtifact(f"error extracting JSON: {e}")

    def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]:
        """Split a JSON array string into one TextArtifact per element."""
        return [TextArtifact(json.dumps(e)) for e in json.loads(json_input)]

    def _extract_rec(
        self,
        artifacts: list[TextArtifact],
        json_template_schema: str,
        extractions: list[TextArtifact],
        rulesets: Optional[list[Ruleset]] = None,
    ) -> list[TextArtifact]:
        """Recursively extract, re-chunking whenever the prompt won't fit."""
        artifacts_text = self.chunk_joiner.join([a.value for a in artifacts])
        full_text = self.template_generator.render(
            json_template_schema=json_template_schema,
            text=artifacts_text,
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        )

        if self.prompt_driver.tokenizer.count_input_tokens_left(full_text) >= self.min_response_tokens:
            extractions.extend(
                self.json_to_text_artifacts(
                    self.prompt_driver.run(
                        PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
                    ).value
                )
            )

            return extractions
        else:
            chunks = self.chunker.chunk(artifacts_text)
            # BUG FIX: this render previously passed the schema as `template_schema=`
            # while the full-text branch above uses `json_template_schema=`; the
            # mismatched variable name meant the schema never reached the template
            # in the chunked path. Both branches now use the same kwarg.
            partial_text = self.template_generator.render(
                json_template_schema=json_template_schema,
                text=chunks[0].value,
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
            )

            extractions.extend(
                self.json_to_text_artifacts(
                    self.prompt_driver.run(
                        PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
                    ).value
                )
            )

            return self._extract_rec(chunks[1:], json_template_schema, extractions, rulesets=rulesets)

template_generator: J2 = field(default=Factory(lambda: J2('engines/extraction/json_extraction.j2')), kw_only=True) class-attribute instance-attribute

extract(text, *, rulesets=None, template_schema=None, **kwargs)

Source code in griptape/engines/extraction/json_extraction_engine.py
def extract(
    self,
    text: str | ListArtifact,
    *,
    rulesets: Optional[list[Ruleset]] = None,
    template_schema: Optional[list[dict]] = None,
    **kwargs,
) -> ListArtifact | ErrorArtifact:
    """Extract JSON artifacts matching *template_schema*; failures are returned
    as an ErrorArtifact rather than raised."""
    if template_schema is None:
        template_schema = []
    try:
        json_schema = json.dumps(template_schema)

        return ListArtifact(
            self._extract_rec(
                cast(list[TextArtifact], text.value) if isinstance(text, ListArtifact) else [TextArtifact(text)],
                json_schema,
                [],
                rulesets=rulesets,
            ),
            item_separator="\n",
        )
    except Exception as e:
        return ErrorArtifact(f"error extracting JSON: {e}")

json_to_text_artifacts(json_input)

Source code in griptape/engines/extraction/json_extraction_engine.py
def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]:
    """Split a JSON array string into one TextArtifact per element."""
    return [TextArtifact(json.dumps(e)) for e in json.loads(json_input)]

OutpaintingImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/engines/image/outpainting_image_generation_engine.py
@define
class OutpaintingImageGenerationEngine(BaseImageGenerationEngine):
    """Generates images by outpainting beyond the masked region of an input image."""

    def run(
        self,
        prompts: list[str],
        *args,
        image: ImageArtifact,
        mask: ImageArtifact,
        negative_prompts: Optional[list[str]] = None,
        rulesets: Optional[list[Ruleset]] = None,
        negative_rulesets: Optional[list[Ruleset]] = None,
        **kwargs,
    ) -> ImageArtifact:
        """Outpaint *image* around *mask*, merging ruleset rule values into the prompts."""
        merged_prompts = self._ruleset_to_prompts(prompts, rulesets)
        merged_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_image_outpainting(
            merged_prompts,
            image=image,
            mask=mask,
            negative_prompts=merged_negatives,
        )

run(prompts, *args, image, mask, negative_prompts=None, rulesets=None, negative_rulesets=None, **kwargs)

Source code in griptape/engines/image/outpainting_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    *args,
    image: ImageArtifact,
    mask: ImageArtifact,
    negative_prompts: Optional[list[str]] = None,
    rulesets: Optional[list[Ruleset]] = None,
    negative_rulesets: Optional[list[Ruleset]] = None,
    **kwargs,
) -> ImageArtifact:
    """Outpaint *image* around *mask*, merging ruleset rules into the prompts."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_image_outpainting(
        prompts, image=image, mask=mask, negative_prompts=negative_prompts
    )

PromptImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/engines/image/prompt_image_generation_engine.py
@define
class PromptImageGenerationEngine(BaseImageGenerationEngine):
    """Generates images from text prompts alone (no source image)."""

    def run(
        self,
        prompts: list[str],
        *args,
        negative_prompts: Optional[list[str]] = None,
        rulesets: Optional[list[Ruleset]] = None,
        negative_rulesets: Optional[list[Ruleset]] = None,
        **kwargs,
    ) -> ImageArtifact:
        """Render an image from prompts merged with any ruleset rule values."""
        merged_prompts = self._ruleset_to_prompts(prompts, rulesets)
        merged_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_text_to_image(merged_prompts, negative_prompts=merged_negatives)

run(prompts, *args, negative_prompts=None, rulesets=None, negative_rulesets=None, **kwargs)

Source code in griptape/engines/image/prompt_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    *args,
    negative_prompts: Optional[list[str]] = None,
    rulesets: Optional[list[Ruleset]] = None,
    negative_rulesets: Optional[list[Ruleset]] = None,
    **kwargs,
) -> ImageArtifact:
    """Generate an image from text prompts merged with ruleset rule values."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_text_to_image(prompts, negative_prompts=negative_prompts)

PromptSummaryEngine

Bases: BaseSummaryEngine

Source code in griptape/engines/summary/prompt_summary_engine.py
@define
class PromptSummaryEngine(BaseSummaryEngine):
    """Summarizes artifacts with a prompt driver, chunking text that won't fit."""

    # Separator used when joining artifacts into a single string.
    chunk_joiner: str = field(default="\n\n", kw_only=True)
    # Fraction of the tokenizer's input window budgeted for chunked input text.
    max_token_multiplier: float = field(default=0.5, kw_only=True)
    template_generator: J2 = field(default=Factory(lambda: J2("engines/summary/prompt_summary.j2")), kw_only=True)
    prompt_driver: BasePromptDriver = field(kw_only=True)
    # Default chunker sized from the prompt driver's tokenizer.
    chunker: BaseChunker = field(
        default=Factory(
            lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens),
            takes_self=True,
        ),
        kw_only=True,
    )

    # NOTE(review): the name looks copy-pasted from an allowlist validator; it
    # actually validates max_token_multiplier. Kept as-is to avoid an interface change.
    @max_token_multiplier.validator  # pyright: ignore
    def validate_allowlist(self, _, max_token_multiplier: int) -> None:
        """Reject multipliers outside the half-open interval (0, 1]."""
        if max_token_multiplier <= 0:
            raise ValueError("has to be greater than 0")
        if max_token_multiplier > 1:
            raise ValueError("has to be less than or equal to 1")

    @property
    def max_chunker_tokens(self) -> int:
        """Token budget handed to the chunker."""
        return round(self.max_token_multiplier * self.prompt_driver.tokenizer.max_input_tokens)

    @property
    def min_response_tokens(self) -> int:
        """Tokens kept free for the model's summary response."""
        max_input = self.prompt_driver.tokenizer.max_input_tokens
        return round(max_input - max_input * self.max_token_multiplier)

    def summarize_artifacts(self, artifacts: ListArtifact, *, rulesets: Optional[list[Ruleset]] = None) -> TextArtifact:
        """Summarize every text artifact in *artifacts* into one TextArtifact."""
        return self.summarize_artifacts_rec(cast(list[TextArtifact], artifacts.value), None, rulesets=rulesets)

    def summarize_artifacts_rec(
        self, artifacts: list[TextArtifact], summary: Optional[str] = None, rulesets: Optional[list[Ruleset]] = None
    ) -> TextArtifact:
        """Recursively fold *artifacts* into *summary*, chunking oversized text."""
        artifacts_text = self.chunk_joiner.join(a.to_text() for a in artifacts)

        full_text = self.template_generator.render(
            summary=summary, text=artifacts_text, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
        )

        if self.prompt_driver.tokenizer.count_input_tokens_left(full_text) >= self.min_response_tokens:
            # Everything fits: a single prompt yields the final summary.
            return self.prompt_driver.run(
                PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
            )

        # Too large: summarize the first chunk, then recurse with the new summary.
        chunks = self.chunker.chunk(artifacts_text)
        partial_text = self.template_generator.render(
            summary=summary, text=chunks[0].value, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
        )
        partial_summary = self.prompt_driver.run(
            PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
        ).value

        return self.summarize_artifacts_rec(chunks[1:], partial_summary, rulesets=rulesets)

chunk_joiner: str = field(default='\n\n', kw_only=True) class-attribute instance-attribute

chunker: BaseChunker = field(default=Factory(lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens), takes_self=True), kw_only=True) class-attribute instance-attribute

max_chunker_tokens: int property

max_token_multiplier: float = field(default=0.5, kw_only=True) class-attribute instance-attribute

min_response_tokens: int property

prompt_driver: BasePromptDriver = field(kw_only=True) class-attribute instance-attribute

template_generator: J2 = field(default=Factory(lambda: J2('engines/summary/prompt_summary.j2')), kw_only=True) class-attribute instance-attribute

summarize_artifacts(artifacts, *, rulesets=None)

Source code in griptape/engines/summary/prompt_summary_engine.py
def summarize_artifacts(self, artifacts: ListArtifact, *, rulesets: Optional[list[Ruleset]] = None) -> TextArtifact:
    """Summarize every text artifact in *artifacts* into one TextArtifact."""
    return self.summarize_artifacts_rec(cast(list[TextArtifact], artifacts.value), None, rulesets=rulesets)

summarize_artifacts_rec(artifacts, summary=None, rulesets=None)

Source code in griptape/engines/summary/prompt_summary_engine.py
def summarize_artifacts_rec(
    self, artifacts: list[TextArtifact], summary: Optional[str] = None, rulesets: Optional[list[Ruleset]] = None
) -> TextArtifact:
    """Recursively fold *artifacts* into *summary*, chunking text that won't fit."""
    artifacts_text = self.chunk_joiner.join([a.to_text() for a in artifacts])

    full_text = self.template_generator.render(
        summary=summary, text=artifacts_text, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
    )

    if self.prompt_driver.tokenizer.count_input_tokens_left(full_text) >= self.min_response_tokens:
        # Everything fits: one final prompt yields the summary.
        return self.prompt_driver.run(
            PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
        )
    else:
        # Too large: summarize the first chunk, then recurse with the new summary.
        chunks = self.chunker.chunk(artifacts_text)

        partial_text = self.template_generator.render(
            summary=summary, text=chunks[0].value, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
        )

        return self.summarize_artifacts_rec(
            chunks[1:],
            self.prompt_driver.run(
                PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
            ).value,
            rulesets=rulesets,
        )

validate_allowlist(_, max_token_multiplier)

Source code in griptape/engines/summary/prompt_summary_engine.py
# NOTE(review): the name looks copy-pasted from an allowlist validator; it
# actually validates max_token_multiplier, accepting only values in (0, 1].
@max_token_multiplier.validator  # pyright: ignore
def validate_allowlist(self, _, max_token_multiplier: int) -> None:
    if max_token_multiplier > 1:
        raise ValueError("has to be less than or equal to 1")
    elif max_token_multiplier <= 0:
        raise ValueError("has to be greater than 0")

TextToSpeechEngine

Source code in griptape/engines/audio/text_to_speech_engine.py
@define
class TextToSpeechEngine:
    """Converts text prompts to audio through a text-to-speech driver."""

    text_to_speech_driver: BaseTextToSpeechDriver = field(kw_only=True)

    def run(self, prompts: list[str], *args, **kwargs) -> AudioArtifact:
        """Synthesize *prompts* into a single audio artifact."""
        driver = self.text_to_speech_driver
        return driver.try_text_to_audio(prompts=prompts)

text_to_speech_driver: BaseTextToSpeechDriver = field(kw_only=True) class-attribute instance-attribute

run(prompts, *args, **kwargs)

Source code in griptape/engines/audio/text_to_speech_engine.py
def run(self, prompts: list[str], *args, **kwargs) -> AudioArtifact:
    """Synthesize *prompts* into audio via the configured text-to-speech driver."""
    return self.text_to_speech_driver.try_text_to_audio(prompts=prompts)

VariationImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/engines/image/variation_image_generation_engine.py
@define
class VariationImageGenerationEngine(BaseImageGenerationEngine):
    """Generates variations of an input image guided by text prompts."""

    def run(
        self,
        prompts: list[str],
        *args,
        image: ImageArtifact,
        negative_prompts: Optional[list[str]] = None,
        rulesets: Optional[list[Ruleset]] = None,
        negative_rulesets: Optional[list[Ruleset]] = None,
        **kwargs,
    ) -> ImageArtifact:
        """Produce a variation of *image*, merging ruleset rule values into the prompts."""
        merged_prompts = self._ruleset_to_prompts(prompts, rulesets)
        merged_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_image_variation(
            prompts=merged_prompts,
            image=image,
            negative_prompts=merged_negatives,
        )

run(prompts, *args, image, negative_prompts=None, rulesets=None, negative_rulesets=None, **kwargs)

Source code in griptape/engines/image/variation_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    *args,
    image: ImageArtifact,
    negative_prompts: Optional[list[str]] = None,
    rulesets: Optional[list[Ruleset]] = None,
    negative_rulesets: Optional[list[Ruleset]] = None,
    **kwargs,
) -> ImageArtifact:
    """Produce a variation of *image*, merging ruleset rules into the prompts."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_image_variation(
        prompts=prompts, image=image, negative_prompts=negative_prompts
    )

VectorQueryEngine

Bases: BaseQueryEngine

Source code in griptape/engines/query/vector_query_engine.py
@define
class VectorQueryEngine(BaseQueryEngine):
    """Answers a query by vector-searching stored artifacts and prompting an LLM
    with as many retrieved text segments as the token budget allows."""

    # Tokens reserved for the model's answer when checking prompt size.
    answer_token_offset: int = field(default=400, kw_only=True)
    vector_store_driver: BaseVectorStoreDriver = field(kw_only=True)
    prompt_driver: BasePromptDriver = field(kw_only=True)
    user_template_generator: J2 = field(default=Factory(lambda: J2("engines/query/user.j2")), kw_only=True)
    system_template_generator: J2 = field(default=Factory(lambda: J2("engines/query/system.j2")), kw_only=True)

    def query(
        self,
        query: str,
        namespace: Optional[str] = None,
        *,
        rulesets: Optional[list[Ruleset]] = None,
        metadata: Optional[str] = None,
        top_n: Optional[int] = None,
        filter: Optional[dict] = None,
    ) -> TextArtifact:
        """Answer *query* from artifacts in *namespace*, trimming retrieved
        segments so the prompt plus answer_token_offset fits the model window."""
        tokenizer = self.prompt_driver.tokenizer
        result = self.vector_store_driver.query(query, top_n, namespace, filter=filter)
        # Keep only results whose metadata deserializes to a TextArtifact.
        artifacts = [
            artifact
            for artifact in [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta]
            if isinstance(artifact, TextArtifact)
        ]
        text_segments = []
        user_message = ""
        system_message = ""

        # Greedily add segments, re-rendering the prompt after each addition.
        for artifact in artifacts:
            text_segments.append(artifact.value)
            system_message = self.system_template_generator.render(
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
                metadata=metadata,
                text_segments=text_segments,
            )
            user_message = self.user_template_generator.render(query=query)

            message_token_count = self.prompt_driver.tokenizer.count_input_tokens_left(
                self.prompt_driver.prompt_stack_to_string(
                    PromptStack(
                        inputs=[
                            PromptStack.Input(system_message, role=PromptStack.SYSTEM_ROLE),
                            PromptStack.Input(user_message, role=PromptStack.USER_ROLE),
                        ]
                    )
                )
            )

            if message_token_count + self.answer_token_offset >= tokenizer.max_input_tokens:
                # Over budget: drop the segment just added and re-render without it.
                text_segments.pop()

                system_message = self.system_template_generator.render(
                    rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
                    metadata=metadata,
                    text_segments=text_segments,
                )

                break

        return self.prompt_driver.run(
            PromptStack(
                inputs=[
                    PromptStack.Input(system_message, role=PromptStack.SYSTEM_ROLE),
                    PromptStack.Input(user_message, role=PromptStack.USER_ROLE),
                ]
            )
        )

    def upsert_text_artifact(self, artifact: TextArtifact, namespace: Optional[str] = None) -> str:
        """Store one artifact; returns the vector-store identifier."""
        result = self.vector_store_driver.upsert_text_artifact(artifact, namespace=namespace)

        return result

    def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None:
        """Store a batch of artifacts under *namespace*."""
        self.vector_store_driver.upsert_text_artifacts({namespace: artifacts})

    def load_artifacts(self, namespace: str) -> ListArtifact:
        """Load entries in *namespace* that deserialize to TextArtifacts."""
        result = self.vector_store_driver.load_entries(namespace)
        artifacts = [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta and r.meta.get("artifact")]

        return ListArtifact([a for a in artifacts if isinstance(a, TextArtifact)])

answer_token_offset: int = field(default=400, kw_only=True) class-attribute instance-attribute

prompt_driver: BasePromptDriver = field(kw_only=True) class-attribute instance-attribute

system_template_generator: J2 = field(default=Factory(lambda: J2('engines/query/system.j2')), kw_only=True) class-attribute instance-attribute

user_template_generator: J2 = field(default=Factory(lambda: J2('engines/query/user.j2')), kw_only=True) class-attribute instance-attribute

vector_store_driver: BaseVectorStoreDriver = field(kw_only=True) class-attribute instance-attribute

load_artifacts(namespace)

Source code in griptape/engines/query/vector_query_engine.py
def load_artifacts(self, namespace: str) -> ListArtifact:
    """Load all text artifacts previously stored under `namespace`.

    Entries without an "artifact" payload in their metadata, and entries
    that deserialize to something other than a TextArtifact, are skipped.
    """
    text_artifacts = []
    for entry in self.vector_store_driver.load_entries(namespace):
        if entry.meta and entry.meta.get("artifact"):
            candidate = BaseArtifact.from_json(entry.meta["artifact"])
            if isinstance(candidate, TextArtifact):
                text_artifacts.append(candidate)

    return ListArtifact(text_artifacts)

query(query, namespace=None, *, rulesets=None, metadata=None, top_n=None, filter=None)

Source code in griptape/engines/query/vector_query_engine.py
def query(
    self,
    query: str,
    namespace: Optional[str] = None,
    *,
    rulesets: Optional[list[Ruleset]] = None,
    metadata: Optional[str] = None,
    top_n: Optional[int] = None,
    filter: Optional[dict] = None,  # NOTE: shadows the builtin; kept for backward compatibility
) -> TextArtifact:
    """Answer `query` using the most relevant stored text segments.

    Retrieves up to `top_n` entries from the vector store, packs as many of
    their text segments into the system prompt as the model's input limit
    allows (reserving `answer_token_offset` tokens for the answer itself),
    then runs the prompt driver.

    Args:
        query: the natural-language question to answer.
        namespace: optional vector store namespace to search in.
        rulesets: optional rulesets rendered into the system prompt.
        metadata: optional metadata string rendered into the system prompt.
        top_n: maximum number of vector store entries to retrieve.
        filter: optional vector store metadata filter.

    Returns:
        The prompt driver's answer as a TextArtifact.
    """
    tokenizer = self.prompt_driver.tokenizer
    result = self.vector_store_driver.query(query, top_n, namespace, filter=filter)
    artifacts = [
        artifact
        for artifact in [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta]
        if isinstance(artifact, TextArtifact)
    ]
    text_segments = []
    # FIX: the user message does not depend on the retrieved segments, so
    # render it once up front. Previously it was re-rendered on every loop
    # iteration and, when no artifacts matched, was left as "" — silently
    # dropping the user's query from the prompt.
    user_message = self.user_template_generator.render(query=query)
    system_message = self.system_template_generator.render(
        rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        metadata=metadata,
        text_segments=text_segments,
    )

    for artifact in artifacts:
        text_segments.append(artifact.value)
        system_message = self.system_template_generator.render(
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
            metadata=metadata,
            text_segments=text_segments,
        )

        # FIX: count_input_tokens_left() returns the tokens *remaining* under
        # the model's input limit, but the old check treated it as tokens
        # *used* (`left + answer_token_offset >= max_input_tokens` reduces to
        # `answer_token_offset >= used`), so truncation fired for tiny
        # prompts and never for oversized ones. Stop adding segments once
        # there is no longer enough room left for the answer itself.
        tokens_left = tokenizer.count_input_tokens_left(
            self.prompt_driver.prompt_stack_to_string(
                PromptStack(
                    inputs=[
                        PromptStack.Input(system_message, role=PromptStack.SYSTEM_ROLE),
                        PromptStack.Input(user_message, role=PromptStack.USER_ROLE),
                    ]
                )
            )
        )

        if tokens_left < self.answer_token_offset:
            # Drop the segment that overflowed the budget and re-render the
            # system prompt without it.
            text_segments.pop()

            system_message = self.system_template_generator.render(
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
                metadata=metadata,
                text_segments=text_segments,
            )

            break

    return self.prompt_driver.run(
        PromptStack(
            inputs=[
                PromptStack.Input(system_message, role=PromptStack.SYSTEM_ROLE),
                PromptStack.Input(user_message, role=PromptStack.USER_ROLE),
            ]
        )
    )

upsert_text_artifact(artifact, namespace=None)

Source code in griptape/engines/query/vector_query_engine.py
def upsert_text_artifact(self, artifact: TextArtifact, namespace: Optional[str] = None) -> str:
    """Insert or update a single text artifact in the vector store.

    Returns the vector store driver's id for the upserted entry.
    """
    return self.vector_store_driver.upsert_text_artifact(artifact, namespace=namespace)

upsert_text_artifacts(artifacts, namespace)

Source code in griptape/engines/query/vector_query_engine.py
def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None:
    """Bulk-insert text artifacts into the vector store under one namespace."""
    namespaced_artifacts = {namespace: artifacts}
    self.vector_store_driver.upsert_text_artifacts(namespaced_artifacts)