Skip to content

Engines

__all__ = ['BaseQueryEngine', 'VectorQueryEngine', 'BaseSummaryEngine', 'PromptSummaryEngine', 'BaseExtractionEngine', 'CsvExtractionEngine', 'JsonExtractionEngine', 'BaseImageGenerationEngine', 'PromptImageGenerationEngine', 'VariationImageGenerationEngine', 'InpaintingImageGenerationEngine', 'OutpaintingImageGenerationEngine'] module-attribute

BaseExtractionEngine

Bases: ABC

Source code in griptape/griptape/engines/extraction/base_extraction_engine.py
@define
class BaseExtractionEngine(ABC):
    """Abstract base for engines that extract structured data from text.

    Provides the shared prompt driver, chunker, and token-budget helpers;
    subclasses implement ``extract``.
    """

    # Fraction of the tokenizer's max_tokens budgeted for input chunks; validated to (0, 1].
    max_token_multiplier: float = field(default=0.5, kw_only=True)
    # Separator used when joining chunk artifacts back into a single prompt text.
    chunk_joiner: str = field(default="\n\n", kw_only=True)
    prompt_driver: BasePromptDriver = field(
        default=Factory(lambda: OpenAiChatPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)),
        kw_only=True,
    )
    # Default chunker is a TextChunker sized from this engine's own token budget
    # (takes_self=True lets the factory read prompt_driver / max_chunker_tokens).
    chunker: BaseChunker = field(
        default=Factory(
            lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens),
            takes_self=True,
        ),
        kw_only=True,
    )

    @max_token_multiplier.validator  # pyright: ignore
    def validate_max_token_multiplier(self, _, max_token_multiplier: float) -> None:
        """Reject multiplier values outside the half-open interval (0, 1]."""
        if max_token_multiplier > 1:
            raise ValueError("has to be less than or equal to 1")
        elif max_token_multiplier <= 0:
            raise ValueError("has to be greater than 0")

    @property
    def max_chunker_tokens(self) -> int:
        """Token budget for each input chunk: max_tokens scaled by the multiplier."""
        return round(self.prompt_driver.tokenizer.max_tokens * self.max_token_multiplier)

    @property
    def min_response_tokens(self) -> int:
        """Tokens reserved for the model response: the remainder of the budget."""
        return round(
            self.prompt_driver.tokenizer.max_tokens
            - self.prompt_driver.tokenizer.max_tokens * self.max_token_multiplier
        )

    @abstractmethod
    def extract(self, text: str | ListArtifact, rulesets: list[Ruleset] | None = None, **kwargs) -> ListArtifact:
        """Extract structured artifacts from *text*; implemented by concrete engines."""
        ...

chunk_joiner: str = field(default='\n\n', kw_only=True) class-attribute instance-attribute

chunker: BaseChunker = field(default=Factory(lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens), takes_self=True), kw_only=True) class-attribute instance-attribute

max_chunker_tokens: int property

max_token_multiplier: float = field(default=0.5, kw_only=True) class-attribute instance-attribute

min_response_tokens: int property

prompt_driver: BasePromptDriver = field(default=Factory(lambda : OpenAiChatPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)), kw_only=True) class-attribute instance-attribute

extract(text, rulesets=None, **kwargs) abstractmethod

Source code in griptape/griptape/engines/extraction/base_extraction_engine.py
@abstractmethod
def extract(self, text: str | ListArtifact, rulesets: list[Ruleset] | None = None, **kwargs) -> ListArtifact:
    """Extract structured artifacts from *text*; implemented by concrete engines."""
    ...

validate_max_token_multiplier(_, max_token_multiplier)

Source code in griptape/griptape/engines/extraction/base_extraction_engine.py
@max_token_multiplier.validator  # pyright: ignore
def validate_max_token_multiplier(self, _, max_token_multiplier: float) -> None:
    """Reject multiplier values outside the half-open interval (0, 1]."""
    if max_token_multiplier > 1:
        raise ValueError("has to be less than or equal to 1")
    elif max_token_multiplier <= 0:
        raise ValueError("has to be greater than 0")

BaseImageGenerationEngine

Source code in griptape/griptape/engines/image/base_image_generation_engine.py
@define
class BaseImageGenerationEngine:
    """Shared base for image-generation engines: holds the driver and prompt helpers."""

    image_generation_driver: BaseImageGenerationDriver = field(kw_only=True)

    def _ruleset_to_prompts(self, prompts: list[str] | None, rulesets: list[Ruleset] | None) -> list[str]:
        """Append every rule value from *rulesets* onto *prompts*.

        When *prompts* is a non-empty list it is extended in place; a falsy
        value is replaced with a fresh list first.
        """
        if not prompts:
            prompts = []

        for ruleset in rulesets or []:
            prompts.extend(rule.value for rule in ruleset.rules)

        return prompts

image_generation_driver: BaseImageGenerationDriver = field(kw_only=True) class-attribute instance-attribute

BaseQueryEngine

Bases: ABC

Source code in griptape/griptape/engines/query/base_query_engine.py
@define
class BaseQueryEngine(ABC):
    """Abstract interface for query engines backed by an artifact store."""

    @abstractmethod
    def query(self, query: str, namespace: str | None = None, rulesets: list[Ruleset] | None = None) -> TextArtifact:
        """Answer *query*, optionally scoped to *namespace* and guided by *rulesets*."""
        ...

    @abstractmethod
    def load_artifacts(self, namespace: str) -> ListArtifact:
        """Load all text artifacts stored under *namespace*."""
        ...

    @abstractmethod
    def upsert_text_artifact(self, artifact: TextArtifact, namespace: str | None = None) -> str:
        """Insert or update a single artifact; returns the implementation's string result (typically an id)."""
        ...

    @abstractmethod
    def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None:
        """Insert or update multiple artifacts under *namespace*."""
        ...

load_artifacts(namespace) abstractmethod

Source code in griptape/griptape/engines/query/base_query_engine.py
@abstractmethod
def load_artifacts(self, namespace: str) -> ListArtifact:
    """Load all text artifacts stored under *namespace*."""
    ...

query(query, namespace=None, rulesets=None) abstractmethod

Source code in griptape/griptape/engines/query/base_query_engine.py
@abstractmethod
def query(self, query: str, namespace: str | None = None, rulesets: list[Ruleset] | None = None) -> TextArtifact:
    """Answer *query*, optionally scoped to *namespace* and guided by *rulesets*."""
    ...

upsert_text_artifact(artifact, namespace=None) abstractmethod

Source code in griptape/griptape/engines/query/base_query_engine.py
@abstractmethod
def upsert_text_artifact(self, artifact: TextArtifact, namespace: str | None = None) -> str:
    """Insert or update a single artifact; returns the implementation's string result (typically an id)."""
    ...

upsert_text_artifacts(artifacts, namespace) abstractmethod

Source code in griptape/griptape/engines/query/base_query_engine.py
@abstractmethod
def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None:
    """Insert or update multiple artifacts under *namespace*."""
    ...

BaseSummaryEngine

Bases: ABC

Source code in griptape/griptape/engines/summary/base_summary_engine.py
@define
class BaseSummaryEngine(ABC):
    """Abstract base for summarization engines."""

    def summarize_text(self, text: str, rulesets: Optional[list[Ruleset]] = None) -> str:
        """Wrap *text* in a single-item ListArtifact, summarize it, and return plain text."""
        wrapped = ListArtifact([TextArtifact(text)])
        summary = self.summarize_artifacts(wrapped, rulesets=rulesets)

        return summary.value

    @abstractmethod
    def summarize_artifacts(self, artifacts: ListArtifact, rulesets: Optional[list[Ruleset]] = None) -> TextArtifact:
        """Summarize *artifacts* into a single TextArtifact; implemented by subclasses."""
        ...

summarize_artifacts(artifacts, rulesets=None) abstractmethod

Source code in griptape/griptape/engines/summary/base_summary_engine.py
@abstractmethod
def summarize_artifacts(self, artifacts: ListArtifact, rulesets: Optional[list[Ruleset]] = None) -> TextArtifact:
    """Summarize *artifacts* into a single TextArtifact; implemented by subclasses."""
    ...

summarize_text(text, rulesets=None)

Source code in griptape/griptape/engines/summary/base_summary_engine.py
def summarize_text(self, text: str, rulesets: Optional[list[Ruleset]] = None) -> str:
    """Wrap *text* in a single-item ListArtifact, summarize it, and return plain text."""
    return self.summarize_artifacts(ListArtifact([TextArtifact(text)]), rulesets=rulesets).value

CsvExtractionEngine

Bases: BaseExtractionEngine

Source code in griptape/griptape/engines/extraction/csv_extraction_engine.py
@define
class CsvExtractionEngine(BaseExtractionEngine):
    """Extracts CSV rows from text by prompting an LLM, chunking recursively when needed."""

    template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/csv_extraction.j2")), kw_only=True)

    def extract(
        self, text: str | ListArtifact, column_names: list[str], rulesets: list[Ruleset] | None = None
    ) -> ListArtifact | ErrorArtifact:
        """Extract rows named by *column_names* from *text*.

        Returns a ListArtifact of CsvRowArtifacts on success, or an
        ErrorArtifact wrapping the failure.
        """
        try:
            return ListArtifact(
                self._extract_rec(
                    text.value if isinstance(text, ListArtifact) else [TextArtifact(text)],
                    column_names,
                    [],
                    rulesets=rulesets,
                ),
                item_separator="\n",
            )
        except Exception as e:
            return ErrorArtifact(f"error extracting CSV rows: {e}")

    def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArtifact]:
        """Parse CSV *text* into CsvRowArtifacts keyed by *column_names*."""
        rows = []

        with io.StringIO(text) as f:
            for row in csv.reader(f):
                # zip drops any columns beyond len(column_names); cell values are whitespace-stripped.
                rows.append(CsvRowArtifact(dict(zip(column_names, [x.strip() for x in row]))))

        return rows

    def _extract_rec(
        self,
        artifacts: list[TextArtifact],
        column_names: list[str],
        rows: list[CsvRowArtifact],
        rulesets: list[Ruleset] | None = None,
    ) -> list[CsvRowArtifact]:
        """Recursively extract rows from *artifacts*, accumulating into *rows*."""
        artifacts_text = self.chunk_joiner.join([a.value for a in artifacts])
        full_text = self.template_generator.render(
            column_names=column_names,
            text=artifacts_text,
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        )

        if self.prompt_driver.tokenizer.count_tokens_left(full_text) >= self.min_response_tokens:
            # Whole text fits while leaving room for the response: extract in one call.
            rows.extend(
                self.text_to_csv_rows(
                    self.prompt_driver.run(
                        PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
                    ).value,
                    column_names,
                )
            )

            return rows
        else:
            # Too large: extract from the first chunk, then recurse on the remainder.
            chunks = self.chunker.chunk(artifacts_text)
            partial_text = self.template_generator.render(
                column_names=column_names,
                text=chunks[0].value,
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
            )

            rows.extend(
                self.text_to_csv_rows(
                    self.prompt_driver.run(
                        PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
                    ).value,
                    column_names,
                )
            )

            return self._extract_rec(chunks[1:], column_names, rows, rulesets=rulesets)

template_generator: J2 = field(default=Factory(lambda : J2('engines/extraction/csv_extraction.j2')), kw_only=True) class-attribute instance-attribute

extract(text, column_names, rulesets=None)

Source code in griptape/griptape/engines/extraction/csv_extraction_engine.py
def extract(
    self, text: str | ListArtifact, column_names: list[str], rulesets: list[Ruleset] | None = None
) -> ListArtifact | ErrorArtifact:
    """Extract rows named by *column_names* from *text*.

    Returns a ListArtifact of CsvRowArtifacts, or an ErrorArtifact on failure.
    """
    try:
        return ListArtifact(
            self._extract_rec(
                text.value if isinstance(text, ListArtifact) else [TextArtifact(text)],
                column_names,
                [],
                rulesets=rulesets,
            ),
            item_separator="\n",
        )
    except Exception as e:
        return ErrorArtifact(f"error extracting CSV rows: {e}")

text_to_csv_rows(text, column_names)

Source code in griptape/griptape/engines/extraction/csv_extraction_engine.py
def text_to_csv_rows(self, text: str, column_names: list[str]) -> list[CsvRowArtifact]:
    """Parse CSV *text* into CsvRowArtifacts keyed by *column_names*."""
    rows = []

    with io.StringIO(text) as f:
        for row in csv.reader(f):
            # zip drops any columns beyond len(column_names); cell values are whitespace-stripped.
            rows.append(CsvRowArtifact(dict(zip(column_names, [x.strip() for x in row]))))

    return rows

InpaintingImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/griptape/engines/image/inpainting_image_generation_engine.py
@define
class InpaintingImageGenerationEngine(BaseImageGenerationEngine):
    """Generates images by inpainting a masked region of an input image."""

    def run(
        self,
        prompts: list[str],
        image: ImageArtifact,
        mask: ImageArtifact,
        negative_prompts: list[str] | None = None,
        rulesets: list[Ruleset] | None = None,
        negative_rulesets: list[Ruleset] | None = None,
    ) -> ImageArtifact:
        """Fold ruleset rule values into both prompt lists, then delegate inpainting to the driver."""
        combined_prompts = self._ruleset_to_prompts(prompts, rulesets)
        combined_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_image_inpainting(
            combined_prompts,
            image=image,
            mask=mask,
            negative_prompts=combined_negatives,
        )

run(prompts, image, mask, negative_prompts=None, rulesets=None, negative_rulesets=None)

Source code in griptape/griptape/engines/image/inpainting_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    image: ImageArtifact,
    mask: ImageArtifact,
    negative_prompts: list[str] | None = None,
    rulesets: list[Ruleset] | None = None,
    negative_rulesets: list[Ruleset] | None = None,
) -> ImageArtifact:
    """Fold ruleset rule values into both prompt lists, then delegate inpainting to the driver."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_image_inpainting(
        prompts, image=image, mask=mask, negative_prompts=negative_prompts
    )

JsonExtractionEngine

Bases: BaseExtractionEngine

Source code in griptape/griptape/engines/extraction/json_extraction_engine.py
@define
class JsonExtractionEngine(BaseExtractionEngine):
    """Extracts JSON values matching a template schema from text using an LLM.

    The input is rendered into a prompt template; when the rendered prompt
    leaves too little room for the response, the text is chunked and extracted
    recursively.
    """

    template_generator: J2 = field(default=Factory(lambda: J2("engines/extraction/json_extraction.j2")), kw_only=True)

    def extract(
        self, text: str | ListArtifact, template_schema: dict, rulesets: list[Ruleset] | None = None
    ) -> ListArtifact | ErrorArtifact:
        """Extract values described by *template_schema* from *text*.

        Returns a ListArtifact of TextArtifacts on success, or an
        ErrorArtifact wrapping the failure.
        """
        try:
            json_schema = json.dumps(template_schema)

            return ListArtifact(
                self._extract_rec(
                    text.value if isinstance(text, ListArtifact) else [TextArtifact(text)],
                    json_schema,
                    [],
                    rulesets=rulesets,
                ),
                item_separator="\n",
            )
        except Exception as e:
            return ErrorArtifact(f"error extracting JSON: {e}")

    def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]:
        """Parse a JSON array string into one TextArtifact per element."""
        return [TextArtifact(e) for e in json.loads(json_input)]

    def _extract_rec(
        self,
        artifacts: list[TextArtifact],
        json_template_schema: str,
        extractions: list[TextArtifact],
        rulesets: list[Ruleset] | None = None,
    ) -> list[TextArtifact]:
        """Recursively extract from *artifacts*, accumulating into *extractions*."""
        artifacts_text = self.chunk_joiner.join([a.value for a in artifacts])
        full_text = self.template_generator.render(
            json_template_schema=json_template_schema,
            text=artifacts_text,
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        )

        if self.prompt_driver.tokenizer.count_tokens_left(full_text) >= self.min_response_tokens:
            # Whole text fits while leaving room for the response: extract in one call.
            extractions.extend(
                self.json_to_text_artifacts(
                    self.prompt_driver.run(
                        PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
                    ).value
                )
            )

            return extractions
        else:
            chunks = self.chunker.chunk(artifacts_text)
            # BUG FIX: this branch previously rendered the template with
            # `template_schema=` while the full-text branch used
            # `json_template_schema=`, so the schema variable was silently
            # undefined in one of the two renders (Jinja2 renders missing
            # variables as empty). Both branches now pass the same name.
            partial_text = self.template_generator.render(
                json_template_schema=json_template_schema,
                text=chunks[0].value,
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
            )

            extractions.extend(
                self.json_to_text_artifacts(
                    self.prompt_driver.run(
                        PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
                    ).value
                )
            )

            return self._extract_rec(chunks[1:], json_template_schema, extractions, rulesets=rulesets)

template_generator: J2 = field(default=Factory(lambda : J2('engines/extraction/json_extraction.j2')), kw_only=True) class-attribute instance-attribute

extract(text, template_schema, rulesets=None)

Source code in griptape/griptape/engines/extraction/json_extraction_engine.py
def extract(
    self, text: str | ListArtifact, template_schema: dict, rulesets: list[Ruleset] | None = None
) -> ListArtifact | ErrorArtifact:
    """Extract values described by *template_schema* from *text*.

    Returns a ListArtifact of TextArtifacts, or an ErrorArtifact on failure.
    """
    try:
        json_schema = json.dumps(template_schema)

        return ListArtifact(
            self._extract_rec(
                text.value if isinstance(text, ListArtifact) else [TextArtifact(text)],
                json_schema,
                [],
                rulesets=rulesets,
            ),
            item_separator="\n",
        )
    except Exception as e:
        return ErrorArtifact(f"error extracting JSON: {e}")

json_to_text_artifacts(json_input)

Source code in griptape/griptape/engines/extraction/json_extraction_engine.py
def json_to_text_artifacts(self, json_input: str) -> list[TextArtifact]:
    """Parse a JSON array string into one TextArtifact per element."""
    return [TextArtifact(e) for e in json.loads(json_input)]

OutpaintingImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/griptape/engines/image/outpainting_image_generation_engine.py
@define
class OutpaintingImageGenerationEngine(BaseImageGenerationEngine):
    """Generates images by outpainting beyond a masked region of an input image."""

    def run(
        self,
        prompts: list[str],
        image: ImageArtifact,
        mask: ImageArtifact,
        negative_prompts: list[str] | None = None,
        rulesets: list[Ruleset] | None = None,
        negative_rulesets: list[Ruleset] | None = None,
    ) -> ImageArtifact:
        """Fold ruleset rule values into both prompt lists, then delegate outpainting to the driver."""
        combined_prompts = self._ruleset_to_prompts(prompts, rulesets)
        combined_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_image_outpainting(
            combined_prompts,
            image=image,
            mask=mask,
            negative_prompts=combined_negatives,
        )

run(prompts, image, mask, negative_prompts=None, rulesets=None, negative_rulesets=None)

Source code in griptape/griptape/engines/image/outpainting_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    image: ImageArtifact,
    mask: ImageArtifact,
    negative_prompts: list[str] | None = None,
    rulesets: list[Ruleset] | None = None,
    negative_rulesets: list[Ruleset] | None = None,
) -> ImageArtifact:
    """Fold ruleset rule values into both prompt lists, then delegate outpainting to the driver."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_image_outpainting(
        prompts, image=image, mask=mask, negative_prompts=negative_prompts
    )

PromptImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/griptape/engines/image/prompt_image_generation_engine.py
@define
class PromptImageGenerationEngine(BaseImageGenerationEngine):
    """Generates images from text prompts via the configured driver."""

    def run(
        self,
        prompts: list[str],
        negative_prompts: list[str] | None = None,
        rulesets: list[Ruleset] | None = None,
        negative_rulesets: list[Ruleset] | None = None,
    ) -> ImageArtifact:
        """Fold ruleset rule values into both prompt lists, then run text-to-image."""
        combined_prompts = self._ruleset_to_prompts(prompts, rulesets)
        combined_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_text_to_image(
            combined_prompts, negative_prompts=combined_negatives
        )

run(prompts, negative_prompts=None, rulesets=None, negative_rulesets=None)

Source code in griptape/griptape/engines/image/prompt_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    negative_prompts: list[str] | None = None,
    rulesets: list[Ruleset] | None = None,
    negative_rulesets: list[Ruleset] | None = None,
) -> ImageArtifact:
    """Fold ruleset rule values into both prompt lists, then run text-to-image."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_text_to_image(prompts, negative_prompts=negative_prompts)

PromptSummaryEngine

Bases: BaseSummaryEngine

Source code in griptape/griptape/engines/summary/prompt_summary_engine.py
@define
class PromptSummaryEngine(BaseSummaryEngine):
    """Summarizes artifacts by prompting an LLM, chunking recursively when the text is too large."""

    # Separator used when joining artifact texts into a single prompt body.
    chunk_joiner: str = field(default="\n\n", kw_only=True)
    # Fraction of the tokenizer's max_tokens budgeted for input chunks; validated to (0, 1].
    max_token_multiplier: float = field(default=0.5, kw_only=True)
    template_generator: J2 = field(default=Factory(lambda: J2("engines/summary/prompt_summary.j2")), kw_only=True)
    prompt_driver: BasePromptDriver = field(
        default=Factory(lambda: OpenAiChatPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)),
        kw_only=True,
    )
    # Default chunker is a TextChunker sized from this engine's own token budget.
    chunker: BaseChunker = field(
        default=Factory(
            lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens),
            takes_self=True,
        ),
        kw_only=True,
    )

    # NOTE(review): the name looks copy-pasted from an allowlist validator — it actually
    # validates max_token_multiplier. Kept as-is since renaming would change the public API.
    @max_token_multiplier.validator  # pyright: ignore
    def validate_allowlist(self, _, max_token_multiplier: float) -> None:
        """Reject multiplier values outside the half-open interval (0, 1]."""
        if max_token_multiplier > 1:
            raise ValueError("has to be less than or equal to 1")
        elif max_token_multiplier <= 0:
            raise ValueError("has to be greater than 0")

    @property
    def max_chunker_tokens(self) -> int:
        """Token budget for each input chunk: max_tokens scaled by the multiplier."""
        return round(self.prompt_driver.tokenizer.max_tokens * self.max_token_multiplier)

    @property
    def min_response_tokens(self) -> int:
        """Tokens reserved for the model response: the remainder of the budget."""
        return round(
            self.prompt_driver.tokenizer.max_tokens
            - self.prompt_driver.tokenizer.max_tokens * self.max_token_multiplier
        )

    def summarize_artifacts(self, artifacts: ListArtifact, rulesets: Optional[list[Ruleset]] = None) -> TextArtifact:
        """Summarize all artifacts into a single TextArtifact, applying *rulesets*."""
        return self.summarize_artifacts_rec(artifacts.value, None, rulesets=rulesets)

    def summarize_artifacts_rec(
        self, artifacts: list[BaseArtifact], summary: Optional[str], rulesets: Optional[list[Ruleset]] = None
    ) -> TextArtifact:
        """Recursively fold *artifacts* into a running *summary*, one chunk at a time."""
        artifacts_text = self.chunk_joiner.join([a.to_text() for a in artifacts])

        full_text = self.template_generator.render(
            summary=summary, text=artifacts_text, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
        )

        if self.prompt_driver.tokenizer.count_tokens_left(full_text) >= self.min_response_tokens:
            # Everything fits while leaving room for the response: single-shot summary.
            return self.prompt_driver.run(
                PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
            )
        else:
            # Too large: fold the first chunk into the summary, recurse on the rest.
            chunks = self.chunker.chunk(artifacts_text)

            partial_text = self.template_generator.render(
                summary=summary, text=chunks[0].value, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
            )

            return self.summarize_artifacts_rec(
                chunks[1:],
                self.prompt_driver.run(
                    PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
                ).value,
                rulesets=rulesets,
            )

chunk_joiner: str = field(default='\n\n', kw_only=True) class-attribute instance-attribute

chunker: BaseChunker = field(default=Factory(lambda self: TextChunker(tokenizer=self.prompt_driver.tokenizer, max_tokens=self.max_chunker_tokens), takes_self=True), kw_only=True) class-attribute instance-attribute

max_chunker_tokens: int property

max_token_multiplier: float = field(default=0.5, kw_only=True) class-attribute instance-attribute

min_response_tokens: int property

prompt_driver: BasePromptDriver = field(default=Factory(lambda : OpenAiChatPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)), kw_only=True) class-attribute instance-attribute

template_generator: J2 = field(default=Factory(lambda : J2('engines/summary/prompt_summary.j2')), kw_only=True) class-attribute instance-attribute

summarize_artifacts(artifacts, rulesets=None)

Source code in griptape/griptape/engines/summary/prompt_summary_engine.py
def summarize_artifacts(self, artifacts: ListArtifact, rulesets: Optional[list[Ruleset]] = None) -> TextArtifact:
    """Summarize all artifacts into a single TextArtifact, applying *rulesets*."""
    return self.summarize_artifacts_rec(artifacts.value, None, rulesets=rulesets)

summarize_artifacts_rec(artifacts, summary, rulesets=None)

Source code in griptape/griptape/engines/summary/prompt_summary_engine.py
def summarize_artifacts_rec(
    self, artifacts: list[BaseArtifact], summary: Optional[str], rulesets: Optional[list[Ruleset]] = None
) -> TextArtifact:
    """Recursively fold *artifacts* into a running *summary*, one chunk at a time."""
    artifacts_text = self.chunk_joiner.join([a.to_text() for a in artifacts])

    full_text = self.template_generator.render(
        summary=summary, text=artifacts_text, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
    )

    if self.prompt_driver.tokenizer.count_tokens_left(full_text) >= self.min_response_tokens:
        # Everything fits while leaving room for the response: single-shot summary.
        return self.prompt_driver.run(
            PromptStack(inputs=[PromptStack.Input(full_text, role=PromptStack.USER_ROLE)])
        )
    else:
        # Too large: fold the first chunk into the summary, recurse on the rest.
        chunks = self.chunker.chunk(artifacts_text)

        partial_text = self.template_generator.render(
            summary=summary, text=chunks[0].value, rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets)
        )

        return self.summarize_artifacts_rec(
            chunks[1:],
            self.prompt_driver.run(
                PromptStack(inputs=[PromptStack.Input(partial_text, role=PromptStack.USER_ROLE)])
            ).value,
            rulesets=rulesets,
        )

validate_allowlist(_, max_token_multiplier)

Source code in griptape/griptape/engines/summary/prompt_summary_engine.py
# NOTE(review): the name looks copy-pasted from an allowlist validator — it actually
# validates max_token_multiplier.
@max_token_multiplier.validator  # pyright: ignore
def validate_allowlist(self, _, max_token_multiplier: float) -> None:
    """Reject multiplier values outside the half-open interval (0, 1]."""
    if max_token_multiplier > 1:
        raise ValueError("has to be less than or equal to 1")
    elif max_token_multiplier <= 0:
        raise ValueError("has to be greater than 0")

VariationImageGenerationEngine

Bases: BaseImageGenerationEngine

Source code in griptape/griptape/engines/image/variation_image_generation_engine.py
@define
class VariationImageGenerationEngine(BaseImageGenerationEngine):
    """Generates variations of an input image guided by text prompts."""

    def run(
        self,
        prompts: list[str],
        image: ImageArtifact,
        negative_prompts: list[str] | None = None,
        rulesets: list[Ruleset] | None = None,
        negative_rulesets: list[Ruleset] | None = None,
    ) -> ImageArtifact:
        """Fold ruleset rule values into both prompt lists, then delegate variation to the driver."""
        combined_prompts = self._ruleset_to_prompts(prompts, rulesets)
        combined_negatives = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

        return self.image_generation_driver.run_image_variation(
            prompts=combined_prompts,
            image=image,
            negative_prompts=combined_negatives,
        )

run(prompts, image, negative_prompts=None, rulesets=None, negative_rulesets=None)

Source code in griptape/griptape/engines/image/variation_image_generation_engine.py
def run(
    self,
    prompts: list[str],
    image: ImageArtifact,
    negative_prompts: list[str] | None = None,
    rulesets: list[Ruleset] | None = None,
    negative_rulesets: list[Ruleset] | None = None,
) -> ImageArtifact:
    """Fold ruleset rule values into both prompt lists, then delegate variation to the driver."""
    prompts = self._ruleset_to_prompts(prompts, rulesets)
    negative_prompts = self._ruleset_to_prompts(negative_prompts, negative_rulesets)

    return self.image_generation_driver.run_image_variation(
        prompts=prompts, image=image, negative_prompts=negative_prompts
    )

VectorQueryEngine

Bases: BaseQueryEngine

Source code in griptape/griptape/engines/query/vector_query_engine.py
@define
class VectorQueryEngine(BaseQueryEngine):
    """Answers queries by retrieving related text from a vector store and prompting an LLM."""

    # Token headroom reserved for the model's answer when packing text segments.
    answer_token_offset: int = field(default=400, kw_only=True)
    vector_store_driver: BaseVectorStoreDriver = field(kw_only=True)
    prompt_driver: BasePromptDriver = field(
        default=Factory(lambda: OpenAiChatPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)),
        kw_only=True,
    )
    template_generator: J2 = field(default=Factory(lambda: J2("engines/query/vector_query.j2")), kw_only=True)

    def query(
        self,
        query: str,
        namespace: str | None = None,
        rulesets: list[Ruleset] | None = None,
        metadata: str | None = None,
        top_n: int | None = None,
    ) -> TextArtifact:
        """Answer *query* from text retrieved out of the vector store.

        Retrieved segments are added one at a time; once the rendered prompt
        plus answer_token_offset would exceed the tokenizer's max_tokens, the
        segment that tipped it over is removed and packing stops.
        """
        tokenizer = self.prompt_driver.tokenizer
        result = self.vector_store_driver.query(query, top_n, namespace)
        # Keep only results whose metadata deserializes to a TextArtifact.
        artifacts = [
            artifact
            for artifact in [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta]
            if isinstance(artifact, TextArtifact)
        ]
        text_segments = []
        message = ""

        for artifact in artifacts:
            text_segments.append(artifact.value)

            # Re-render with the new segment included to measure its token cost.
            message = self.template_generator.render(
                metadata=metadata,
                query=query,
                text_segments=text_segments,
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
            )
            message_token_count = self.prompt_driver.token_count(
                PromptStack(inputs=[PromptStack.Input(message, role=PromptStack.USER_ROLE)])
            )

            if message_token_count + self.answer_token_offset >= tokenizer.max_tokens:
                # Over budget: back out the last segment and re-render without it.
                text_segments.pop()

                message = self.template_generator.render(
                    metadata=metadata,
                    query=query,
                    text_segments=text_segments,
                    rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
                )

                break

        return self.prompt_driver.run(PromptStack(inputs=[PromptStack.Input(message, role=PromptStack.USER_ROLE)]))

    def upsert_text_artifact(self, artifact: TextArtifact, namespace: str | None = None) -> str:
        """Upsert a single text artifact into the vector store; returns the driver's result string."""
        result = self.vector_store_driver.upsert_text_artifact(artifact, namespace=namespace)

        return result

    def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None:
        """Upsert multiple text artifacts under *namespace*."""
        self.vector_store_driver.upsert_text_artifacts({namespace: artifacts})

    def load_artifacts(self, namespace: str) -> ListArtifact:
        """Load entries from *namespace*, returning only those that deserialize to TextArtifacts."""
        result = self.vector_store_driver.load_entries(namespace)
        artifacts = [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta and r.meta.get("artifact")]

        return ListArtifact([a for a in artifacts if isinstance(a, TextArtifact)])

answer_token_offset: int = field(default=400, kw_only=True) class-attribute instance-attribute

prompt_driver: BasePromptDriver = field(default=Factory(lambda : OpenAiChatPromptDriver(model=OpenAiTokenizer.DEFAULT_OPENAI_GPT_3_CHAT_MODEL)), kw_only=True) class-attribute instance-attribute

template_generator: J2 = field(default=Factory(lambda : J2('engines/query/vector_query.j2')), kw_only=True) class-attribute instance-attribute

vector_store_driver: BaseVectorStoreDriver = field(kw_only=True) class-attribute instance-attribute

load_artifacts(namespace)

Source code in griptape/griptape/engines/query/vector_query_engine.py
def load_artifacts(self, namespace: str) -> ListArtifact:
    """Load entries from *namespace*, returning only those that deserialize to TextArtifacts."""
    result = self.vector_store_driver.load_entries(namespace)
    artifacts = [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta and r.meta.get("artifact")]

    return ListArtifact([a for a in artifacts if isinstance(a, TextArtifact)])

query(query, namespace=None, rulesets=None, metadata=None, top_n=None)

Source code in griptape/griptape/engines/query/vector_query_engine.py
def query(
    self,
    query: str,
    namespace: str | None = None,
    rulesets: list[Ruleset] | None = None,
    metadata: str | None = None,
    top_n: int | None = None,
) -> TextArtifact:
    """Answer *query* from text retrieved out of the vector store.

    Retrieved segments are added one at a time; once the rendered prompt plus
    answer_token_offset would exceed the tokenizer's max_tokens, the segment
    that tipped it over is removed and packing stops.
    """
    tokenizer = self.prompt_driver.tokenizer
    result = self.vector_store_driver.query(query, top_n, namespace)
    # Keep only results whose metadata deserializes to a TextArtifact.
    artifacts = [
        artifact
        for artifact in [BaseArtifact.from_json(r.meta["artifact"]) for r in result if r.meta]
        if isinstance(artifact, TextArtifact)
    ]
    text_segments = []
    message = ""

    for artifact in artifacts:
        text_segments.append(artifact.value)

        # Re-render with the new segment included to measure its token cost.
        message = self.template_generator.render(
            metadata=metadata,
            query=query,
            text_segments=text_segments,
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
        )
        message_token_count = self.prompt_driver.token_count(
            PromptStack(inputs=[PromptStack.Input(message, role=PromptStack.USER_ROLE)])
        )

        if message_token_count + self.answer_token_offset >= tokenizer.max_tokens:
            # Over budget: back out the last segment and re-render without it.
            text_segments.pop()

            message = self.template_generator.render(
                metadata=metadata,
                query=query,
                text_segments=text_segments,
                rulesets=J2("rulesets/rulesets.j2").render(rulesets=rulesets),
            )

            break

    return self.prompt_driver.run(PromptStack(inputs=[PromptStack.Input(message, role=PromptStack.USER_ROLE)]))

upsert_text_artifact(artifact, namespace=None)

Source code in griptape/griptape/engines/query/vector_query_engine.py
def upsert_text_artifact(self, artifact: TextArtifact, namespace: str | None = None) -> str:
    """Upsert a single text artifact into the vector store; returns the driver's result string."""
    result = self.vector_store_driver.upsert_text_artifact(artifact, namespace=namespace)

    return result

upsert_text_artifacts(artifacts, namespace)

Source code in griptape/griptape/engines/query/vector_query_engine.py
def upsert_text_artifacts(self, artifacts: list[TextArtifact], namespace: str) -> None:
    """Upsert multiple text artifacts under *namespace*."""
    self.vector_store_driver.upsert_text_artifacts({namespace: artifacts})