Skip to content

Tasks

__all__ = ['BaseTask', 'BaseTextInputTask', 'PromptTask', 'ActionSubtask', 'ToolkitTask', 'TextSummaryTask', 'ToolTask', 'TextQueryTask', 'ExtractionTask', 'BaseImageGenerationTask', 'CodeExecutionTask', 'PromptImageGenerationTask', 'VariationImageGenerationTask', 'InpaintingImageGenerationTask', 'OutpaintingImageGenerationTask'] module-attribute

ActionSubtask

Bases: PromptTask

Source code in griptape/griptape/tasks/action_subtask.py
@define
class ActionSubtask(PromptTask):
    """Subtask that parses an LLM response into a thought, a tool action, or a final answer.

    The prompt text is scanned for ``Thought:``, ``Action:`` (a JSON object), and
    ``Answer:`` sections. A valid action is resolved to a tool on the origin task and
    executed in `run`; any parse/validation failure records an "error" pseudo-action.
    """

    # Capture the text after "Thought:" / "Answer:"; ACTION_PATTERN grabs the JSON object after "Action:".
    THOUGHT_PATTERN = r"(?s)^Thought:\s*(.*?)$"
    ACTION_PATTERN = r"(?s)Action:[^{]*({.*})"
    ANSWER_PATTERN = r"(?s)^Answer:\s?([\s\S]*)$"
    # JSON schema every parsed action object must satisfy ("input" is optional).
    ACTION_SCHEMA = Schema(
        description="Actions have name, path, and input object.",
        schema={
            Literal("name", description="Action name"): str,
            Literal("path", description="Action path"): str,
            schema.Optional(Literal("input", description="Optional action path input values object")): {"values": dict},
        },
    )

    # Raw prompt text this subtask was created from.
    _input: str | None = field(default=None)
    # ID of the task this subtask is attached to (set by attach_to).
    parent_task_id: str | None = field(default=None, kw_only=True)
    # Parsed pieces of the LLM response; action_name == "error" marks a parse failure.
    thought: str | None = field(default=None, kw_only=True)
    action_name: str | None = field(default=None, kw_only=True)
    action_path: str | None = field(default=None, kw_only=True)
    action_input: dict | None = field(default=None, kw_only=True)

    # Resolved tool and memory; plain class attributes, not attrs fields.
    _tool: BaseTool | None = None
    _memory: TaskMemory | None = None

    @property
    def input(self) -> TextArtifact:
        """Raw prompt text wrapped in a TextArtifact."""
        return TextArtifact(self._input)

    @property
    def origin_task(self) -> ActionSubtaskOriginMixin | None:
        """The parent task, looked up in the structure by parent_task_id."""
        return self.structure.find_task(self.parent_task_id)

    @property
    def parents(self) -> list[ActionSubtask]:
        """Sibling subtasks referenced by parent_ids, resolved via the origin task."""
        return [self.origin_task.find_subtask(parent_id) for parent_id in self.parent_ids]

    @property
    def children(self) -> list[ActionSubtask]:
        """Sibling subtasks referenced by child_ids, resolved via the origin task."""
        return [self.origin_task.find_subtask(child_id) for child_id in self.child_ids]

    def attach_to(self, parent_task: BaseTask):
        """Bind this subtask to its parent task and parse the prompt input."""
        self.parent_task_id = parent_task.id
        self.structure = parent_task.structure
        # Must run after structure is set: parsing logs errors via self.structure.
        self.__init_from_prompt(self.input.to_text())

    def before_run(self) -> None:
        """Publish a start event and log the subtask input."""
        self.structure.publish_event(StartActionSubtaskEvent.from_task(self))
        self.structure.logger.info(f"Subtask {self.id}\n{self.input.to_text()}")

    def run(self) -> BaseArtifact:
        """Execute the parsed action; always returns an artifact, never raises."""
        try:
            if self.action_name == "error":
                # Parsing already failed; surface the stored error instead of executing.
                self.output = ErrorArtifact(str(self.action_input))
            else:
                if self._tool:
                    response = self._tool.execute(getattr(self._tool, self.action_path), self)
                else:
                    response = ErrorArtifact("tool not found")

                self.output = response
        except Exception as e:
            self.structure.logger.error(f"Subtask {self.id}\n{e}", exc_info=True)

            self.output = ErrorArtifact(str(e))
        finally:
            # NOTE(review): returning from finally also suppresses non-Exception
            # BaseExceptions raised in the try body — presumably intentional here.
            return self.output

    def after_run(self) -> None:
        """Publish a finish event and log the subtask response."""
        response = self.output.to_text() if isinstance(self.output, BaseArtifact) else str(self.output)

        self.structure.publish_event(FinishActionSubtaskEvent.from_task(self))
        self.structure.logger.info(f"Subtask {self.id}\nResponse: {response}")

    def action_to_json(self) -> str:
        """Serialize the action (name, path, input) to JSON, omitting unset fields."""
        json_dict = {}

        if self.action_name:
            json_dict["name"] = self.action_name

        if self.action_path:
            json_dict["path"] = self.action_path

        if self.action_input:
            json_dict["input"] = self.action_input

        return json.dumps(json_dict)

    def add_child(self, child: ActionSubtask) -> ActionSubtask:
        """Link `child` downstream of this subtask (idempotent) and return it."""
        if child.id not in self.child_ids:
            self.child_ids.append(child.id)

        if self.id not in child.parent_ids:
            child.parent_ids.append(self.id)

        return child

    def add_parent(self, parent: ActionSubtask) -> ActionSubtask:
        """Link `parent` upstream of this subtask (idempotent) and return it."""
        if parent.id not in self.parent_ids:
            self.parent_ids.append(parent.id)

        if self.id not in parent.child_ids:
            parent.child_ids.append(self.id)

        return parent

    def __init_from_prompt(self, value: str) -> None:
        """Parse `value` into thought/action/answer fields; on failure set an "error" action."""
        thought_matches = re.findall(self.THOUGHT_PATTERN, value, re.MULTILINE)
        action_matches = re.findall(self.ACTION_PATTERN, value, re.DOTALL)
        answer_matches = re.findall(self.ANSWER_PATTERN, value, re.MULTILINE)

        # Only the last match of each section is kept.
        if self.thought is None and len(thought_matches) > 0:
            self.thought = thought_matches[-1]

        if len(action_matches) > 0:
            try:
                data = action_matches[-1]
                action_object: dict = json.loads(data, strict=False)

                validate(instance=action_object, schema=self.ACTION_SCHEMA.schema)

                # Load action name; throw exception if the key is not present
                if self.action_name is None:
                    self.action_name = action_object["name"]

                # Load action method; throw exception if the key is not present
                if self.action_path is None:
                    self.action_path = action_object["path"]

                # Load optional input value; don't throw exceptions if key is not present
                if self.action_input is None and "input" in action_object:
                    # The schema library has a bug, where something like `Or(str, None)` doesn't get
                    # correctly translated into JSON schema. For some optional input fields LLMs sometimes
                    # still provide null value, which trips up the validator. The temporary solution that
                    # works is to strip all key-values where value is null.
                    self.action_input = remove_null_values_in_dict_recursively(action_object["input"])

                # Load the action itself
                if self.action_name:
                    self._tool = self.origin_task.find_tool(self.action_name)

                if self._tool:
                    self.__validate_action_input(self.action_input, self._tool)
            except SyntaxError as e:
                # NOTE(review): json.loads raises JSONDecodeError (a ValueError), not
                # SyntaxError — this branch looks unreachable unless a helper above raises it.
                self.structure.logger.error(f"Subtask {self.origin_task.id}\nSyntax error: {e}")

                self.action_name = "error"
                self.action_input = {"error": f"syntax error: {e}"}
            except ValidationError as e:
                self.structure.logger.error(f"Subtask {self.origin_task.id}\nInvalid action JSON: {e}")

                self.action_name = "error"
                self.action_input = {"error": f"Action JSON validation error: {e}"}
            except Exception as e:
                self.structure.logger.error(f"Subtask {self.origin_task.id}\nError parsing tool action: {e}")

                self.action_name = "error"
                self.action_input = {"error": f"Action input parsing error: {e}"}
        elif self.output is None and len(answer_matches) > 0:
            # No action found: treat the last Answer section as the final output.
            self.output = TextArtifact(answer_matches[-1])

    def __validate_action_input(self, action_input: dict, mixin: ActivityMixin) -> None:
        """Validate `action_input` against the activity's schema; record an "error" action on failure."""
        try:
            activity_schema = mixin.activity_schema(getattr(mixin, self.action_path))

            if activity_schema:
                validate(instance=action_input, schema=activity_schema)
        except ValidationError as e:
            self.structure.logger.error(f"Subtask {self.origin_task.id}\nInvalid activity input JSON: {e}")

            self.action_name = "error"
            self.action_input = {"error": f"Activity input JSON validation error: {e}"}

ACTION_PATTERN = '(?s)Action:[^{]*({.*})' class-attribute instance-attribute

ACTION_SCHEMA = Schema(description='Actions have name, path, and input object.', schema={Literal('name', description='Action name'): str, Literal('path', description='Action path'): str, schema.Optional(Literal('input', description='Optional action path input values object')): {'values': dict}}) class-attribute instance-attribute

ANSWER_PATTERN = '(?s)^Answer:\\s?([\\s\\S]*)$' class-attribute instance-attribute

THOUGHT_PATTERN = '(?s)^Thought:\\s*(.*?)$' class-attribute instance-attribute

action_input: dict | None = field(default=None, kw_only=True) class-attribute instance-attribute

action_name: str | None = field(default=None, kw_only=True) class-attribute instance-attribute

action_path: str | None = field(default=None, kw_only=True) class-attribute instance-attribute

children: list[ActionSubtask] property

input: TextArtifact property

origin_task: ActionSubtaskOriginMixin | None property

parent_task_id: str | None = field(default=None, kw_only=True) class-attribute instance-attribute

parents: list[ActionSubtask] property

thought: str | None = field(default=None, kw_only=True) class-attribute instance-attribute

__init_from_prompt(value)

Source code in griptape/griptape/tasks/action_subtask.py
def __init_from_prompt(self, value: str) -> None:
    """Parse `value` into thought/action/answer fields; on failure set an "error" action.

    Matches ``Thought:``, ``Action:`` (JSON object), and ``Answer:`` sections,
    keeping only the last match of each. A valid action is schema-validated and
    resolved to a tool on the origin task.
    """
    thought_matches = re.findall(self.THOUGHT_PATTERN, value, re.MULTILINE)
    action_matches = re.findall(self.ACTION_PATTERN, value, re.DOTALL)
    answer_matches = re.findall(self.ANSWER_PATTERN, value, re.MULTILINE)

    if self.thought is None and len(thought_matches) > 0:
        self.thought = thought_matches[-1]

    if len(action_matches) > 0:
        try:
            data = action_matches[-1]
            action_object: dict = json.loads(data, strict=False)

            validate(instance=action_object, schema=self.ACTION_SCHEMA.schema)

            # Load action name; throw exception if the key is not present
            if self.action_name is None:
                self.action_name = action_object["name"]

            # Load action method; throw exception if the key is not present
            if self.action_path is None:
                self.action_path = action_object["path"]

            # Load optional input value; don't throw exceptions if key is not present
            if self.action_input is None and "input" in action_object:
                # The schema library has a bug, where something like `Or(str, None)` doesn't get
                # correctly translated into JSON schema. For some optional input fields LLMs sometimes
                # still provide null value, which trips up the validator. The temporary solution that
                # works is to strip all key-values where value is null.
                self.action_input = remove_null_values_in_dict_recursively(action_object["input"])

            # Load the action itself
            if self.action_name:
                self._tool = self.origin_task.find_tool(self.action_name)

            if self._tool:
                self.__validate_action_input(self.action_input, self._tool)
        except SyntaxError as e:
            # NOTE(review): json.loads raises JSONDecodeError, not SyntaxError —
            # this branch looks unreachable unless a helper above raises it.
            self.structure.logger.error(f"Subtask {self.origin_task.id}\nSyntax error: {e}")

            self.action_name = "error"
            self.action_input = {"error": f"syntax error: {e}"}
        except ValidationError as e:
            self.structure.logger.error(f"Subtask {self.origin_task.id}\nInvalid action JSON: {e}")

            self.action_name = "error"
            self.action_input = {"error": f"Action JSON validation error: {e}"}
        except Exception as e:
            self.structure.logger.error(f"Subtask {self.origin_task.id}\nError parsing tool action: {e}")

            self.action_name = "error"
            self.action_input = {"error": f"Action input parsing error: {e}"}
    elif self.output is None and len(answer_matches) > 0:
        # No action found: treat the last Answer section as the final output.
        self.output = TextArtifact(answer_matches[-1])

__validate_action_input(action_input, mixin)

Source code in griptape/griptape/tasks/action_subtask.py
def __validate_action_input(self, action_input: dict, mixin: ActivityMixin) -> None:
    """Validate `action_input` against the activity's schema; record an "error" action on failure."""
    try:
        activity = getattr(mixin, self.action_path)
        activity_schema = mixin.activity_schema(activity)

        # Activities without a schema are accepted as-is.
        if activity_schema:
            validate(instance=action_input, schema=activity_schema)
    except ValidationError as e:
        self.structure.logger.error(f"Subtask {self.origin_task.id}\nInvalid activity input JSON: {e}")

        self.action_name = "error"
        self.action_input = {"error": f"Activity input JSON validation error: {e}"}

action_to_json()

Source code in griptape/griptape/tasks/action_subtask.py
def action_to_json(self) -> str:
    """Serialize the action (name, path, input) to a JSON string, omitting unset fields."""
    candidates = (
        ("name", self.action_name),
        ("path", self.action_path),
        ("input", self.action_input),
    )

    # Only truthy fields make it into the payload, preserving name/path/input order.
    return json.dumps({key: value for key, value in candidates if value})

add_child(child)

Source code in griptape/griptape/tasks/action_subtask.py
def add_child(self, child: ActionSubtask) -> ActionSubtask:
    """Link `child` downstream of this subtask (idempotent) and return it."""
    # Keep both sides of the relationship in sync without duplicating IDs.
    for id_list, new_id in ((self.child_ids, child.id), (child.parent_ids, self.id)):
        if new_id not in id_list:
            id_list.append(new_id)

    return child

add_parent(parent)

Source code in griptape/griptape/tasks/action_subtask.py
def add_parent(self, parent: ActionSubtask) -> ActionSubtask:
    """Link `parent` upstream of this subtask (idempotent) and return it."""
    # Keep both sides of the relationship in sync without duplicating IDs.
    for id_list, new_id in ((self.parent_ids, parent.id), (parent.child_ids, self.id)):
        if new_id not in id_list:
            id_list.append(new_id)

    return parent

after_run()

Source code in griptape/griptape/tasks/action_subtask.py
def after_run(self) -> None:
    """Publish a finish event and log the subtask response."""
    if isinstance(self.output, BaseArtifact):
        response = self.output.to_text()
    else:
        response = str(self.output)

    self.structure.publish_event(FinishActionSubtaskEvent.from_task(self))
    self.structure.logger.info(f"Subtask {self.id}\nResponse: {response}")

attach_to(parent_task)

Source code in griptape/griptape/tasks/action_subtask.py
def attach_to(self, parent_task: BaseTask):
    """Bind this subtask to `parent_task` and parse its prompt input."""
    self.structure = parent_task.structure
    self.parent_task_id = parent_task.id

    # Parsing logs through self.structure, so it must run after the bindings above.
    self.__init_from_prompt(self.input.to_text())

before_run()

Source code in griptape/griptape/tasks/action_subtask.py
def before_run(self) -> None:
    """Publish a start event and log the subtask input."""
    start_event = StartActionSubtaskEvent.from_task(self)

    self.structure.publish_event(start_event)
    self.structure.logger.info(f"Subtask {self.id}\n{self.input.to_text()}")

run()

Source code in griptape/griptape/tasks/action_subtask.py
def run(self) -> BaseArtifact:
    """Execute the parsed action via its tool; always returns an artifact, never raises."""
    try:
        if self.action_name == "error":
            # Parsing already failed; surface the stored error instead of executing.
            self.output = ErrorArtifact(str(self.action_input))
        else:
            if self._tool:
                response = self._tool.execute(getattr(self._tool, self.action_path), self)
            else:
                response = ErrorArtifact("tool not found")

            self.output = response
    except Exception as e:
        self.structure.logger.error(f"Subtask {self.id}\n{e}", exc_info=True)

        self.output = ErrorArtifact(str(e))
    finally:
        # NOTE(review): returning from finally also suppresses non-Exception
        # BaseExceptions raised in the try body — presumably intentional here.
        return self.output

BaseImageGenerationTask

Bases: ImageArtifactFileOutputMixin, RuleMixin, BaseTask, ABC

Provides a base class for image generation-related tasks.

Attributes:

Name Type Description
negative_rulesets list[Ruleset]

List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.

negative_rules list[Rule]

List of negatively-weighted rules applied to the text prompt, if supported by the driver.

output_dir str | None

If provided, the generated image will be written to disk in output_dir.

output_file str | None

If provided, the generated image will be written to disk as output_file.

Source code in griptape/griptape/tasks/base_image_generation_task.py
@define
class BaseImageGenerationTask(ImageArtifactFileOutputMixin, RuleMixin, BaseTask, ABC):
    """Provides a base class for image generation-related tasks.

    Attributes:
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    NEGATIVE_RULESET_NAME = "Negative Ruleset"

    negative_rulesets: list[Ruleset] = field(factory=list, kw_only=True)
    negative_rules: list[Rule] = field(factory=list, kw_only=True)

    @negative_rulesets.validator  # pyright: ignore
    def validate_negative_rulesets(self, _, negative_rulesets: list[Ruleset]) -> None:
        if not negative_rulesets:
            return

        if self.negative_rules:
            raise ValueError("Can't have both negative_rulesets and negative_rules specified.")

    @negative_rules.validator  # pyright: ignore
    def validate_negative_rules(self, _, negative_rules: list[Rule]) -> None:
        if not negative_rules:
            return

        if self.negative_rulesets:
            raise ValueError("Can't have both negative_rules and negative_rulesets specified.")

    @property
    def all_negative_rulesets(self) -> list[Ruleset]:
        task_rulesets = []
        if self.negative_rulesets:
            task_rulesets = self.negative_rulesets

        elif self.negative_rules:
            task_rulesets = [Ruleset(name=self.NEGATIVE_RULESET_NAME, rules=self.negative_rules)]

        return task_rulesets

    def _read_from_file(self, path: str) -> ImageArtifact:
        self.structure.logger.info(f"Reading image from {os.path.abspath(path)}")
        return ImageLoader().load(path)

NEGATIVE_RULESET_NAME = 'Negative Ruleset' class-attribute instance-attribute

all_negative_rulesets: list[Ruleset] property

negative_rules: list[Rule] = field(factory=list, kw_only=True) class-attribute instance-attribute

negative_rulesets: list[Ruleset] = field(factory=list, kw_only=True) class-attribute instance-attribute

validate_negative_rules(_, negative_rules)

Source code in griptape/griptape/tasks/base_image_generation_task.py
@negative_rules.validator  # pyright: ignore
def validate_negative_rules(self, _, negative_rules: list[Rule]) -> None:
    if not negative_rules:
        return

    if self.negative_rulesets:
        raise ValueError("Can't have both negative_rules and negative_rulesets specified.")

validate_negative_rulesets(_, negative_rulesets)

Source code in griptape/griptape/tasks/base_image_generation_task.py
@negative_rulesets.validator  # pyright: ignore
def validate_negative_rulesets(self, _, negative_rulesets: list[Ruleset]) -> None:
    """Reject configurations that set both negative_rulesets and negative_rules."""
    if negative_rulesets and self.negative_rules:
        raise ValueError("Can't have both negative_rulesets and negative_rules specified.")

BaseTask

Bases: ABC

Source code in griptape/griptape/tasks/base_task.py
@define
class BaseTask(ABC):
    """Abstract base for all tasks: holds identity, DAG links, lifecycle state, and the execute loop."""

    class State(Enum):
        """Lifecycle states for a task."""

        PENDING = 1
        EXECUTING = 2
        FINISHED = 3

    # Random hex ID generated per instance.
    id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True)
    state: State = field(default=State.PENDING, kw_only=True)
    # DAG edges, stored as task IDs and resolved via the structure.
    parent_ids: list[str] = field(factory=list, kw_only=True)
    child_ids: list[str] = field(factory=list, kw_only=True)
    # None means "no cap" when slicing meta memory entries.
    max_meta_memory_entries: int | None = field(default=20, kw_only=True)

    # Set by execute()/preprocess(), not by callers.
    output: BaseArtifact | None = field(default=None, init=False)
    structure: Structure | None = field(default=None, init=False)
    context: dict[str, Any] = field(factory=dict, kw_only=True)

    @property
    @abstractmethod
    def input(self) -> BaseArtifact | tuple[BaseArtifact, ...]:
        ...

    @property
    def parents(self) -> list[BaseTask]:
        """Parent tasks resolved from parent_ids via the structure."""
        return [self.structure.find_task(parent_id) for parent_id in self.parent_ids]

    @property
    def children(self) -> list[BaseTask]:
        """Child tasks resolved from child_ids via the structure."""
        return [self.structure.find_task(child_id) for child_id in self.child_ids]

    @property
    def meta_memories(self) -> list[BaseMetaEntry]:
        """Structure meta-memory entries, capped at max_meta_memory_entries; empty without a structure."""
        if self.structure and self.structure.meta_memory:
            if self.max_meta_memory_entries:
                return self.structure.meta_memory.entries[: self.max_meta_memory_entries]
            else:
                return self.structure.meta_memory.entries
        else:
            return []

    def __str__(self) -> str:
        # NOTE(review): raises AttributeError if the task has not produced output yet.
        return str(self.output.value)

    def preprocess(self, structure: Structure) -> BaseTask:
        """Attach the owning structure before execution; returns self for chaining."""
        self.structure = structure

        return self

    def is_pending(self) -> bool:
        return self.state == BaseTask.State.PENDING

    def is_finished(self) -> bool:
        return self.state == BaseTask.State.FINISHED

    def is_executing(self) -> bool:
        return self.state == BaseTask.State.EXECUTING

    def before_run(self) -> None:
        """Publish a start event when attached to a structure."""
        if self.structure:
            self.structure.publish_event(StartTaskEvent.from_task(self))

    def after_run(self) -> None:
        """Publish a finish event when attached to a structure."""
        if self.structure:
            self.structure.publish_event(FinishTaskEvent.from_task(self))

    def execute(self) -> BaseArtifact | None:
        """Run the full lifecycle (before_run/run/after_run); errors become an ErrorArtifact output."""
        try:
            self.state = BaseTask.State.EXECUTING

            self.before_run()

            self.output = self.run()

            self.after_run()
        except Exception as e:
            self.structure.logger.error(f"{self.__class__.__name__} {self.id}\n{e}", exc_info=True)

            self.output = ErrorArtifact(str(e))
        finally:
            # Always mark finished and return whatever output exists, even on failure.
            self.state = BaseTask.State.FINISHED

            return self.output

    def can_execute(self) -> bool:
        """A task is runnable when pending and every parent has finished."""
        return self.state == BaseTask.State.PENDING and all(parent.is_finished() for parent in self.parents)

    def reset(self) -> BaseTask:
        """Return the task to a pristine pending state so it can run again."""
        self.state = BaseTask.State.PENDING
        self.output = None

        return self

    @abstractmethod
    def run(self) -> BaseArtifact:
        ...

    @property
    def full_context(self) -> dict[str, Any]:
        """Structure-provided context merged with this task's own context (task context wins)."""
        if self.structure:
            structure_context = self.structure.context(self)

            structure_context.update(self.context)

            return structure_context
        else:
            return {}

child_ids: list[str] = field(factory=list, kw_only=True) class-attribute instance-attribute

children: list[BaseTask] property

context: dict[str, Any] = field(factory=dict, kw_only=True) class-attribute instance-attribute

full_context: dict[str, Any] property

id: str = field(default=Factory(lambda : uuid.uuid4().hex), kw_only=True) class-attribute instance-attribute

input: BaseArtifact | tuple[BaseArtifact, ...] abstractmethod property

max_meta_memory_entries: int | None = field(default=20, kw_only=True) class-attribute instance-attribute

meta_memories: list[BaseMetaEntry] property

output: BaseArtifact | None = field(default=None, init=False) class-attribute instance-attribute

parent_ids: list[str] = field(factory=list, kw_only=True) class-attribute instance-attribute

parents: list[BaseTask] property

state: State = field(default=State.PENDING, kw_only=True) class-attribute instance-attribute

structure: Structure | None = field(default=None, init=False) class-attribute instance-attribute

State

Bases: Enum

Source code in griptape/griptape/tasks/base_task.py
class State(Enum):
    """Lifecycle states for a task."""

    PENDING = 1
    EXECUTING = 2
    FINISHED = 3
EXECUTING = 2 class-attribute instance-attribute
FINISHED = 3 class-attribute instance-attribute
PENDING = 1 class-attribute instance-attribute

__str__()

Source code in griptape/griptape/tasks/base_task.py
def __str__(self) -> str:
    """Render the task as its output's value."""
    output_value = self.output.value

    return str(output_value)

after_run()

Source code in griptape/griptape/tasks/base_task.py
def after_run(self) -> None:
    """Publish a finish event when attached to a structure."""
    if not self.structure:
        return

    self.structure.publish_event(FinishTaskEvent.from_task(self))

before_run()

Source code in griptape/griptape/tasks/base_task.py
def before_run(self) -> None:
    """Publish a start event when attached to a structure."""
    if not self.structure:
        return

    self.structure.publish_event(StartTaskEvent.from_task(self))

can_execute()

Source code in griptape/griptape/tasks/base_task.py
def can_execute(self) -> bool:
    """A task is runnable when pending and every parent has finished."""
    if self.state != BaseTask.State.PENDING:
        return False

    return all(parent.is_finished() for parent in self.parents)

execute()

Source code in griptape/griptape/tasks/base_task.py
def execute(self) -> BaseArtifact | None:
    """Run the full lifecycle (before_run/run/after_run); errors become an ErrorArtifact output."""
    try:
        self.state = BaseTask.State.EXECUTING

        self.before_run()

        self.output = self.run()

        self.after_run()
    except Exception as e:
        self.structure.logger.error(f"{self.__class__.__name__} {self.id}\n{e}", exc_info=True)

        self.output = ErrorArtifact(str(e))
    finally:
        # Always mark finished and return whatever output exists, even on failure.
        # NOTE(review): returning from finally also suppresses non-Exception BaseExceptions.
        self.state = BaseTask.State.FINISHED

        return self.output

is_executing()

Source code in griptape/griptape/tasks/base_task.py
def is_executing(self) -> bool:
    """Whether the task is currently running."""
    return self.state is BaseTask.State.EXECUTING

is_finished()

Source code in griptape/griptape/tasks/base_task.py
def is_finished(self) -> bool:
    """Whether the task has completed (successfully or not)."""
    return self.state is BaseTask.State.FINISHED

is_pending()

Source code in griptape/griptape/tasks/base_task.py
def is_pending(self) -> bool:
    """Whether the task has not started yet."""
    return self.state is BaseTask.State.PENDING

preprocess(structure)

Source code in griptape/griptape/tasks/base_task.py
def preprocess(self, structure: Structure) -> BaseTask:
    """Attach the owning structure before execution; returns self for chaining."""
    self.structure = structure
    return self

reset()

Source code in griptape/griptape/tasks/base_task.py
def reset(self) -> BaseTask:
    """Return the task to a pristine pending state so it can run again."""
    self.output = None
    self.state = BaseTask.State.PENDING

    return self

run() abstractmethod

Source code in griptape/griptape/tasks/base_task.py
@abstractmethod
def run(self) -> BaseArtifact:
    """Execute the task and return its result artifact; implemented by subclasses."""
    ...

BaseTextInputTask

Bases: RuleMixin, BaseTask, ABC

Source code in griptape/griptape/tasks/base_text_input_task.py
@define
class BaseTextInputTask(RuleMixin, BaseTask, ABC):
    """Abstract base for tasks whose input is text: a template string, a TextArtifact, or a callable."""

    # Default template renders the first positional run argument.
    DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}"

    _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field(default=DEFAULT_INPUT_TEMPLATE)

    @property
    def input(self) -> TextArtifact:
        """Resolve _input to a TextArtifact: pass artifacts through, call callables, render strings as Jinja templates."""
        if isinstance(self._input, TextArtifact):
            return self._input
        elif isinstance(self._input, Callable):
            return self._input(self)
        else:
            return TextArtifact(J2().render_from_string(self._input, **self.full_context))

    @input.setter
    def input(self, value: str | TextArtifact | Callable[[BaseTask], TextArtifact]) -> None:
        self._input = value

    def before_run(self) -> None:
        """Run base hooks, then log the resolved input."""
        super().before_run()

        self.structure.logger.info(f"{self.__class__.__name__} {self.id}\nInput: {self.input.to_text()}")

    def after_run(self) -> None:
        """Run base hooks, then log the output."""
        super().after_run()

        self.structure.logger.info(f"{self.__class__.__name__} {self.id}\nOutput: {self.output.to_text()}")

DEFAULT_INPUT_TEMPLATE = '{{ args[0] }}' class-attribute instance-attribute

input: TextArtifact property writable

after_run()

Source code in griptape/griptape/tasks/base_text_input_task.py
def after_run(self) -> None:
    """Run base hooks, then log the output."""
    super().after_run()

    message = f"{self.__class__.__name__} {self.id}\nOutput: {self.output.to_text()}"
    self.structure.logger.info(message)

before_run()

Source code in griptape/griptape/tasks/base_text_input_task.py
def before_run(self) -> None:
    """Run base hooks, then log the resolved input."""
    super().before_run()

    message = f"{self.__class__.__name__} {self.id}\nInput: {self.input.to_text()}"
    self.structure.logger.info(message)

CodeExecutionTask

Bases: BaseTextInputTask

Source code in griptape/griptape/tasks/code_execution_task.py
@define
class CodeExecutionTask(BaseTextInputTask):
    """Task that delegates its work to a user-supplied callable."""

    # Callable receiving this task and returning the result artifact.
    run_fn: Callable[[CodeExecutionTask], BaseArtifact] = field(kw_only=True)

    def run(self) -> BaseArtifact:
        """Invoke run_fn, converting any exception into an ErrorArtifact."""
        try:
            return self.run_fn(self)
        except Exception as e:
            return ErrorArtifact(f"error during Code Execution Task: {e}")

run_fn: Callable[[CodeExecutionTask], BaseArtifact] = field(kw_only=True) class-attribute instance-attribute

run()

Source code in griptape/griptape/tasks/code_execution_task.py
def run(self) -> BaseArtifact:
    """Invoke the user-supplied callable, converting any exception into an ErrorArtifact."""
    try:
        result = self.run_fn(self)
    except Exception as e:
        return ErrorArtifact(f"error during Code Execution Task: {e}")

    return result

ExtractionTask

Bases: BaseTextInputTask

Source code in griptape/griptape/tasks/extraction_task.py
@define
class ExtractionTask(BaseTextInputTask):
    """Task that runs an extraction engine over its text input."""

    extraction_engine: BaseExtractionEngine = field(kw_only=True)
    # Extra keyword arguments forwarded to the engine's extract() call.
    args: dict = field(kw_only=True)

    def run(self) -> ListArtifact:
        """Extract structured data from the input text using the configured engine and rulesets."""
        return self.extraction_engine.extract(self.input.to_text(), rulesets=self.all_rulesets, **self.args)

args: dict = field(kw_only=True) class-attribute instance-attribute

extraction_engine: BaseExtractionEngine = field(kw_only=True) class-attribute instance-attribute

run()

Source code in griptape/griptape/tasks/extraction_task.py
def run(self) -> ListArtifact:
    """Extract structured data from the input text using the configured engine and rulesets."""
    text = self.input.to_text()

    return self.extraction_engine.extract(text, rulesets=self.all_rulesets, **self.args)

InpaintingImageGenerationTask

Bases: BaseImageGenerationTask

A task that modifies a select region within an image using a mask. Accepts a text prompt, image, and mask as input in one of the following formats: - tuple of (template string, ImageArtifact, ImageArtifact) - tuple of (TextArtifact, ImageArtifact, ImageArtifact) - Callable that returns a tuple of (TextArtifact, ImageArtifact, ImageArtifact)

Attributes:

Name Type Description
image_generation_engine InpaintingImageGenerationEngine

The engine used to generate the image.

negative_rulesets list[Ruleset]

List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.

negative_rules list[Rule]

List of negatively-weighted rules applied to the text prompt, if supported by the driver.

output_dir str | None

If provided, the generated image will be written to disk in output_dir.

output_file str | None

If provided, the generated image will be written to disk as output_file.

Source code in griptape/griptape/tasks/inpainting_image_generation_task.py
@define
class InpaintingImageGenerationTask(BaseImageGenerationTask):
    """A task that modifies a select region within an image using a mask. Accepts a text prompt, image, and mask as
    input in one of the following formats:
    - tuple of (template string, ImageArtifact, ImageArtifact)
    - tuple of (TextArtifact, ImageArtifact, ImageArtifact)
    - Callable that returns a tuple of (TextArtifact, ImageArtifact, ImageArtifact)

    Attributes:
        image_generation_engine: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    image_generation_engine: InpaintingImageGenerationEngine = field(kw_only=True)
    _input: tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[
        [BaseTask], tuple[TextArtifact, ImageArtifact, ImageArtifact]
    ] = field(default=None)

    @property
    def input(self) -> tuple[TextArtifact, ImageArtifact, ImageArtifact]:
        """Resolve _input to a (prompt, image, mask) tuple, rendering string prompts as Jinja templates."""
        if isinstance(self._input, Tuple):
            if isinstance(self._input[0], TextArtifact):
                input_text = self._input[0]
            else:
                # String prompts are treated as templates rendered with the task context.
                input_text = TextArtifact(J2().render_from_string(self._input[0], **self.full_context))

            return input_text, self._input[1], self._input[2]
        elif isinstance(self._input, Callable):
            return self._input(self)
        else:
            raise ValueError("Input must be a tuple of (text, image, mask) or a callable that returns such a tuple.")

    @input.setter
    def input(self, value: tuple[TextArtifact, ImageArtifact, ImageArtifact]) -> None:
        self._input = value

    def run(self) -> ImageArtifact:
        """Generate an inpainted image from (prompt, image, mask) and optionally write it to disk."""
        prompt_artifact = self.input[0]
        image_artifact = self.input[1]
        mask_artifact = self.input[2]

        output_image_artifact = self.image_generation_engine.run(
            prompts=[prompt_artifact.to_text()],
            image=image_artifact,
            mask=mask_artifact,
            rulesets=self.all_rulesets,
            negative_rulesets=self.negative_rulesets,
        )

        if self.output_dir or self.output_file:
            self._write_to_file(output_image_artifact)

        return output_image_artifact

image_generation_engine: InpaintingImageGenerationEngine = field(kw_only=True) class-attribute instance-attribute

input: tuple[TextArtifact, ImageArtifact, ImageArtifact] property writable

run()

Source code in griptape/griptape/tasks/inpainting_image_generation_task.py
def run(self) -> ImageArtifact:
    prompt_artifact = self.input[0]
    image_artifact = self.input[1]
    mask_artifact = self.input[2]

    output_image_artifact = self.image_generation_engine.run(
        prompts=[prompt_artifact.to_text()],
        image=image_artifact,
        mask=mask_artifact,
        rulesets=self.all_rulesets,
        negative_rulesets=self.negative_rulesets,
    )

    if self.output_dir or self.output_file:
        self._write_to_file(output_image_artifact)

    return output_image_artifact

OutpaintingImageGenerationTask

Bases: BaseImageGenerationTask

A task that modifies an image outside the bounds of a mask. Accepts a text prompt, image, and mask as input in one of the following formats: - tuple of (template string, ImageArtifact, ImageArtifact) - tuple of (TextArtifact, ImageArtifact, ImageArtifact) - Callable that returns a tuple of (TextArtifact, ImageArtifact, ImageArtifact)

Attributes:

Name Type Description
image_generation_engine OutpaintingImageGenerationEngine

The engine used to generate the image.

negative_rulesets OutpaintingImageGenerationEngine

List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.

negative_rules OutpaintingImageGenerationEngine

List of negatively-weighted rules applied to the text prompt, if supported by the driver.

output_dir OutpaintingImageGenerationEngine

If provided, the generated image will be written to disk in output_dir.

output_file OutpaintingImageGenerationEngine

If provided, the generated image will be written to disk as output_file.

Source code in griptape/griptape/tasks/outpainting_image_generation_task.py
@define
class OutpaintingImageGenerationTask(BaseImageGenerationTask):
    """A task that modifies an image outside the bounds of a mask. Accepts a text prompt, image, and mask as
    input in one of the following formats:
    - tuple of (template string, ImageArtifact, ImageArtifact)
    - tuple of (TextArtifact, ImageArtifact, ImageArtifact)
    - Callable that returns a tuple of (TextArtifact, ImageArtifact, ImageArtifact)

    Attributes:
        image_generation_engine: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    image_generation_engine: OutpaintingImageGenerationEngine = field(kw_only=True)
    _input: tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[
        [BaseTask], tuple[TextArtifact, ImageArtifact, ImageArtifact]
    ] = field(default=None)

    @property
    def input(self) -> tuple[TextArtifact, ImageArtifact, ImageArtifact]:
        """Resolve the configured input into (prompt, image, mask) artifacts.

        Raises:
            ValueError: If the input is neither a tuple nor a callable.
        """
        # Check against the builtin `tuple`: isinstance with typing.Tuple is
        # deprecated, and VariationImageGenerationTask already checks the builtin.
        if isinstance(self._input, tuple):
            if isinstance(self._input[0], TextArtifact):
                input_text = self._input[0]
            else:
                # A plain string is treated as a Jinja2 template rendered with the task context.
                input_text = TextArtifact(J2().render_from_string(self._input[0], **self.full_context))

            return input_text, self._input[1], self._input[2]
        elif isinstance(self._input, Callable):
            return self._input(self)
        else:
            raise ValueError("Input must be a tuple of (text, image, mask) or a callable that returns such a tuple.")

    @input.setter
    def input(self, value: tuple[TextArtifact, ImageArtifact, ImageArtifact]) -> None:
        self._input = value

    def run(self) -> ImageArtifact:
        """Run outpainting generation and optionally write the result to disk."""
        # Evaluate the `input` property once: each access re-renders the template
        # (or re-invokes the callable), so repeated indexing would triple that work.
        prompt_artifact, image_artifact, mask_artifact = self.input

        output_image_artifact = self.image_generation_engine.run(
            prompts=[prompt_artifact.to_text()],
            image=image_artifact,
            mask=mask_artifact,
            rulesets=self.all_rulesets,
            negative_rulesets=self.negative_rulesets,
        )

        if self.output_dir or self.output_file:
            self._write_to_file(output_image_artifact)

        return output_image_artifact

image_generation_engine: OutpaintingImageGenerationEngine = field(kw_only=True) class-attribute instance-attribute

input: tuple[TextArtifact, ImageArtifact, ImageArtifact] property writable

run()

Source code in griptape/griptape/tasks/outpainting_image_generation_task.py
def run(self) -> ImageArtifact:
    prompt_artifact = self.input[0]
    image_artifact = self.input[1]
    mask_artifact = self.input[2]

    output_image_artifact = self.image_generation_engine.run(
        prompts=[prompt_artifact.to_text()],
        image=image_artifact,
        mask=mask_artifact,
        rulesets=self.all_rulesets,
        negative_rulesets=self.negative_rulesets,
    )

    if self.output_dir or self.output_file:
        self._write_to_file(output_image_artifact)

    return output_image_artifact

PromptImageGenerationTask

Bases: BaseImageGenerationTask

Used to generate an image from a text prompt. Accepts prompt as input in one of the following formats: - template string - TextArtifact - Callable that returns a TextArtifact

Attributes:

Name Type Description
image_generation_engine PromptImageGenerationEngine

The engine used to generate the image.

negative_rulesets PromptImageGenerationEngine

List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.

negative_rules PromptImageGenerationEngine

List of negatively-weighted rules applied to the text prompt, if supported by the driver.

output_dir PromptImageGenerationEngine

If provided, the generated image will be written to disk in output_dir.

output_file PromptImageGenerationEngine

If provided, the generated image will be written to disk as output_file.

Source code in griptape/griptape/tasks/prompt_image_generation_task.py
@define
class PromptImageGenerationTask(BaseImageGenerationTask):
    """Used to generate an image from a text prompt. Accepts prompt as input in one of the following formats:
    - template string
    - TextArtifact
    - Callable that returns a TextArtifact

    Attributes:
        image_generation_engine: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}"

    _input: str | TextArtifact | Callable[[BaseTask], TextArtifact] = field(default=DEFAULT_INPUT_TEMPLATE)
    image_generation_engine: PromptImageGenerationEngine = field(kw_only=True)

    @property
    def input(self) -> TextArtifact:
        """Resolve the configured prompt into a TextArtifact."""
        if isinstance(self._input, TextArtifact):
            return self._input

        if isinstance(self._input, Callable):
            return self._input(self)

        # A plain string is treated as a Jinja2 template rendered with the task context.
        rendered = J2().render_from_string(self._input, **self.full_context)
        return TextArtifact(rendered)

    @input.setter
    def input(self, value: TextArtifact) -> None:
        self._input = value

    def run(self) -> ImageArtifact:
        """Generate the image and optionally write it to disk."""
        prompt_text = self.input.to_text()

        image_artifact = self.image_generation_engine.run(
            prompts=[prompt_text], rulesets=self.all_rulesets, negative_rulesets=self.negative_rulesets
        )

        if self.output_dir or self.output_file:
            self._write_to_file(image_artifact)

        return image_artifact

DEFAULT_INPUT_TEMPLATE = '{{ args[0] }}' class-attribute instance-attribute

image_generation_engine: PromptImageGenerationEngine = field(kw_only=True) class-attribute instance-attribute

input: TextArtifact property writable

run()

Source code in griptape/griptape/tasks/prompt_image_generation_task.py
def run(self) -> ImageArtifact:
    image_artifact = self.image_generation_engine.run(
        prompts=[self.input.to_text()], rulesets=self.all_rulesets, negative_rulesets=self.negative_rulesets
    )

    if self.output_dir or self.output_file:
        self._write_to_file(image_artifact)

    return image_artifact

PromptTask

Bases: BaseTextInputTask

Source code in griptape/griptape/tasks/prompt_task.py
@define
class PromptTask(BaseTextInputTask):
    """A task that sends its input to a prompt driver and stores the response as output."""

    prompt_driver: BasePromptDriver | None = field(default=None, kw_only=True)
    generate_system_template: Callable[[PromptTask], str] = field(
        default=Factory(lambda self: self.default_system_template_generator, takes_self=True), kw_only=True
    )

    output: TextArtifact | ErrorArtifact | InfoArtifact | None = field(default=None, init=False)

    @property
    def prompt_stack(self) -> PromptStack:
        """Build the prompt stack: system prompt, conversation memory, user input, and any existing output."""
        stack = PromptStack()
        conversation_memory = self.structure.conversation_memory

        stack.add_system_input(self.generate_system_template(self))
        stack.add_user_input(self.input.to_text())

        if self.output:
            stack.add_assistant_input(self.output.to_text())

        if conversation_memory:
            # Insert at index 1 so memory lands right after the system prompt.
            stack.add_conversation_memory(conversation_memory, 1)

        return stack

    def default_system_template_generator(self, _: PromptTask) -> str:
        """Render the default system prompt with this task's rulesets."""
        rendered_rulesets = J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets)

        return J2("tasks/prompt_task/system.j2").render(rulesets=rendered_rulesets)

    def run(self) -> TextArtifact | InfoArtifact | ErrorArtifact:
        """Run the active prompt driver against the prompt stack and record the result."""
        self.output = self.active_driver().run(self.prompt_stack)

        return self.output

    def active_driver(self) -> BasePromptDriver:
        # Fall back to the structure-level driver when no task-level driver was set.
        return self.structure.prompt_driver if self.prompt_driver is None else self.prompt_driver

generate_system_template: Callable[[PromptTask], str] = field(default=Factory(lambda : self.default_system_template_generator, takes_self=True), kw_only=True) class-attribute instance-attribute

output: TextArtifact | ErrorArtifact | InfoArtifact | None = field(default=None, init=False) class-attribute instance-attribute

prompt_driver: BasePromptDriver | None = field(default=None, kw_only=True) class-attribute instance-attribute

prompt_stack: PromptStack property

active_driver()

Source code in griptape/griptape/tasks/prompt_task.py
def active_driver(self) -> BasePromptDriver:
    if self.prompt_driver is None:
        return self.structure.prompt_driver
    else:
        return self.prompt_driver

default_system_template_generator(_)

Source code in griptape/griptape/tasks/prompt_task.py
def default_system_template_generator(self, _: PromptTask) -> str:
    return J2("tasks/prompt_task/system.j2").render(
        rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets)
    )

run()

Source code in griptape/griptape/tasks/prompt_task.py
def run(self) -> TextArtifact | InfoArtifact | ErrorArtifact:
    self.output = self.active_driver().run(self.prompt_stack)

    return self.output

TextQueryTask

Bases: BaseTextInputTask

Source code in griptape/griptape/tasks/text_query_task.py
@define
class TextQueryTask(BaseTextInputTask):
    """A task that answers its input by querying a text query engine."""

    query_engine: BaseQueryEngine = field(kw_only=True)
    loader: TextLoader = field(default=Factory(lambda: TextLoader()), kw_only=True)
    namespace: Optional[str] = field(default=None, kw_only=True)

    def run(self) -> TextArtifact:
        """Query the engine with this task's input text, scoped to the optional namespace."""
        query = self.input.to_text()

        return self.query_engine.query(query, namespace=self.namespace, rulesets=self.all_rulesets)

loader: TextLoader = field(default=Factory(lambda : TextLoader()), kw_only=True) class-attribute instance-attribute

namespace: Optional[str] = field(default=None, kw_only=True) class-attribute instance-attribute

query_engine: BaseQueryEngine = field(kw_only=True) class-attribute instance-attribute

run()

Source code in griptape/griptape/tasks/text_query_task.py
def run(self) -> TextArtifact:
    return self.query_engine.query(self.input.to_text(), namespace=self.namespace, rulesets=self.all_rulesets)

TextSummaryTask

Bases: BaseTextInputTask

Source code in griptape/griptape/tasks/text_summary_task.py
@define
class TextSummaryTask(BaseTextInputTask):
    """A task that condenses its input text with a summary engine."""

    summary_engine: BaseSummaryEngine = field(kw_only=True, default=Factory(lambda: PromptSummaryEngine()))

    def run(self) -> TextArtifact:
        """Summarize the input text and wrap the result in a TextArtifact."""
        summary = self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.all_rulesets)

        return TextArtifact(summary)

summary_engine: BaseSummaryEngine = field(kw_only=True, default=Factory(lambda : PromptSummaryEngine())) class-attribute instance-attribute

run()

Source code in griptape/griptape/tasks/text_summary_task.py
def run(self) -> TextArtifact:
    return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.all_rulesets))

ToolTask

Bases: PromptTask, ActionSubtaskOriginMixin

Source code in griptape/griptape/tasks/tool_task.py
@define
class ToolTask(PromptTask, ActionSubtaskOriginMixin):
    """A task that prompts the LLM for a single action and executes it with one tool.

    Attributes:
        tool: The single tool available to this task.
        subtask: The ActionSubtask created by the most recent run, if any.
        task_memory: Optional task memory wired into the tool's input/output memory.
    """

    tool: BaseTool = field(kw_only=True)
    subtask: ActionSubtask | None = field(default=None, kw_only=True)
    task_memory: TaskMemory | None = field(default=None, kw_only=True)

    def __attrs_post_init__(self) -> None:
        # Wire explicitly-provided task memory into the tool at construction time.
        self.set_default_tools_memory(self.task_memory)

    def preprocess(self, structure: Structure) -> ToolTask:
        """Fall back to the structure's task memory when none was set on the task."""
        super().preprocess(structure)

        if self.task_memory is None:
            self.set_default_tools_memory(structure.task_memory)

        return self

    def default_system_template_generator(self, _: PromptTask) -> str:
        """Render the system prompt with rulesets, the tool's action schema, and meta memory."""
        return J2("tasks/tool_task/system.j2").render(
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets),
            action_schema=utils.minify_json(json.dumps(self.tool.schema())),
            meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
        )

    def run(self) -> TextArtifact:
        """Prompt the driver once, run the returned action as a subtask, and record its output."""
        prompt_output = self.active_driver().run(prompt_stack=self.prompt_stack).to_text()

        subtask = self.add_subtask(ActionSubtask(f"Action: {prompt_output}"))

        subtask.before_run()
        subtask.run()
        subtask.after_run()

        if subtask.output:
            self.output = subtask.output
        else:
            self.output = InfoArtifact("No tool output")

        return self.output

    def find_tool(self, tool_name: str) -> BaseTool | None:
        if self.tool.name == tool_name:
            return self.tool
        else:
            return None

    def find_memory(self, memory_name: str) -> TaskMemory | None:
        # ToolTask does not expose any addressable task memory by name.
        return None

    def find_subtask(self, subtask_id: str) -> ActionSubtask | None:
        # Guard against self.subtask being None (no subtask has run yet); the
        # previous implementation dereferenced it unconditionally and could
        # raise AttributeError before the first run.
        if self.subtask is not None and self.subtask.id == subtask_id:
            return self.subtask
        else:
            return None

    def add_subtask(self, subtask: ActionSubtask) -> ActionSubtask:
        """Store the subtask and attach it to this task."""
        self.subtask = subtask
        self.subtask.attach_to(self)

        return self.subtask

    def set_default_tools_memory(self, memory: TaskMemory | None) -> None:
        """Assign task memory and default the tool's input/output memory to it where unset."""
        self.task_memory = memory

        if self.task_memory:
            if self.tool.input_memory is None:
                self.tool.input_memory = [self.task_memory]
            if self.tool.output_memory is None and self.tool.off_prompt:
                # Off-prompt tools route every activity's output into task memory.
                self.tool.output_memory = {a.name: [self.task_memory] for a in self.tool.activities()}

subtask: ActionSubtask | None = field(default=None, kw_only=True) class-attribute instance-attribute

task_memory: TaskMemory | None = field(default=None, kw_only=True) class-attribute instance-attribute

tool: BaseTool = field(kw_only=True) class-attribute instance-attribute

__attrs_post_init__()

Source code in griptape/griptape/tasks/tool_task.py
def __attrs_post_init__(self) -> None:
    self.set_default_tools_memory(self.task_memory)

add_subtask(subtask)

Source code in griptape/griptape/tasks/tool_task.py
def add_subtask(self, subtask: ActionSubtask) -> ActionSubtask:
    self.subtask = subtask
    self.subtask.attach_to(self)

    return self.subtask

default_system_template_generator(_)

Source code in griptape/griptape/tasks/tool_task.py
def default_system_template_generator(self, _: PromptTask) -> str:
    return J2("tasks/tool_task/system.j2").render(
        rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets),
        action_schema=utils.minify_json(json.dumps(self.tool.schema())),
        meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
    )

find_memory(memory_name)

Source code in griptape/griptape/tasks/tool_task.py
def find_memory(self, memory_name: str) -> TaskMemory | None:
    return None

find_subtask(subtask_id)

Source code in griptape/griptape/tasks/tool_task.py
def find_subtask(self, subtask_id: str) -> ActionSubtask | None:
    return self.subtask if self.subtask.id == subtask_id else None

find_tool(tool_name)

Source code in griptape/griptape/tasks/tool_task.py
def find_tool(self, tool_name: str) -> BaseTool | None:
    if self.tool.name == tool_name:
        return self.tool
    else:
        return None

preprocess(structure)

Source code in griptape/griptape/tasks/tool_task.py
def preprocess(self, structure: Structure) -> ToolTask:
    super().preprocess(structure)

    if self.task_memory is None:
        self.set_default_tools_memory(structure.task_memory)

    return self

run()

Source code in griptape/griptape/tasks/tool_task.py
def run(self) -> TextArtifact:
    prompt_output = self.active_driver().run(prompt_stack=self.prompt_stack).to_text()

    subtask = self.add_subtask(ActionSubtask(f"Action: {prompt_output}"))

    subtask.before_run()
    subtask.run()
    subtask.after_run()

    if subtask.output:
        self.output = subtask.output
    else:
        self.output = InfoArtifact("No tool output")

    return self.output

set_default_tools_memory(memory)

Source code in griptape/griptape/tasks/tool_task.py
def set_default_tools_memory(self, memory: TaskMemory) -> None:
    self.task_memory = memory

    if self.task_memory:
        if self.tool.input_memory is None:
            self.tool.input_memory = [self.task_memory]
        if self.tool.output_memory is None and self.tool.off_prompt:
            self.tool.output_memory = {a.name: [self.task_memory] for a in self.tool.activities()}

ToolkitTask

Bases: PromptTask, ActionSubtaskOriginMixin

Source code in griptape/griptape/tasks/toolkit_task.py
@define
class ToolkitTask(PromptTask, ActionSubtaskOriginMixin):
    """A ReAct-style task that lets the LLM chain tool actions (subtasks) until it produces an answer."""

    # Cap on subtasks per run to prevent unbounded LLM action loops.
    DEFAULT_MAX_STEPS = 20

    tools: list[BaseTool] = field(factory=list, kw_only=True)
    max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True)
    task_memory: TaskMemory | None = field(default=None, kw_only=True)
    subtasks: list[ActionSubtask] = field(factory=list)
    generate_assistant_subtask_template: Callable[[ActionSubtask], str] = field(
        default=Factory(lambda self: self.default_assistant_subtask_template_generator, takes_self=True), kw_only=True
    )
    generate_user_subtask_template: Callable[[ActionSubtask], str] = field(
        default=Factory(lambda self: self.default_user_subtask_template_generator, takes_self=True), kw_only=True
    )

    def __attrs_post_init__(self) -> None:
        # Wire explicitly-provided task memory into the tools at construction time.
        if self.task_memory:
            self.set_default_tools_memory(self.task_memory)

    @tools.validator  # pyright: ignore
    def validate_tools(self, _, tools: list[BaseTool]) -> None:
        """Reject duplicate tool names: tools are looked up by name in find_tool."""
        tool_names = [t.name for t in tools]

        if len(tool_names) > len(set(tool_names)):
            raise ValueError("tools names have to be unique in task")

    @property
    def tool_output_memory(self) -> list[TaskMemory]:
        """Return the distinct TaskMemory instances across all tools' output memory (first occurrence by name wins)."""
        unique_memory_dict = {}

        for memories in [tool.output_memory for tool in self.tools if tool.output_memory]:
            for memory_list in memories.values():
                for memory in memory_list:
                    if memory.name not in unique_memory_dict:
                        unique_memory_dict[memory.name] = memory

        return list(unique_memory_dict.values())

    @property
    def prompt_stack(self) -> PromptStack:
        """Build the prompt stack, replaying prior subtasks as assistant/user turns until an output exists."""
        stack = PromptStack()
        memory = self.structure.conversation_memory

        stack.add_system_input(self.generate_system_template(self))

        stack.add_user_input(self.input.to_text())

        if self.output:
            stack.add_assistant_input(self.output.to_text())
        else:
            # No final answer yet: replay the ReAct exchange so the LLM sees prior actions and their results.
            for s in self.subtasks:
                stack.add_assistant_input(self.generate_assistant_subtask_template(s))
                stack.add_user_input(self.generate_user_subtask_template(s))

        if memory:
            # inserting at index 1 to place memory right after system prompt
            stack.add_conversation_memory(memory, 1)

        return stack

    def preprocess(self, structure: Structure) -> ToolkitTask:
        """Fall back to the structure's task memory when none was set on the task."""
        super().preprocess(structure)

        if self.task_memory is None and structure.task_memory:
            self.set_default_tools_memory(structure.task_memory)

        return self

    def default_system_template_generator(self, _: PromptTask) -> str:
        """Render the system prompt with rulesets, tool names/schemas, meta memory, and the stop sequence."""
        return J2("tasks/toolkit_task/system.j2").render(
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets),
            action_names=str.join(", ", [tool.name for tool in self.tools]),
            action_schemas=[utils.minify_json(json.dumps(tool.schema())) for tool in self.tools],
            meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
            stop_sequence=utils.constants.RESPONSE_STOP_SEQUENCE,
        )

    def default_assistant_subtask_template_generator(self, subtask: ActionSubtask) -> str:
        """Render a prior subtask as an assistant turn for the ReAct transcript."""
        return J2("tasks/toolkit_task/assistant_subtask.j2").render(
            stop_sequence=utils.constants.RESPONSE_STOP_SEQUENCE, subtask=subtask
        )

    def default_user_subtask_template_generator(self, subtask: ActionSubtask) -> str:
        """Render a prior subtask's result as a user turn for the ReAct transcript."""
        return J2("tasks/toolkit_task/user_subtask.j2").render(
            stop_sequence=utils.constants.RESPONSE_STOP_SEQUENCE, subtask=subtask
        )

    def set_default_tools_memory(self, memory: TaskMemory) -> None:
        """Assign task memory and default each tool's input/output memory to it where unset."""
        self.task_memory = memory

        for tool in self.tools:
            if self.task_memory:
                if tool.input_memory is None:
                    tool.input_memory = [self.task_memory]
                if tool.output_memory is None and tool.off_prompt:
                    # Off-prompt tools route every activity's output into task memory.
                    tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()}

    def run(self) -> TextArtifact | InfoArtifact | ErrorArtifact:
        """Run the ReAct loop: prompt, execute the returned action, repeat until an answer or the subtask cap."""
        from griptape.tasks import ActionSubtask

        self.subtasks.clear()

        subtask = self.add_subtask(ActionSubtask(self.active_driver().run(prompt_stack=self.prompt_stack).to_text()))

        while True:
            if subtask.output is None:
                if len(self.subtasks) >= self.max_subtasks:
                    subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task")
                elif subtask.action_name is None:
                    # handle case when the LLM failed to follow the ReAct prompt and didn't return a proper action
                    subtask.output = subtask.input
                else:
                    subtask.before_run()
                    subtask.run()
                    subtask.after_run()

                    subtask = self.add_subtask(
                        ActionSubtask(self.active_driver().run(prompt_stack=self.prompt_stack).to_text())
                    )
            else:
                break

        self.output = subtask.output

        return self.output

    def find_subtask(self, subtask_id: str) -> ActionSubtask | None:
        """Return the subtask with the given id, or None if not found."""
        return next((subtask for subtask in self.subtasks if subtask.id == subtask_id), None)

    def add_subtask(self, subtask: ActionSubtask) -> ActionSubtask:
        """Attach the subtask to this task and chain it as a child of the previous subtask."""
        subtask.attach_to(self)

        if len(self.subtasks) > 0:
            self.subtasks[-1].add_child(subtask)

        self.subtasks.append(subtask)

        return subtask

    def find_tool(self, tool_name: str) -> BaseTool | None:
        """Return the tool with the given name, or None if not found."""
        return next((t for t in self.tools if t.name == tool_name), None)

    def find_memory(self, memory_name: str) -> TaskMemory | None:
        """Return the tool output memory with the given name, or None if not found."""
        return next((m for m in self.tool_output_memory if m.name == memory_name), None)

DEFAULT_MAX_STEPS = 20 class-attribute instance-attribute

generate_assistant_subtask_template: Callable[[ActionSubtask], str] = field(default=Factory(lambda : self.default_assistant_subtask_template_generator, takes_self=True), kw_only=True) class-attribute instance-attribute

generate_user_subtask_template: Callable[[ActionSubtask], str] = field(default=Factory(lambda : self.default_user_subtask_template_generator, takes_self=True), kw_only=True) class-attribute instance-attribute

max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True) class-attribute instance-attribute

prompt_stack: PromptStack property

subtasks: list[ActionSubtask] = field(factory=list) class-attribute instance-attribute

task_memory: TaskMemory | None = field(default=None, kw_only=True) class-attribute instance-attribute

tool_output_memory: list[TaskMemory] property

tools: list[BaseTool] = field(factory=list, kw_only=True) class-attribute instance-attribute

__attrs_post_init__()

Source code in griptape/griptape/tasks/toolkit_task.py
def __attrs_post_init__(self) -> None:
    if self.task_memory:
        self.set_default_tools_memory(self.task_memory)

add_subtask(subtask)

Source code in griptape/griptape/tasks/toolkit_task.py
def add_subtask(self, subtask: ActionSubtask) -> ActionSubtask:
    subtask.attach_to(self)

    if len(self.subtasks) > 0:
        self.subtasks[-1].add_child(subtask)

    self.subtasks.append(subtask)

    return subtask

default_assistant_subtask_template_generator(subtask)

Source code in griptape/griptape/tasks/toolkit_task.py
def default_assistant_subtask_template_generator(self, subtask: ActionSubtask) -> str:
    return J2("tasks/toolkit_task/assistant_subtask.j2").render(
        stop_sequence=utils.constants.RESPONSE_STOP_SEQUENCE, subtask=subtask
    )

default_system_template_generator(_)

Source code in griptape/griptape/tasks/toolkit_task.py
def default_system_template_generator(self, _: PromptTask) -> str:
    return J2("tasks/toolkit_task/system.j2").render(
        rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.all_rulesets),
        action_names=str.join(", ", [tool.name for tool in self.tools]),
        action_schemas=[utils.minify_json(json.dumps(tool.schema())) for tool in self.tools],
        meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
        stop_sequence=utils.constants.RESPONSE_STOP_SEQUENCE,
    )

default_user_subtask_template_generator(subtask)

Source code in griptape/griptape/tasks/toolkit_task.py
def default_user_subtask_template_generator(self, subtask: ActionSubtask) -> str:
    return J2("tasks/toolkit_task/user_subtask.j2").render(
        stop_sequence=utils.constants.RESPONSE_STOP_SEQUENCE, subtask=subtask
    )

find_memory(memory_name)

Source code in griptape/griptape/tasks/toolkit_task.py
def find_memory(self, memory_name: str) -> TaskMemory | None:
    return next((m for m in self.tool_output_memory if m.name == memory_name), None)

find_subtask(subtask_id)

Source code in griptape/griptape/tasks/toolkit_task.py
def find_subtask(self, subtask_id: str) -> ActionSubtask | None:
    return next((subtask for subtask in self.subtasks if subtask.id == subtask_id), None)

find_tool(tool_name)

Source code in griptape/griptape/tasks/toolkit_task.py
def find_tool(self, tool_name: str) -> BaseTool | None:
    return next((t for t in self.tools if t.name == tool_name), None)

preprocess(structure)

Source code in griptape/griptape/tasks/toolkit_task.py
def preprocess(self, structure: Structure) -> ToolkitTask:
    super().preprocess(structure)

    if self.task_memory is None and structure.task_memory:
        self.set_default_tools_memory(structure.task_memory)

    return self

run()

Source code in griptape/griptape/tasks/toolkit_task.py
def run(self) -> TextArtifact | InfoArtifact | ErrorArtifact:
    from griptape.tasks import ActionSubtask

    self.subtasks.clear()

    subtask = self.add_subtask(ActionSubtask(self.active_driver().run(prompt_stack=self.prompt_stack).to_text()))

    while True:
        if subtask.output is None:
            if len(self.subtasks) >= self.max_subtasks:
                subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task")
            elif subtask.action_name is None:
                # handle case when the LLM failed to follow the ReAct prompt and didn't return a proper action
                subtask.output = subtask.input
            else:
                subtask.before_run()
                subtask.run()
                subtask.after_run()

                subtask = self.add_subtask(
                    ActionSubtask(self.active_driver().run(prompt_stack=self.prompt_stack).to_text())
                )
        else:
            break

    self.output = subtask.output

    return self.output

set_default_tools_memory(memory)

Source code in griptape/griptape/tasks/toolkit_task.py
def set_default_tools_memory(self, memory: TaskMemory) -> None:
    self.task_memory = memory

    for tool in self.tools:
        if self.task_memory:
            if tool.input_memory is None:
                tool.input_memory = [self.task_memory]
            if tool.output_memory is None and tool.off_prompt:
                tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()}

validate_tools(_, tools)

Source code in griptape/griptape/tasks/toolkit_task.py
@tools.validator  # pyright: ignore
def validate_tools(self, _, tools: list[BaseTool]) -> None:
    tool_names = [t.name for t in tools]

    if len(tool_names) > len(set(tool_names)):
        raise ValueError("tools names have to be unique in task")

VariationImageGenerationTask

Bases: BaseImageGenerationTask

A task that generates a variation of an image using a prompt. Accepts a text prompt and image as input in one of the following formats: - tuple of (template string, ImageArtifact) - tuple of (TextArtifact, ImageArtifact) - Callable that returns a tuple of (TextArtifact, ImageArtifact)

Attributes:

Name Type Description
image_generation_engine VariationImageGenerationEngine

The engine used to generate the image.

negative_rulesets VariationImageGenerationEngine

List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.

negative_rules VariationImageGenerationEngine

List of negatively-weighted rules applied to the text prompt, if supported by the driver.

output_dir VariationImageGenerationEngine

If provided, the generated image will be written to disk in output_dir.

output_file VariationImageGenerationEngine

If provided, the generated image will be written to disk as output_file.

Source code in griptape/griptape/tasks/variation_image_generation_task.py
@define
class VariationImageGenerationTask(BaseImageGenerationTask):
    """A task that generates a variation of an image using a prompt.

    Accepts a text prompt and image as input in one of the following formats:
    - tuple of (template string, ImageArtifact)
    - tuple of (TextArtifact, ImageArtifact)
    - Callable that returns a tuple of (TextArtifact, ImageArtifact)

    Attributes:
        image_generation_engine: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    image_generation_engine: VariationImageGenerationEngine = field(kw_only=True)
    _input: tuple[str | TextArtifact, ImageArtifact] | Callable[[BaseTask], tuple[TextArtifact, ImageArtifact]] = field(
        default=None
    )

    @property
    def input(self) -> tuple[TextArtifact, ImageArtifact]:
        """Resolve the raw input into a (TextArtifact, ImageArtifact) pair.

        A plain-string prompt is treated as a Jinja2 template and rendered
        against the task's full context on every access.

        Raises:
            ValueError: If the input is neither a tuple nor a callable.
        """
        if isinstance(self._input, tuple):
            if isinstance(self._input[0], TextArtifact):
                input_text = self._input[0]
            else:
                input_text = TextArtifact(J2().render_from_string(self._input[0], **self.full_context))

            return input_text, self._input[1]
        elif isinstance(self._input, Callable):
            return self._input(self)
        else:
            raise ValueError("Input must be a tuple of (text, image) or a callable that returns such a tuple.")

    @input.setter
    def input(self, value: tuple[TextArtifact, ImageArtifact]) -> None:
        self._input = value

    def run(self) -> ImageArtifact:
        """Generate a variation image and optionally write it to disk.

        Returns:
            The generated ImageArtifact.
        """
        # Evaluate the `input` property exactly once: each access re-renders
        # the prompt template (or re-invokes the input callable), so indexing
        # the property twice would duplicate that work and could produce
        # inconsistent prompt/image halves.
        prompt_artifact, image_artifact = self.input

        output_image_artifact = self.image_generation_engine.run(
            prompts=[prompt_artifact.to_text()],
            image=image_artifact,
            rulesets=self.all_rulesets,
            negative_rulesets=self.negative_rulesets,
        )

        if self.output_dir or self.output_file:
            self._write_to_file(output_image_artifact)

        return output_image_artifact

image_generation_engine: VariationImageGenerationEngine = field(kw_only=True) class-attribute instance-attribute

input: tuple[TextArtifact, ImageArtifact] property writable

run()

Source code in griptape/griptape/tasks/variation_image_generation_task.py
def run(self) -> ImageArtifact:
    """Generate a variation image and optionally write it to disk.

    Returns:
        The generated ImageArtifact.
    """
    # Evaluate the `input` property exactly once: each access re-renders the
    # prompt template (or re-invokes the input callable), so indexing the
    # property twice would duplicate that work and could produce inconsistent
    # prompt/image halves.
    prompt_artifact, image_artifact = self.input

    output_image_artifact = self.image_generation_engine.run(
        prompts=[prompt_artifact.to_text()],
        image=image_artifact,
        rulesets=self.all_rulesets,
        negative_rulesets=self.negative_rulesets,
    )

    if self.output_dir or self.output_file:
        self._write_to_file(output_image_artifact)

    return output_image_artifact