rigging.prompt
Treat empty function signatures as prompts for structured chat interfaces.
DEFAULT_DOC = 'Convert the following inputs to outputs ({func_name}).'
module-attribute
Default docstring if none is provided to a prompt function.
DEFAULT_MAX_ROUNDS = 3
module-attribute
Default maximum number of rounds for a prompt to run until outputs are parsed.
Ctx(tag: str | None = None, prefix: str | None = None, example: str | Model | None = None)
dataclass
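Annotation context for prompt inputs and outputs. As the notes under the prompt decorator below describe, Ctx can override the xml tag, provide a prefix string, or supply example content placed inside output xml tags. A minimal sketch of applying it through typing.Annotated (the Joke alias and example text are illustrative, not part of the API):
import typing as t

import rigging as rg

# Illustrative: override the output xml tag and supply example content.
Joke = t.Annotated[str, rg.Ctx(tag="joke", example="Why did the ...")]

@rg.prompt
async def write_joke(topic: str) -> Joke:
    """Write a joke about {{ topic }}."""
    ...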
Prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | None = None, attempt_recovery: bool = True, drop_dialog: bool = True, max_rounds: int = DEFAULT_MAX_ROUNDS, inputs: list[Input] = list(), output: Output = dataclasses.field(default_factory=lambda: ChatOutput(id='chat', context=Ctx())), watch_callbacks: list[WatchChatCallback] = list(), params: GenerateParams | None = None, api_tools: list[ApiTool] = list(), _generator_id: str | None = None, _generator: Generator | None = None, _pipeline: ChatPipeline | None = None, _docstring: str | None = None)
dataclass
Bases: Generic[P, R]
Prompts wrap hollow functions and create structured chat interfaces for passing inputs into a ChatPipeline and parsing outputs.
api_tools: list[ApiTool] = dataclasses.field(default_factory=list)
class-attribute, instance-attribute
The API tools to be made available when generating chats for this prompt.
attempt_recovery: bool = True
class-attribute, instance-attribute
Whether the prompt should attempt to recover from errors in output parsing.
docstring: str
property
The docstring for the prompt function.
drop_dialog: bool = True
class-attribute, instance-attribute
When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.
func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | None = None
class-attribute, instance-attribute
The function that the prompt was derived from.
inputs: list[Input] = dataclasses.field(default_factory=list)
class-attribute, instance-attribute
The structured input handlers for the prompt.
max_rounds: int = DEFAULT_MAX_ROUNDS
class-attribute, instance-attribute
The maximum number of rounds the prompt should try to reparse outputs.
output: Output = dataclasses.field(default_factory=lambda: ChatOutput(id='chat', context=Ctx()))
class-attribute, instance-attribute
The structured output handler for the prompt.
params: GenerateParams | None = None
class-attribute, instance-attribute
The parameters to be used when generating chats for this prompt.
pipeline: ChatPipeline | None
property
If available, the resolved Chat Pipeline for the prompt.
template: str
property
The dynamic jinja2 template for the prompt function.
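The note under the prompt decorator below recommends inspecting this template when validating output types. A short sketch, reusing say_hello from the bind examples (the exact template text depends on the library version):
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

# Print the jinja2 template that will be rendered on each call.
print(say_hello.template)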
watch_callbacks: list[WatchChatCallback] = dataclasses.field(default_factory=list)
class-attribute, instance-attribute
Callbacks to be passed any chats produced while executing this prompt.
bind(other: ChatPipeline | Generator | Chat | str) -> t.Callable[P, t.Coroutine[t.Any, t.Any, R]]
Binds the prompt to a pipeline, generator, or chat and returns a scoped run callable.
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind("gpt-3.5-turbo")("the world")
Parameters:
- other (ChatPipeline | Generator | Chat | str) – The pipeline, generator, generator id, or chat to bind to.
Returns:
- Callable[P, Coroutine[Any, Any, R]] – A callable for executing this prompt.
bind_many(other: ChatPipeline | Generator | Chat | str) -> t.Callable[Concatenate[int, P], t.Coroutine[t.Any, t.Any, list[R]]]
Binds the prompt to a pipeline, generator, or chat and returns a scoped run_many callable.
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind_many("gpt-3.5-turbo")(5, "the world")
Parameters:
- other (ChatPipeline | Generator | Chat | str) – The pipeline, generator, generator id, or chat to bind to.
Returns:
- Callable[Concatenate[int, P], Coroutine[Any, Any, list[R]]] – A callable for executing this prompt.
bind_over(other: ChatPipeline | Generator | Chat | str | None = None) -> t.Callable[Concatenate[t.Sequence[Generator | str], P], t.Coroutine[t.Any, t.Any, list[R]]]
Binds the prompt to a pipeline, generator, or chat and returns a scoped run_over callable.
@rg.prompt
def say_hello(name: str) -> str:
    """Say hello to {{ name }}"""

await say_hello.bind_over("gpt-3.5-turbo")(["gpt-4o", "gpt-4"], "the world")
Parameters:
- other (ChatPipeline | Generator | Chat | str | None, default: None) – The pipeline, generator, generator id, or chat to bind to.
Returns:
- Callable[Concatenate[Sequence[Generator | str], P], Coroutine[Any, Any, list[R]]] – A callable for executing this prompt.
clone(*, skip_callbacks: bool = False) -> Prompt[P, R]
Creates a deep copy of this prompt.
Parameters:
- skip_callbacks (bool, default: False) – Whether to skip copying the watch callbacks.
Returns:
- Prompt[P, R] – A new instance of the prompt.
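A short sketch, assuming the summarize prompt from the watch examples below:
# Deep copy of the prompt; leave its watch callbacks behind.
isolated = summarize.clone(skip_callbacks=True)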
process(chat: Chat) -> R
Attempt to parse the output from a chat into the expected return type.
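process pairs with render when driving generation by hand. A sketch, assuming the usual rigging entry points (rg.get_generator and ChatPipeline.run) and say_hello from the bind examples above:
import rigging as rg

generator = rg.get_generator("gpt-4o-mini")  # illustrative generator id
chat = await generator.chat(say_hello.render("the world")).run()
greeting = say_hello.process(chat)  # parsed into the declared return type (str)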
render(*args: P.args, **kwargs: P.kwargs) -> str
Pass the arguments to the jinja2 template and render the full prompt.
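A minimal sketch; the rendered string contains the filled-in docstring content plus the formatting guidance derived from the input and output types:
# Render the full prompt text without generating anything.
print(say_hello.render("the world"))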
run(*args: P.args, **kwargs: P.kwargs) -> R
async
Use the prompt to run the function with the provided arguments and return the output.
Parameters:
- *args – The positional arguments for the prompt function.
- **kwargs – The keyword arguments for the prompt function.
Returns:
- R – The output of the prompt function.
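A sketch, assuming the prompt was constructed with a generator_id as in the decorator example at the bottom of this page ("gpt-4o-mini" is an arbitrary id):
@rg.prompt(generator_id="gpt-4o-mini")
async def summarize(text: str) -> str:
    """Summarize the following text."""
    ...

summary = await summarize.run("Long text ...")
# As the decorator example shows, calling the prompt directly is equivalent:
summary = await summarize("Long text ...")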
run_many(count: int, /, *args: P.args, **kwargs: P.kwargs) -> list[R]
async
Use the prompt to run the function multiple times with the provided arguments and return the output.
Parameters:
- count (int) – The number of times to run the prompt.
- *args – The positional arguments for the prompt function.
- **kwargs – The keyword arguments for the prompt function.
Returns:
- list[R] – The outputs of the prompt function.
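Continuing the summarize sketch above:
# Three independent completions with the same arguments.
summaries: list[str] = await summarize.run_many(3, "Long text ...")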
run_over(generators: t.Sequence[Generator | str], /, *args: P.args, **kwargs: P.kwargs) -> list[R]
async
Executes the prompt process across multiple generators.
For each generator, a pipeline is created and the generator is replaced before the run call. All callbacks and parameters are preserved.
If this prompt has a pipeline assigned, it will be included in the run.
Warning
The implementation currently skips any failed chats and only processes successful chats. This may change in the future.
Parameters:
- generators (Sequence[Generator | str]) – A sequence of generators to be used for the generation process.
Returns:
- list[R] – A list of generated outputs.
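Continuing the summarize sketch above (generator ids are illustrative):
# Execute across several generators; per the warning above, failed
# chats are currently skipped.
results: list[str] = await summarize.run_over(["gpt-4o", "gpt-4"], "Long text ...")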
set_(attempt_recovery: bool | None = None, drop_dialog: bool | None = None, max_rounds: int | None = None) -> Prompt[P, R]
Helper to allow updates to the parsing configuration.
Parameters:
- attempt_recovery (bool | None, default: None) – Whether the prompt should attempt to recover from errors in output parsing.
- drop_dialog (bool | None, default: None) – When attempting recovery, whether to drop intermediate dialog while parsing was being resolved.
- max_rounds (int | None, default: None) – The maximum number of rounds the prompt should try to reparse outputs.
Returns:
- Prompt[P, R] – Self
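Continuing the summarize sketch above:
# Loosen parsing behavior: more reparse rounds, keep the recovery dialog.
summarize.set_(max_rounds=5, attempt_recovery=True, drop_dialog=False)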
watch(*callbacks: WatchChatCallback) -> Prompt[P, R]
Registers a callback to monitor any chats produced for this prompt.
Parameters:
- *callbacks (WatchChatCallback) – The callback functions to be executed.
async def log(chats: list[Chat]) -> None:
    ...

@rg.prompt()
async def summarize(text: str) -> str:
    ...

summarize.watch(log)(...)

async def log(chats: list[Chat]) -> None:
    ...

async def _summarize(text: str) -> str:
    ...

summarize = rg.prompt(_summarize).watch(log)
Returns:
- Prompt[P, R] – Self
with_(params: t.Optional[GenerateParams] = None, **kwargs: t.Any) -> Prompt[P, R]
Assign specific generation parameter overloads for this prompt.
Parameters:
- params (Optional[GenerateParams], default: None) – The parameters to set for the underlying chat pipeline.
- **kwargs (Any) – An alternative way to pass parameters as keyword arguments.
Returns:
- Prompt[P, R] – Self
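Continuing the summarize sketch above; both forms should be equivalent (temperature and max_tokens are assumed GenerateParams fields):
import rigging as rg

summarize.with_(rg.GenerateParams(temperature=0.2, max_tokens=256))
summarize.with_(temperature=0.2, max_tokens=256)  # keyword shorthand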
make_prompt(content: str, return_type: type[R] | None = None, *, ctx: Ctx | None = None) -> Prompt[..., R] | Prompt[..., str]
Create a prompt at runtime from a basic string and return type (experimental).
import rigging as rg
write_joke = rg.make_prompt("Write a joke.", ctx=rg.Ctx(tag="joke"))
await write_joke.bind("gpt-4o-mini")()
Note
Adding input parameters is not currently supported. Instead use the rigging.prompt.prompt decorator.
Parameters:
- content (str) – The docstring content for the prompt.
- return_type (type[R] | None, default: None) – The return type of the prompt function.
- ctx (Ctx | None, default: None) – Context for the return type (use this instead of Annotated for better type hints).
Returns:
- Prompt[..., R] | Prompt[..., str] – A prompt instance.
prompt(func: t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R] | None = None, /, *, pipeline: ChatPipeline | None = None, generator: Generator | None = None, generator_id: str | None = None, tools: list[ApiTool | t.Callable[..., t.Any]] | None = None) -> t.Callable[[t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R]], Prompt[P, R]] | Prompt[P, R]
prompt(func: None = None, /, *, pipeline: ChatPipeline | None = None, generator: Generator | None = None, generator_id: str | None = None, tools: list[ApiTool | t.Callable[..., t.Any]] | None = None) -> t.Callable[[t.Callable[P, t.Coroutine[t.Any, t.Any, R]] | t.Callable[P, R]], Prompt[P, R]]
Convert a hollow function into a Prompt, which can be called directly or passed a chat pipeline to execute the function and parse the outputs.
from dataclasses import dataclass

import rigging as rg

@dataclass
class ExplainedJoke:
    chat: rg.Chat
    setup: str
    punchline: str
    explanation: str

@rg.prompt(generator_id="gpt-3.5-turbo")
async def write_joke(topic: str) -> ExplainedJoke:
    """Write a joke."""
    ...

await write_joke("programming")
Note
A docstring is not required, but this can be used to provide guidance to the model, or even handle any number of input transformations. Any input parameter which is not handled inside the docstring will be automatically added and formatted internally.
Note
Output parameters can be basic types, dataclasses, rigging models, lists, or tuples. Internal inspection will attempt to ensure your output types are valid, but there is no guarantee of complete coverage/safety. It's recommended to check rigging.prompt.Prompt.template to inspect the generated jinja2 template.
Note
If you annotate the return value of the function as a rigging.chat.Chat object, then no output parsing will take place and you can parse objects out manually.
You can also use Chat in any number of type annotations inside tuples or dataclasses. All instances will be filled with the final chat object transparently.
Note
All input parameters and output types can be annotated with the rigging.prompt.Ctx annotation to provide additional context for the prompt. This can be used to override the xml tag, provide a prefix string, or example content which will be placed inside output xml tags.
In the case of output parameters, especially in tuples, you might have xml tag collisions between the same basic types. Manually annotating xml tags with rigging.prompt.Ctx is recommended.
Parameters:
- func (Callable[P, Coroutine[Any, Any, R]] | Callable[P, R] | None, default: None) – The function to convert into a prompt.
- pipeline (ChatPipeline | None, default: None) – An optional pipeline to use for the prompt.
- generator (Generator | None, default: None) – An optional generator to use for the prompt.
- generator_id (str | None, default: None) – An optional generator id to use for the prompt.
- tools (list[ApiTool | Callable[..., Any]] | None, default: None) – An optional list of API tools to make available to the prompt (native tools are not currently supported).
Returns:
- Callable[[Callable[P, Coroutine[Any, Any, R]] | Callable[P, R]], Prompt[P, R]] | Prompt[P, R] – A prompt instance or a function that can be used to create a prompt.