diff --git a/.codegen.json b/.codegen.json
index c6ea4a09..f7b5805f 100644
--- a/.codegen.json
+++ b/.codegen.json
@@ -1 +1 @@
-{ "engineHash": "8243188", "specHash": "9919482", "version": "1.2.0" }
+{ "engineHash": "8243188", "specHash": "871a814", "version": "1.2.0" }
diff --git a/box_sdk_gen/managers/ai.py b/box_sdk_gen/managers/ai.py
index 6a20cce4..432b6a5e 100644
--- a/box_sdk_gen/managers/ai.py
+++ b/box_sdk_gen/managers/ai.py
@@ -16,14 +16,16 @@
 from typing import Union
 
-from box_sdk_gen.internal.utils import DateTime
+from box_sdk_gen.schemas.ai_dialogue_history import AiDialogueHistory
 
-from box_sdk_gen.schemas.ai_response import AiResponse
+from box_sdk_gen.schemas.ai_ask_response import AiAskResponse
 
 from box_sdk_gen.schemas.client_error import ClientError
 
 from box_sdk_gen.schemas.ai_ask import AiAsk
 
+from box_sdk_gen.schemas.ai_response import AiResponse
+
 from box_sdk_gen.schemas.ai_text_gen import AiTextGen
 
 from box_sdk_gen.schemas.ai_agent_ask import AiAgentAsk
@@ -114,29 +116,6 @@ def __init__(
         self.content = content
 
 
-class CreateAiTextGenDialogueHistory(BaseObject):
-    def __init__(
-        self,
-        *,
-        prompt: Optional[str] = None,
-        answer: Optional[str] = None,
-        created_at: Optional[DateTime] = None,
-        **kwargs
-    ):
-        """
-        :param prompt: The prompt previously provided by the client and answered by the LLM., defaults to None
-        :type prompt: Optional[str], optional
-        :param answer: The answer previously provided by the LLM., defaults to None
-        :type answer: Optional[str], optional
-        :param created_at: The ISO date formatted timestamp of when the previous answer to the prompt was created., defaults to None
-        :type created_at: Optional[DateTime], optional
-        """
-        super().__init__(**kwargs)
-        self.prompt = prompt
-        self.answer = answer
-        self.created_at = created_at
-
-
 class GetAiAgentDefaultConfigMode(str, Enum):
     ASK = 'ask'
     TEXT_GEN = 'text_gen'
@@ -160,9 +139,11 @@ def create_ai_ask(
         prompt: str,
         items: List[CreateAiAskItems],
         *,
+        dialogue_history: Optional[List[AiDialogueHistory]] = None,
+        include_citations: Optional[bool] = None,
         ai_agent: Optional[AiAgentAsk] = None,
         extra_headers: Optional[Dict[str, Optional[str]]] = None
-    ) -> AiResponse:
+    ) -> AiAskResponse:
         """
         Sends an AI request to supported LLMs and returns an answer specifically focused on the user's question given the provided context.
         :param mode: The mode specifies if this request is for a single or multiple items. If you select `single_item_qa` the `items` array can have one element only. Selecting `multiple_item_qa` allows you to provide up to 25 items.
         :type mode: CreateAiAskMode
         :param prompt: The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
         :type prompt: str
         :param items: The items to be processed by the LLM, often files.
             **Note**: Box AI handles documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first.
             If the file size exceeds 1MB, the first 1MB of text representation will be processed.
             If you set `mode` parameter to `single_item_qa`, the `items` array can have one element only.
         :type items: List[CreateAiAskItems]
+        :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
+        :type dialogue_history: Optional[List[AiDialogueHistory]], optional
+        :param include_citations: A flag to indicate whether citations should be returned., defaults to None
+        :type include_citations: Optional[bool], optional
         :param extra_headers: Extra headers that will be included in the HTTP request., defaults to None
         :type extra_headers: Optional[Dict[str, Optional[str]]], optional
         """
@@ -184,6 +169,8 @@ def create_ai_ask(
             'mode': mode,
             'prompt': prompt,
             'items': items,
+            'dialogue_history': dialogue_history,
+            'include_citations': include_citations,
             'ai_agent': ai_agent,
         }
         headers_map: Dict[str, str] = prepare_params({**extra_headers})
@@ -199,14 +186,14 @@ def create_ai_ask(
                 network_session=self.network_session,
             ),
         )
-        return deserialize(response.data, AiResponse)
+        return deserialize(response.data, AiAskResponse)
 
     def create_ai_text_gen(
         self,
         prompt: str,
         items: List[CreateAiTextGenItems],
         *,
-        dialogue_history: Optional[List[CreateAiTextGenDialogueHistory]] = None,
+        dialogue_history: Optional[List[AiDialogueHistory]] = None,
         ai_agent: Optional[AiAgentTextGen] = None,
         extra_headers: Optional[Dict[str, Optional[str]]] = None
     ) -> AiResponse:
@@ -221,7 +208,7 @@ def create_ai_text_gen(
             If the file size exceeds 1MB, the first 1MB of text representation will be processed.
         :type items: List[CreateAiTextGenItems]
         :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
-        :type dialogue_history: Optional[List[CreateAiTextGenDialogueHistory]], optional
+        :type dialogue_history: Optional[List[AiDialogueHistory]], optional
         :param extra_headers: Extra headers that will be included in the HTTP request., defaults to None
         :type extra_headers: Optional[Dict[str, Optional[str]]], optional
         """
diff --git a/box_sdk_gen/schemas/__init__.py b/box_sdk_gen/schemas/__init__.py
index b682b5f9..e8956f2b 100644
--- a/box_sdk_gen/schemas/__init__.py
+++ b/box_sdk_gen/schemas/__init__.py
@@ -340,12 +340,18 @@
 from box_sdk_gen.schemas.ai_agent_text_gen import *
 
-from box_sdk_gen.schemas.ai_text_gen import *
-
 from box_sdk_gen.schemas.ai_agent_basic_text_tool_ask import *
 
 from box_sdk_gen.schemas.ai_agent_ask import *
 
+from box_sdk_gen.schemas.ai_citation import *
+
+from box_sdk_gen.schemas.ai_ask_response import *
+
+from box_sdk_gen.schemas.ai_dialogue_history import *
+
+from box_sdk_gen.schemas.ai_text_gen import *
+
 from box_sdk_gen.schemas.ai_ask import *
 
 from box_sdk_gen.schemas.completion_rule_variable import *
diff --git a/box_sdk_gen/schemas/ai_agent_ask.py b/box_sdk_gen/schemas/ai_agent_ask.py
index 11ebfc5a..88cec1ec 100644
--- a/box_sdk_gen/schemas/ai_agent_ask.py
+++ b/box_sdk_gen/schemas/ai_agent_ask.py
@@ -19,7 +19,7 @@ class AiAgentAsk(BaseObject):
     def __init__(
         self,
         *,
-        type: Optional[AiAgentAskTypeField] = None,
+        type: AiAgentAskTypeField = AiAgentAskTypeField.AI_AGENT_ASK.value,
         long_text: Optional[AiAgentLongTextTool] = None,
         basic_text: Optional[AiAgentBasicTextToolAsk] = None,
         long_text_multi: Optional[AiAgentLongTextTool] = None,
@@ -27,8 +27,8 @@ def __init__(
         **kwargs
     ):
         """
-        :param type: The type of AI agent used to handle queries., defaults to None
-        :type type: Optional[AiAgentAskTypeField], optional
+        :param type: The type of AI agent used to handle queries., defaults to AiAgentAskTypeField.AI_AGENT_ASK.value
+        :type type: AiAgentAskTypeField, optional
         """
         super().__init__(**kwargs)
         self.type = type
diff --git a/box_sdk_gen/schemas/ai_agent_text_gen.py b/box_sdk_gen/schemas/ai_agent_text_gen.py
index 930d2e79..0f183e7b 100644
--- a/box_sdk_gen/schemas/ai_agent_text_gen.py
+++ b/box_sdk_gen/schemas/ai_agent_text_gen.py
@@ -17,13 +17,13 @@ class AiAgentTextGen(BaseObject):
     def __init__(
         self,
         *,
-        type: Optional[AiAgentTextGenTypeField] = None,
+        type: AiAgentTextGenTypeField = AiAgentTextGenTypeField.AI_AGENT_TEXT_GEN.value,
         basic_gen: Optional[AiAgentBasicGenTool] = None,
         **kwargs
     ):
         """
-        :param type: The type of AI agent used for generating text., defaults to None
-        :type type: Optional[AiAgentTextGenTypeField], optional
+        :param type: The type of AI agent used for generating text., defaults to AiAgentTextGenTypeField.AI_AGENT_TEXT_GEN.value
+        :type type: AiAgentTextGenTypeField, optional
         """
         super().__init__(**kwargs)
         self.type = type
diff --git a/box_sdk_gen/schemas/ai_ask.py b/box_sdk_gen/schemas/ai_ask.py
index 307c66c1..146b3979 100644
--- a/box_sdk_gen/schemas/ai_ask.py
+++ b/box_sdk_gen/schemas/ai_ask.py
@@ -6,6 +6,8 @@
 from typing import List
 
+from box_sdk_gen.schemas.ai_dialogue_history import AiDialogueHistory
+
 from box_sdk_gen.schemas.ai_agent_ask import AiAgentAsk
 
@@ -50,6 +52,8 @@ def __init__(
         prompt: str,
         items: List[AiAskItemsField],
         *,
+        dialogue_history: Optional[List[AiDialogueHistory]] = None,
+        include_citations: Optional[bool] = None,
         ai_agent: Optional[AiAgentAsk] = None,
         **kwargs
     ):
@@ -64,9 +68,15 @@ def __init__(
             If the file size exceeds 1MB, the first 1MB of text representation will be processed.
             If you set `mode` parameter to `single_item_qa`, the `items` array can have one element only.
         :type items: List[AiAskItemsField]
+        :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
+        :type dialogue_history: Optional[List[AiDialogueHistory]], optional
+        :param include_citations: A flag to indicate whether citations should be returned., defaults to None
+        :type include_citations: Optional[bool], optional
         """
         super().__init__(**kwargs)
         self.mode = mode
         self.prompt = prompt
         self.items = items
+        self.dialogue_history = dialogue_history
+        self.include_citations = include_citations
         self.ai_agent = ai_agent
diff --git a/box_sdk_gen/schemas/ai_ask_response.py b/box_sdk_gen/schemas/ai_ask_response.py
new file mode 100644
index 00000000..2c8facd7
--- /dev/null
+++ b/box_sdk_gen/schemas/ai_ask_response.py
@@ -0,0 +1,36 @@
+from typing import Optional
+
+from typing import List
+
+from box_sdk_gen.internal.base_object import BaseObject
+
+from box_sdk_gen.schemas.ai_citation import AiCitation
+
+from box_sdk_gen.internal.utils import DateTime
+
+
+class AiAskResponse(BaseObject):
+    def __init__(
+        self,
+        answer: str,
+        created_at: DateTime,
+        *,
+        completion_reason: Optional[str] = None,
+        citations: Optional[List[AiCitation]] = None,
+        **kwargs
+    ):
+        """
+        :param answer: The answer provided by the LLM.
+        :type answer: str
+        :param created_at: The ISO date formatted timestamp of when the answer to the prompt was created.
+        :type created_at: DateTime
+        :param completion_reason: The reason the response finishes., defaults to None
+        :type completion_reason: Optional[str], optional
+        :param citations: The citations referenced in the LLM's answer., defaults to None
+        :type citations: Optional[List[AiCitation]], optional
+        """
+        super().__init__(**kwargs)
+        self.answer = answer
+        self.created_at = created_at
+        self.completion_reason = completion_reason
+        self.citations = citations
diff --git a/box_sdk_gen/schemas/ai_citation.py b/box_sdk_gen/schemas/ai_citation.py
new file mode 100644
index 00000000..affda29b
--- /dev/null
+++ b/box_sdk_gen/schemas/ai_citation.py
@@ -0,0 +1,38 @@
+from enum import Enum
+
+from typing import Optional
+
+from box_sdk_gen.internal.base_object import BaseObject
+
+
+class AiCitationTypeField(str, Enum):
+    FILE = 'file'
+
+
+class AiCitation(BaseObject):
+    _discriminator = 'type', {'file'}
+
+    def __init__(
+        self,
+        *,
+        content: Optional[str] = None,
+        id: Optional[str] = None,
+        type: Optional[AiCitationTypeField] = None,
+        name: Optional[str] = None,
+        **kwargs
+    ):
+        """
+        :param content: The specific content from where the answer was referenced., defaults to None
+        :type content: Optional[str], optional
+        :param id: The id of the item., defaults to None
+        :type id: Optional[str], optional
+        :param type: The type of the item., defaults to None
+        :type type: Optional[AiCitationTypeField], optional
+        :param name: The name of the item., defaults to None
+        :type name: Optional[str], optional
+        """
+        super().__init__(**kwargs)
+        self.content = content
+        self.id = id
+        self.type = type
+        self.name = name
diff --git a/box_sdk_gen/schemas/ai_dialogue_history.py b/box_sdk_gen/schemas/ai_dialogue_history.py
new file mode 100644
index 00000000..7cab9fe0
--- /dev/null
+++ b/box_sdk_gen/schemas/ai_dialogue_history.py
@@ -0,0 +1,28 @@
+from typing import Optional
+
+from box_sdk_gen.internal.base_object import BaseObject
+
+from box_sdk_gen.internal.utils import DateTime
+
+
+class AiDialogueHistory(BaseObject):
+    def __init__(
+        self,
+        *,
+        prompt: Optional[str] = None,
+        answer: Optional[str] = None,
+        created_at: Optional[DateTime] = None,
+        **kwargs
+    ):
+        """
+        :param prompt: The prompt previously provided by the client and answered by the LLM., defaults to None
+        :type prompt: Optional[str], optional
+        :param answer: The answer previously provided by the LLM., defaults to None
+        :type answer: Optional[str], optional
+        :param created_at: The ISO date formatted timestamp of when the previous answer to the prompt was created., defaults to None
+        :type created_at: Optional[DateTime], optional
+        """
+        super().__init__(**kwargs)
+        self.prompt = prompt
+        self.answer = answer
+        self.created_at = created_at
diff --git a/box_sdk_gen/schemas/ai_text_gen.py b/box_sdk_gen/schemas/ai_text_gen.py
index 07d92259..32c200f4 100644
--- a/box_sdk_gen/schemas/ai_text_gen.py
+++ b/box_sdk_gen/schemas/ai_text_gen.py
@@ -6,9 +6,9 @@
 from typing import List
 
-from box_sdk_gen.schemas.ai_agent_text_gen import AiAgentTextGen
+from box_sdk_gen.schemas.ai_dialogue_history import AiDialogueHistory
 
-from box_sdk_gen.internal.utils import DateTime
+from box_sdk_gen.schemas.ai_agent_text_gen import AiAgentTextGen
 
 
 class AiTextGenItemsTypeField(str, Enum):
@@ -40,36 +40,13 @@ def __init__(
         self.content = content
 
 
-class AiTextGenDialogueHistoryField(BaseObject):
-    def __init__(
-        self,
-        *,
-        prompt: Optional[str] = None,
-        answer: Optional[str] = None,
-        created_at: Optional[DateTime] = None,
-        **kwargs
-    ):
-        """
-        :param prompt: The prompt previously provided by the client and answered by the LLM., defaults to None
-        :type prompt: Optional[str], optional
-        :param answer: The answer previously provided by the LLM., defaults to None
-        :type answer: Optional[str], optional
-        :param created_at: The ISO date formatted timestamp of when the previous answer to the prompt was created., defaults to None
-        :type created_at: Optional[DateTime], optional
-        """
-        super().__init__(**kwargs)
-        self.prompt = prompt
-        self.answer = answer
-        self.created_at = created_at
-
-
 class AiTextGen(BaseObject):
     def __init__(
         self,
         prompt: str,
         items: List[AiTextGenItemsField],
         *,
-        dialogue_history: Optional[List[AiTextGenDialogueHistoryField]] = None,
+        dialogue_history: Optional[List[AiDialogueHistory]] = None,
         ai_agent: Optional[AiAgentTextGen] = None,
         **kwargs
     ):
@@ -83,7 +60,7 @@ def __init__(
             If the file size exceeds 1MB, the first 1MB of text representation will be processed.
         :type items: List[AiTextGenItemsField]
         :param dialogue_history: The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response., defaults to None
-        :type dialogue_history: Optional[List[AiTextGenDialogueHistoryField]], optional
+        :type dialogue_history: Optional[List[AiDialogueHistory]], optional
         """
         super().__init__(**kwargs)
         self.prompt = prompt
diff --git a/docs/ai.md b/docs/ai.md
index 96dce102..1a9e0b4c 100644
--- a/docs/ai.md
+++ b/docs/ai.md
@@ -37,6 +37,10 @@ client.ai.create_ai_ask(
   - The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
 - items `List[CreateAiAskItems]`
   - The items to be processed by the LLM, often files. **Note**: Box AI handles documents with text representations up to 1MB in size, or a maximum of 25 files, whichever comes first. If the file size exceeds 1MB, the first 1MB of text representation will be processed. If you set `mode` parameter to `single_item_qa`, the `items` array can have one element only.
+- dialogue_history `Optional[List[AiDialogueHistory]]`
+  - The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response.
+- include_citations `Optional[bool]`
+  - A flag to indicate whether citations should be returned.
 - ai_agent `Optional[AiAgentAsk]`
   -
 - extra_headers `Optional[Dict[str, Optional[str]]]`
@@ -44,7 +48,7 @@ client.ai.create_ai_ask(
 
 ### Returns
 
-This function returns a value of type `AiResponse`.
+This function returns a value of type `AiAskResponse`.
 
 A successful response including the answer from the LLM.
@@ -65,12 +69,12 @@ client.ai.create_ai_text_gen(
         )
     ],
     dialogue_history=[
-        CreateAiTextGenDialogueHistory(
+        AiDialogueHistory(
             prompt="What does the earth go around?",
             answer="The sun",
             created_at=date_time_from_string("2021-01-01T00:00:00Z"),
         ),
-        CreateAiTextGenDialogueHistory(
+        AiDialogueHistory(
             prompt="On Earth, where does the sun rise?",
             answer="East",
             created_at=date_time_from_string("2021-01-01T00:00:00Z"),
         ),
     ],
 )
@@ -86,7 +90,7 @@ client.ai.create_ai_text_gen(
   - The prompt provided by the client to be answered by the LLM. The prompt's length is limited to 10000 characters.
 - items `List[CreateAiTextGenItems]`
   - The items to be processed by the LLM, often files. The array can include **exactly one** element. **Note**: Box AI handles documents with text representations up to 1MB in size. If the file size exceeds 1MB, the first 1MB of text representation will be processed.
-- dialogue_history `Optional[List[CreateAiTextGenDialogueHistory]]`
+- dialogue_history `Optional[List[AiDialogueHistory]]`
   - The history of prompts and answers previously passed to the LLM. This provides additional context to the LLM in generating the response.
 - ai_agent `Optional[AiAgentTextGen]`
   -
diff --git a/test/ai.py b/test/ai.py
index fef03f0f..ee47b822 100644
--- a/test/ai.py
+++ b/test/ai.py
@@ -6,7 +6,7 @@
 from box_sdk_gen.schemas.file_full import FileFull
 
-from box_sdk_gen.schemas.ai_response import AiResponse
+from box_sdk_gen.schemas.ai_ask_response import AiAskResponse
 
 from box_sdk_gen.managers.ai import CreateAiAskMode
 
@@ -14,11 +14,13 @@
 from box_sdk_gen.managers.ai import CreateAiAskItemsTypeField
 
+from box_sdk_gen.schemas.ai_response import AiResponse
+
 from box_sdk_gen.managers.ai import CreateAiTextGenItems
 
 from box_sdk_gen.managers.ai import CreateAiTextGenItemsTypeField
 
-from box_sdk_gen.managers.ai import CreateAiTextGenDialogueHistory
+from box_sdk_gen.schemas.ai_dialogue_history import AiDialogueHistory
 
 from test.commons import get_default_client
 
@@ -44,7 +46,7 @@ def testAskAISingleItem():
         GetAiAgentDefaultConfigMode.ASK.value, language='en-US'
     )
     file_to_ask: FileFull = upload_new_file()
-    response: AiResponse = client.ai.create_ai_ask(
+    response: AiAskResponse = client.ai.create_ai_ask(
         CreateAiAskMode.SINGLE_ITEM_QA.value,
         'which direction sun rises',
         [
@@ -64,7 +66,7 @@ def testAskAIMultipleItems():
     file_to_ask_1: FileFull = upload_new_file()
     file_to_ask_2: FileFull = upload_new_file()
-    response: AiResponse = client.ai.create_ai_ask(
+    response: AiAskResponse = client.ai.create_ai_ask(
         CreateAiAskMode.MULTIPLE_ITEM_QA.value,
         'Which direction sun rises?',
         [
@@ -101,12 +103,12 @@ def testAITextGenWithDialogueHistory():
             )
         ],
        dialogue_history=[
-            CreateAiTextGenDialogueHistory(
+            AiDialogueHistory(
                 prompt='What does the earth go around?',
                 answer='The sun',
                 created_at=date_time_from_string('2021-01-01T00:00:00Z'),
             ),
-            CreateAiTextGenDialogueHistory(
+            AiDialogueHistory(
                 prompt='On Earth, where does the sun rise?',
                 answer='East',
                 created_at=date_time_from_string('2021-01-01T00:00:00Z'),
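For reference, the sketch below (not part of the diff) shows how the pieces introduced above fit together: the new `dialogue_history` and `include_citations` parameters on `create_ai_ask`, the `AiAskResponse` return type, and the `AiDialogueHistory`/`AiCitation` schemas. The developer token and file id are hypothetical placeholders, and the `date_time_from_string` import path is assumed to match the `box_sdk_gen.internal.utils` module the diff itself references.

```python
from box_sdk_gen import BoxClient, BoxDeveloperTokenAuth
from box_sdk_gen.internal.utils import date_time_from_string
from box_sdk_gen.managers.ai import (
    CreateAiAskItems,
    CreateAiAskItemsTypeField,
    CreateAiAskMode,
)
from box_sdk_gen.schemas.ai_ask_response import AiAskResponse
from box_sdk_gen.schemas.ai_dialogue_history import AiDialogueHistory

# Placeholder credentials and file id -- substitute real values.
client = BoxClient(auth=BoxDeveloperTokenAuth(token='DEVELOPER_TOKEN'))

response: AiAskResponse = client.ai.create_ai_ask(
    CreateAiAskMode.SINGLE_ITEM_QA.value,
    'On Earth, where does the sun set?',
    [CreateAiAskItems(id='1234567890', type=CreateAiAskItemsTypeField.FILE.value)],
    # Prior turns give the LLM conversational context for the follow-up question.
    dialogue_history=[
        AiDialogueHistory(
            prompt='On Earth, where does the sun rise?',
            answer='East',
            created_at=date_time_from_string('2021-01-01T00:00:00Z'),
        )
    ],
    # Ask the service to return AiCitation entries alongside the answer.
    include_citations=True,
)

print(response.answer)
# citations is Optional on AiAskResponse, so guard for None even when requested.
if response.citations:
    for citation in response.citations:
        print(citation.name, citation.id, citation.content)
```

Note the design point the diff encodes: the text-gen-specific `CreateAiTextGenDialogueHistory`/`AiTextGenDialogueHistoryField` classes are removed in favor of the single shared `AiDialogueHistory` schema, so the same dialogue-history list type now feeds both `create_ai_ask` and `create_ai_text_gen`.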