Commit: Version 1.2.4
Austin Zielman committed Apr 26, 2024
1 parent 9f18560 · commit df9c04f
Showing 306 changed files with 3,001 additions and 1,502 deletions.
2 changes: 1 addition & 1 deletion abacusai/__init__.py
@@ -4,4 +4,4 @@
 from .streaming_client import StreamingClient


-__version__ = "1.2.2"
+__version__ = "1.2.4"
1 change: 1 addition & 0 deletions abacusai/agent.py
@@ -23,6 +23,7 @@ class Agent(AbstractApiClass):
         agentExecutionConfig (dict): The config for arguments used to execute the agent.
         latestAgentVersion (AgentVersion): The latest agent version.
         codeSource (CodeSource): If a python model, information on the source code
+        workflowGraph (WorkflowGraph): The workflow graph for the agent.
     """

     def __init__(self, client, name=None, agentId=None, createdAt=None, projectId=None, notebookId=None, predictFunctionName=None, sourceCode=None, agentConfig=None, memory=None, trainingRequired=None, agentExecutionConfig=None, codeSource={}, latestAgentVersion={}, workflowGraph={}):
9 changes: 4 additions & 5 deletions abacusai/agent_data_upload_result.py
@@ -1,4 +1,3 @@
-from .agent_data_document_info import AgentDataDocumentInfo
 from .return_class import AbstractApiClass


@@ -8,12 +7,12 @@ class AgentDataUploadResult(AbstractApiClass):
     Args:
         client (ApiClient): An authenticated API Client instance
-        docInfos (AgentDataDocumentInfo): A list of dict for information on the documents uploaded to agent.
+        docInfos (list[agentdatadocumentinfo]): A list of dict for information on the documents uploaded to agent.
     """

-    def __init__(self, client, docInfos={}):
+    def __init__(self, client, docInfos=None):
         super().__init__(client, None)
-        self.doc_infos = client._build_class(AgentDataDocumentInfo, docInfos)
+        self.doc_infos = docInfos
         self.deprecated_keys = {}

     def __repr__(self):
@@ -30,5 +29,5 @@ def to_dict(self):
         Returns:
             dict: The dict value representation of the class parameters
         """
-        resp = {'doc_infos': self._get_attribute_as_dict(self.doc_infos)}
+        resp = {'doc_infos': self.doc_infos}
         return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
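
With client._build_class removed, doc_infos is now stored and returned exactly as supplied, rather than being built into AgentDataDocumentInfo instances. A minimal sketch of the new behavior (assumes an authenticated client; the dict keys are illustrative):

    from abacusai.agent_data_upload_result import AgentDataUploadResult

    # doc_infos now passes through untouched after this change.
    result = AgentDataUploadResult(client, docInfos=[{'docId': 'doc1', 'size': 1024}])
    print(result.to_dict())  # {'doc_infos': [{'docId': 'doc1', 'size': 1024}]}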
1 change: 1 addition & 0 deletions abacusai/agent_version.py
@@ -20,6 +20,7 @@ class AgentVersion(AbstractApiClass):
         error (str): Relevant error if the status is FAILED.
         agentExecutionConfig (dict): The config for arguments used to execute the agent.
         codeSource (CodeSource): If a python model, information on where the source code is located.
+        workflowGraph (WorkflowGraph): The workflow graph for the agent.
     """

     def __init__(self, client, agentVersion=None, status=None, agentId=None, agentConfig=None, publishingStartedAt=None, publishingCompletedAt=None, pendingDeploymentIds=None, failedDeploymentIds=None, error=None, agentExecutionConfig=None, codeSource={}, workflowGraph={}):
49 changes: 38 additions & 11 deletions abacusai/api_class/abstract.py
@@ -4,7 +4,8 @@
 import re
 from abc import ABC
 from copy import deepcopy
-from typing import Any
+from textwrap import dedent
+from typing import Any, Callable, get_origin, get_type_hints

 from .enums import ApiEnum

@@ -34,6 +35,16 @@ def snake_case(value):
     return value


+def get_clean_function_source_code(func: Callable):
+    sample_lambda = (lambda: 0)
+    if isinstance(func, type(sample_lambda)) and func.__name__ == sample_lambda.__name__:
+        raise ValueError('Lambda function not allowed.')
+    source_code = inspect.getsource(func)
+    # If function source code has some initial indentation, remove it (Ex - can happen if the functor was defined inside a function)
+    source_code = dedent(source_code)
+    return source_code
+
+
 @dataclasses.dataclass
 class ApiClass(ABC):
     _upper_snake_case_keys: bool = dataclasses.field(default=False, repr=False, init=False)
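
The new helper dedents because inspect.getsource preserves a nested function's leading indentation, which would make the captured source invalid as standalone top-level code. A minimal illustration (run from a file, since inspect.getsource needs the source on disk):

    import inspect
    from textwrap import dedent

    def make_handler():
        def handler(x):
            return x + 1
        return handler

    src = inspect.getsource(make_handler())
    print(src.startswith('    def handler'))      # True: nested def keeps its indentation
    print(dedent(src).startswith('def handler'))  # True: dedent strips it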
@@ -111,12 +122,12 @@ def to_dict_helper(api_class_obj):
             res = {}
             api_class_dict = vars(api_class_obj)
             if self._support_kwargs:
-                kwargs = api_class_dict.pop('kwargs', None)
+                kwargs = api_class_dict.get('kwargs', None)
                 api_class_dict.update(kwargs or {})
             for k, v in api_class_dict.items():
-                if not k.startswith('__'):
-                    k = upper_snake_case(k) if self._upper_snake_case_keys else camel_case(k)
-                    if v is not None:
+                if v is not None and k != 'kwargs':
+                    if not k.startswith('__'):
+                        k = upper_snake_case(k) if self._upper_snake_case_keys else camel_case(k)
                     if isinstance(v, ApiClass):
                         res[k] = to_dict_helper(v)
                     elif isinstance(v, list):
@@ -125,17 +136,19 @@ def to_dict_helper(api_class_obj):
                         res[k] = {key: to_dict_helper(val) if isinstance(val, ApiClass) else val for key, val in v.items()}
                     elif isinstance(v, datetime.datetime) or isinstance(v, datetime.date):
                         res[k] = v.isoformat() if v else v
+                    elif isinstance(v, ApiEnum):
+                        res[k] = v.value
                     else:
-                        if isinstance(v, ApiEnum):
-                            res[k] = v.value
-                        else:
-                            res[k] = v
+                        res[k] = v
             return res

         return to_dict_helper(self)

     @classmethod
     def from_dict(cls, input_dict: dict):
+        if input_dict is None:
+            return None
+        obj = None
         if input_dict:
             if builder := cls._get_builder():
                 config_class_key = None
@@ -152,14 +165,28 @@ def from_dict(cls, input_dict: dict):
                     if config_class_key not in input_dict_with_config_key and camel_case(config_class_key) not in input_dict_with_config_key:
                         input_dict_with_config_key[config_class_key] = value

-                return builder.from_dict(input_dict_with_config_key)
+                obj = builder.from_dict(input_dict_with_config_key)

         if not cls._upper_snake_case_keys:
             input_dict = {snake_case(k): v for k, v in input_dict.items()}
         if not cls._support_kwargs:
             # only use keys that are valid fields in the ApiClass
             field_names = set((field.name) for field in dataclasses.fields(cls))
             input_dict = {k: v for k, v in input_dict.items() if k in field_names}
-        return cls(**input_dict)
+        if obj is None:
+            obj = cls(**input_dict)
+
+        for attr_name, attr_type in get_type_hints(cls).items():
+            if attr_name in input_dict and inspect.isclass(attr_type) and issubclass(attr_type, ApiClass):
+                setattr(obj, attr_name, attr_type.from_dict(input_dict[attr_name]))
+            elif attr_name in input_dict and get_origin(attr_type) is list and attr_type.__args__ and inspect.isclass(attr_type.__args__[0]) and issubclass(attr_type.__args__[0], ApiClass):
+                class_type = attr_type.__args__[0]
+                if isinstance(input_dict[attr_name], list):
+                    setattr(obj, attr_name, [class_type.from_dict(item) for item in input_dict[attr_name]])
+                else:
+                    raise ValueError(f'Expected list for {attr_name} but got {type(input_dict[attr_name])}')
+
+        return obj


 @dataclasses.dataclass
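
The expanded from_dict now walks the class's type hints and recursively hydrates nested ApiClass fields, including List[...] fields. A sketch with hypothetical subclasses (illustrative only, not classes from the SDK):

    import dataclasses
    from typing import List

    from abacusai.api_class.abstract import ApiClass

    @dataclasses.dataclass
    class Inner(ApiClass):  # hypothetical subclass for illustration
        value: int = None

    @dataclasses.dataclass
    class Outer(ApiClass):  # hypothetical subclass for illustration
        inner: Inner = None
        items: List[Inner] = None

    obj = Outer.from_dict({'inner': {'value': 1}, 'items': [{'value': 2}, {'value': 3}]})
    print(type(obj.inner).__name__)  # Inner: hydrated from the nested dict
    print(obj.items[1].value)        # 3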
30 changes: 15 additions & 15 deletions abacusai/api_class/ai_agents.py
@@ -2,7 +2,7 @@
 from typing import List, Union

 from . import enums
-from .abstract import ApiClass
+from .abstract import ApiClass, get_clean_function_source_code


 @dataclasses.dataclass
@@ -14,7 +14,7 @@ class FieldDescriptor(ApiClass):
         field (str): The field to be extracted. This will be used as the key in the response.
         description (str): The description of this field. If not included, the response_field will be used.
         example_extraction (Union[str, int, bool, float]): An example of this extracted field.
-        type (enums.FieldDescriptorType): The type of this field. If not provided, the default type is STRING.
+        type (FieldDescriptorType): The type of this field. If not provided, the default type is STRING.
     """
     field: str = dataclasses.field()
     description: str = dataclasses.field(default=None)
@@ -29,20 +29,23 @@ class WorkflowNodeInputMapping(ApiClass):
     Args:
         name (str): The name of the input.
-        variable_type (str): The type of the input.
-        workflow_variable_source (str): The workflow source stage of the input.
+        variable_type (WorkflowNodeInputType): The type of the input.
+        variable_source (str): The name of the node this variable is sourced from.
+            If the type is `WORKFLOW_VARIABLE`, the value given by the source node will be directly used.
+            If the type is `USER_INPUT`, the value given by the source node will be used as the default initial value before user edits it.
+            Set to `None` if the type is `USER_INPUT` and the variable doesn't need a pre-filled initial value.
         is_required (bool): Whether the input is required.
     """
     name: str
     variable_type: enums.WorkflowNodeInputType
-    workflow_variable_source: str = dataclasses.field(default=None)
+    variable_source: str = dataclasses.field(default=None)
     is_required: bool = dataclasses.field(default=True)

     def to_dict(self):
         return {
             'name': self.name,
             'variable_type': self.variable_type,
-            'workflow_variable_source': self.workflow_variable_source,
+            'variable_source': self.variable_source,
             'is_required': self.is_required
         }

@@ -54,7 +57,7 @@ class WorkflowNodeOutputMapping(ApiClass):
     Args:
         name (str): The name of the output.
-        variable_type (str): The type of the output.
+        variable_type (WorkflowNodeOutputType): The type of the output.
     """
     name: str
     variable_type: enums.WorkflowNodeOutputType = dataclasses.field(default=enums.WorkflowNodeOutputType.STRING)
@@ -72,22 +75,19 @@ class WorkflowGraphNode(ApiClass):
     A node in an Agent workflow graph.
     Args:
-        name (str): Display name of the worflow node.
+        name (str): A unique name for the workflow node.
         input_mappings (List[WorkflowNodeInputMapping]): List of input mappings for the node.
         output_mappings (List[WorkflowNodeOutputMapping]): List of output mappings for the node.
-        function (callable): The callable node function reference if available.
         function_name (str): The name of the function if available.
         source_code (str): The source code of the function if available.
+        function (callable): The callable node function reference.
         input_schema (dict): The react json schema for the input form if applicable.
         output_schema (dict): The react json schema for the output if applicable.
         package_requirements (list): List of package requirements for the node.
     """

     def __init__(self, name: str, input_mappings: List[WorkflowNodeInputMapping], output_mappings: List[WorkflowNodeOutputMapping], function: callable = None, function_name: str = None, source_code: str = None, input_schema: dict = None, output_schema: dict = None, package_requirements: list = None):
         if function:
-            import inspect
             self.function_name = function.__name__
-            self.source_code = inspect.getsource(function)
+            self.source_code = get_clean_function_source_code(function)
         elif function_name and source_code:
             self.function_name = function_name
             self.source_code = source_code
@@ -133,8 +133,8 @@ class WorkflowGraphEdge(ApiClass):
     An edge in an Agent workflow graph.
     Args:
-        source (str): The source node of the edge.
-        target (str): The target node of the edge.
+        source (str): The name of the source node of the edge.
+        target (str): The name of the target node of the edge.
         details (dict): Additional details about the edge.
     """
     source: str
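
The rename from workflow_variable_source to variable_source applies to both the dataclass field and to_dict. A sketch of building a node against the new field names (the node names, function, and values are illustrative; run from a file so the function source can be captured):

    from abacusai.api_class import enums
    from abacusai.api_class.ai_agents import (
        WorkflowGraphNode,
        WorkflowNodeInputMapping,
        WorkflowNodeOutputMapping,
    )

    def summarize(text: str) -> dict:
        return {'summary': text[:100]}

    node = WorkflowGraphNode(
        name='summarizer',
        input_mappings=[WorkflowNodeInputMapping(
            name='text',
            variable_type=enums.WorkflowNodeInputType.WORKFLOW_VARIABLE,
            variable_source='ingest',  # name of the upstream node (was workflow_variable_source)
        )],
        output_mappings=[WorkflowNodeOutputMapping(name='summary')],
        function=summarize,  # source is captured via get_clean_function_source_code
    )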
12 changes: 6 additions & 6 deletions abacusai/api_class/batch_prediction.py
@@ -32,7 +32,7 @@ class ForecastingBatchPredictionArgs(BatchPredictionArgs):
         forecasting_horizon (int): The number of timestamps to predict in the future. Range: [1, 1000].
         item_attributes_to_include_in_the_result (list): List of columns to include in the prediction output.
         explain_predictions (bool): If True, calculates explanations for the forecasted values along with predictions.
-        automate_monitoring (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
+        create_monitor (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
     """
     for_eval: bool = dataclasses.field(default=None)
     predictions_start_date: str = dataclasses.field(default=None)
@@ -41,7 +41,7 @@ class ForecastingBatchPredictionArgs(BatchPredictionArgs):
     forecasting_horizon: int = dataclasses.field(default=None)
     item_attributes_to_include_in_the_result: list = dataclasses.field(default=None)
     explain_predictions: bool = dataclasses.field(default=None)
-    automate_monitoring: bool = dataclasses.field(default=None)
+    create_monitor: bool = dataclasses.field(default=None)

     def __post_init__(self):
         self.problem_type = enums.ProblemType.FORECASTING
@@ -100,7 +100,7 @@ class PredictiveModelingBatchPredictionArgs(BatchPredictionArgs):
         explanation_filter_label (str): For classification problems specifies the label to which the explanation bounds are applied.
         output_columns (list): A list of column names to include in the prediction result.
         explain_predictions (bool): If True, calculates explanations for the predicted values along with predictions.
-        automate_monitoring (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
+        create_monitor (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
     """
     for_eval: bool = dataclasses.field(default=None)
     explainer_type: enums.ExplainerType = dataclasses.field(default=None)
@@ -113,7 +113,7 @@ class PredictiveModelingBatchPredictionArgs(BatchPredictionArgs):
     explanation_filter_label: str = dataclasses.field(default=None)
     output_columns: list = dataclasses.field(default=None)
     explain_predictions: bool = dataclasses.field(default=None)
-    automate_monitoring: bool = dataclasses.field(default=None)
+    create_monitor: bool = dataclasses.field(default=None)

     def __post_init__(self):
         self.problem_type = enums.ProblemType.PREDICTIVE_MODELING
@@ -194,10 +194,10 @@ class TrainablePlugAndPlayBatchPredictionArgs(BatchPredictionArgs):
     Args:
         for_eval (bool): If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence, used for model evaluation.
-        automate_monitoring (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
+        create_monitor (bool): Controls whether to automatically create a monitor to calculate the drift each time the batch prediction is run. Defaults to true if not specified.
     """
     for_eval: bool = dataclasses.field(default=None)
-    automate_monitoring: bool = dataclasses.field(default=None)
+    create_monitor: bool = dataclasses.field(default=None)

     def __post_init__(self):
         self.problem_type = enums.ProblemType.CUSTOM_ALGORITHM
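
Callers that passed automate_monitoring need to switch to create_monitor; the documented semantics are unchanged. A minimal sketch (field values are illustrative):

    from abacusai.api_class.batch_prediction import ForecastingBatchPredictionArgs

    args = ForecastingBatchPredictionArgs(
        for_eval=False,
        forecasting_horizon=30,
        create_monitor=True,  # formerly automate_monitoring
    )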
17 changes: 12 additions & 5 deletions abacusai/api_class/dataset_application_connector.py
@@ -8,8 +8,13 @@
 class DatasetConfig(ApiClass):
     """
     An abstract class for dataset configs specific to application connectors.
+    Args:
+        application_connector_type(enums.ApplicationConnectorType): The type of application connector
+        is_documentset (bool): Whether the dataset is a document set
     """
     application_connector_type: enums.ApplicationConnectorType = dataclasses.field(default=None, repr=False, init=False)
+    is_documentset: bool = dataclasses.field(default=None)

     @classmethod
     def _get_builder(cls):
@@ -21,12 +26,16 @@ class ConfluenceDatasetConfig(DatasetConfig):
     """
     Dataset config for Confluence Application Connector
     Args:
         location (str): The location of the pages to fetch
         pull_attachments (bool, optional): Whether to pull attachments for each page
+        space_key (str, optional): The space key to fetch pages from
+        extract_bounding_boxes (bool, optional): Whether to extract bounding boxes from the documents
     """
     location: str = dataclasses.field(default=None)
     pull_attachments: bool = dataclasses.field(default=False)
+    space_key: str = dataclasses.field(default=None)
+    extract_bounding_boxes: bool = dataclasses.field(default=False)

     def __post_init__(self):
         self.application_connector_type = enums.ApplicationConnectorType.CONFLUENCE
@@ -57,13 +66,11 @@ class GoogleDriveDatasetConfig(DatasetConfig):
     Args:
         location (str): The regex location of the files to fetch
-        is_documentset (bool): Whether the dataset is a document set
         csv_delimiter (str, optional): If the file format is CSV, use a specific csv delimiter
         extract_bounding_boxes (bool, optional): Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset if True
         merge_file_schemas (bool, optional): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
     """
     location: str = dataclasses.field(default=None)
-    is_documentset: bool = dataclasses.field(default=None)
     csv_delimiter: str = dataclasses.field(default=None)
     extract_bounding_boxes: bool = dataclasses.field(default=False)
     merge_file_schemas: bool = dataclasses.field(default=False)
@@ -99,13 +106,11 @@ class OneDriveDatasetConfig(DatasetConfig):
     Args:
         location (str): The regex location of the files to fetch
-        is_documentset (bool): Whether the dataset is a document set
         csv_delimiter (str, optional): If the file format is CSV, use a specific csv delimiter
         extract_bounding_boxes (bool, optional): Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset if True
         merge_file_schemas (bool, optional): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
     """
     location: str = dataclasses.field(default=None)
-    is_documentset: bool = dataclasses.field(default=None)
     csv_delimiter: str = dataclasses.field(default=None)
     extract_bounding_boxes: bool = dataclasses.field(default=False)
     merge_file_schemas: bool = dataclasses.field(default=False)
@@ -127,7 +132,6 @@ class SharepointDatasetConfig(DatasetConfig):
         merge_file_schemas (bool, optional): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
     """
     location: str = dataclasses.field(default=None)
-    is_documentset: bool = dataclasses.field(default=None)
     csv_delimiter: str = dataclasses.field(default=None)
     extract_bounding_boxes: bool = dataclasses.field(default=False)
     merge_file_schemas: bool = dataclasses.field(default=False)
@@ -152,11 +156,14 @@ class AbacusUsageMetricsDatasetConfig(DatasetConfig):
     Args:
         include_entire_conversation_history (bool): Whether to show the entire history for this deployment conversation
+        include_all_feedback (bool): Whether to include all feedback for this deployment conversation
     """
     include_entire_conversation_history: bool = dataclasses.field(default=False)
+    include_all_feedback: bool = dataclasses.field(default=False)

     def __post_init__(self):
         self.application_connector_type = enums.ApplicationConnectorType.ABACUSUSAGEMETRICS
+        self.is_documentset = False


 @dataclasses.dataclass
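
is_documentset has moved up to the DatasetConfig base class, so the per-connector duplicates are dropped and every connector config accepts it uniformly. A sketch with the expanded Confluence config (values are illustrative):

    from abacusai.api_class.dataset_application_connector import ConfluenceDatasetConfig

    config = ConfluenceDatasetConfig(
        location='ENG/Runbooks',       # pages to fetch
        space_key='ENG',               # new in this commit
        pull_attachments=True,
        extract_bounding_boxes=False,  # new in this commit
        is_documentset=True,           # now inherited from DatasetConfig
    )
    print(config.to_dict())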