Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Fix for Path issue #1377

Open
wants to merge 1 commit into
base: staging
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions evadb/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,5 @@
IFRAMES = "IFRAMES"
AUDIORATE = "AUDIORATE"
DEFAULT_FUNCTION_EXPRESSION_COST = 100
# Directory holding the built-in functions, derived from this module's own
# path. Stripping len("constants.py") characters (instead of the magic
# constant 13) keeps the slice correct even if the separator style differs,
# and preserves the original trailing separator from __file__.
DBFUNCTIONS = __file__[: -len("constants.py")] + "functions/"
# Fallback location of the functions package inside an installed environment.
# NOTE(review): relative "./Lib/site-packages" layout is Windows-venv specific
# and assumes the process CWD is the environment root — confirm with callers.
ENVFUNCTIONS = "./Lib/site-packages/evadb/functions/"
4 changes: 2 additions & 2 deletions evadb/functions/abstract/pytorch_abstract_function.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
import pandas as pd
from numpy.typing import ArrayLike

from evadb.configuration.configuration_manager import ConfigurationManager
from evadb.functions.abstract.abstract_function import (
AbstractClassifierFunction,
AbstractTransformationFunction,
Expand Down Expand Up @@ -73,8 +74,7 @@ def __call__(self, *args, **kwargs) -> pd.DataFrame:
if isinstance(frames, pd.DataFrame):
frames = frames.transpose().values.tolist()[0]

# hardcoding it for now, need to be fixed @xzdandy
gpu_batch_size = 1
gpu_batch_size = ConfigurationManager().get_value("executor", "gpu_batch_size")
import torch

tens_batch = torch.cat([self.transform(x) for x in frames]).to(
Expand Down
25 changes: 12 additions & 13 deletions evadb/functions/chatgpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
from retry import retry

from evadb.catalog.catalog_type import NdArrayType
from evadb.configuration.configuration_manager import ConfigurationManager
from evadb.functions.abstract.abstract_function import AbstractFunction
from evadb.functions.decorators.decorators import forward, setup
from evadb.functions.decorators.io_descriptors.data_types import PandasDataframe
Expand Down Expand Up @@ -84,12 +85,10 @@ def setup(
self,
model="gpt-3.5-turbo",
temperature: float = 0,
openai_api_key="",
) -> None:
assert model in _VALID_CHAT_COMPLETION_MODEL, f"Unsupported ChatGPT {model}"
self.model = model
self.temperature = temperature
self.openai_api_key = openai_api_key

@forward(
input_signatures=[
Expand All @@ -115,20 +114,20 @@ def setup(
)
def forward(self, text_df):
try_to_import_openai()
from openai import OpenAI

api_key = self.openai_api_key
if len(self.openai_api_key) == 0:
api_key = os.environ.get("OPENAI_API_KEY", "")
assert (
len(api_key) != 0
), "Please set your OpenAI API key using SET OPENAI_API_KEY = 'sk-' or environment variable (OPENAI_API_KEY)"

client = OpenAI(api_key=api_key)
import openai

@retry(tries=6, delay=20)
def completion_with_backoff(**kwargs):
return client.chat.completions.create(**kwargs)
return openai.ChatCompletion.create(**kwargs)

# Register API key, try configuration manager first
openai.api_key = ConfigurationManager().get_value("third_party", "OPENAI_KEY")
# If not found, try OS Environment Variable
if len(openai.api_key) == 0:
openai.api_key = os.environ.get("OPENAI_KEY", "")
assert (
len(openai.api_key) != 0
), "Please set your OpenAI API key in evadb.yml file (third_party, open_api_key) or environment variable (OPENAI_KEY)"

queries = text_df[text_df.columns[0]]
content = text_df[text_df.columns[0]]
Expand Down
25 changes: 13 additions & 12 deletions evadb/functions/dalle.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from PIL import Image

from evadb.catalog.catalog_type import NdArrayType
from evadb.configuration.configuration_manager import ConfigurationManager
from evadb.functions.abstract.abstract_function import AbstractFunction
from evadb.functions.decorators.decorators import forward
from evadb.functions.decorators.io_descriptors.data_types import PandasDataframe
Expand All @@ -33,8 +34,8 @@ class DallEFunction(AbstractFunction):
def name(self) -> str:
return "DallE"

def setup(self, openai_api_key="") -> None:
self.openai_api_key = openai_api_key
def setup(self) -> None:
pass

@forward(
input_signatures=[
Expand All @@ -56,25 +57,25 @@ def setup(self, openai_api_key="") -> None:
)
def forward(self, text_df):
try_to_import_openai()
from openai import OpenAI
import openai

api_key = self.openai_api_key
if len(self.openai_api_key) == 0:
api_key = os.environ.get("OPENAI_API_KEY", "")
# Register API key, try configuration manager first
openai.api_key = ConfigurationManager().get_value("third_party", "OPENAI_KEY")
# If not found, try OS Environment Variable
if openai.api_key is None or len(openai.api_key) == 0:
openai.api_key = os.environ.get("OPENAI_KEY", "")
assert (
len(api_key) != 0
), "Please set your OpenAI API key using SET OPENAI_API_KEY = 'sk-' or environment variable (OPENAI_API_KEY)"

client = OpenAI(api_key=api_key)
len(openai.api_key) != 0
), "Please set your OpenAI API key in evadb.yml file (third_party, open_api_key) or environment variable (OPENAI_KEY)"

def generate_image(text_df: PandasDataframe):
results = []
queries = text_df[text_df.columns[0]]
for query in queries:
response = client.images.generate(prompt=query, n=1, size="1024x1024")
response = openai.Image.create(prompt=query, n=1, size="1024x1024")

# Download the image from the link
image_response = requests.get(response.data[0].url)
image_response = requests.get(response["data"][0]["url"])
image = Image.open(BytesIO(image_response.content))

# Convert the image to an array format suitable for the DataFrame
Expand Down
74 changes: 8 additions & 66 deletions evadb/functions/forecast.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
# limitations under the License.


import os
import pickle

import pandas as pd
Expand All @@ -38,86 +37,29 @@ def setup(
id_column_rename: str,
horizon: int,
library: str,
conf: int,
):
self.library = library
if "neuralforecast" in self.library:
from neuralforecast import NeuralForecast

loaded_model = NeuralForecast.load(path=model_path)
self.model_name = model_name[4:] if "Auto" in model_name else model_name
else:
with open(model_path, "rb") as f:
loaded_model = pickle.load(f)
self.model_name = model_name
f = open(model_path, "rb")
loaded_model = pickle.load(f)
f.close()
self.model = loaded_model
self.model_name = model_name
self.predict_column_rename = predict_column_rename
self.time_column_rename = time_column_rename
self.id_column_rename = id_column_rename
self.horizon = int(horizon)
self.library = library
self.suggestion_dict = {
1: "Predictions are flat. Consider using LIBRARY 'neuralforecast' for more accurate predictions.",
}
self.conf = conf
self.hypers = None
self.rmse = None
if os.path.isfile(model_path + "_rmse"):
with open(model_path + "_rmse", "r") as f:
self.rmse = float(f.readline())
if "arima" in model_name.lower():
self.hypers = "p,d,q: " + f.readline()

def forward(self, data) -> pd.DataFrame:
log_str = ""
if self.library == "statsforecast":
forecast_df = self.model.predict(
h=self.horizon, level=[self.conf]
).reset_index()
forecast_df = self.model.predict(h=self.horizon)
else:
forecast_df = self.model.predict().reset_index()

# Feedback
if len(data) == 0 or list(list(data.iloc[0]))[0] is True:
# Suggestions
suggestion_list = []
# 1: Flat predictions
if self.library == "statsforecast":
for type_here in forecast_df["unique_id"].unique():
if (
forecast_df.loc[forecast_df["unique_id"] == type_here][
self.model_name
].nunique()
== 1
):
suggestion_list.append(1)

for suggestion in set(suggestion_list):
log_str += "\nSUGGESTION: " + self.suggestion_dict[suggestion]

# Metrics
if self.rmse is not None:
log_str += "\nMean normalized RMSE: " + str(self.rmse)
if self.hypers is not None:
log_str += "\nHyperparameters: " + self.hypers

print(log_str)

forecast_df = self.model.predict()
forecast_df.reset_index(inplace=True)
forecast_df = forecast_df.rename(
columns={
"unique_id": self.id_column_rename,
"ds": self.time_column_rename,
self.model_name
if self.library == "statsforecast"
else self.model_name + "-median": self.predict_column_rename,
self.model_name
+ "-lo-"
+ str(self.conf): self.predict_column_rename
+ "-lo",
self.model_name
+ "-hi-"
+ str(self.conf): self.predict_column_rename
+ "-hi",
self.model_name: self.predict_column_rename,
}
)[: self.horizon * forecast_df["unique_id"].nunique()]
return forecast_df
35 changes: 8 additions & 27 deletions evadb/functions/function_bootstrap_queries.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@
from evadb.database import EvaDBDatabase
from evadb.server.command_handler import execute_query_fetch_all

import sys
import subprocess

NDARRAY_DIR = "ndarray"
TUTORIALS_DIR = "tutorials"

Expand Down Expand Up @@ -214,30 +217,6 @@
EvaDB_INSTALLATION_DIR
)

Upper_function_query = """CREATE FUNCTION IF NOT EXISTS UPPER
INPUT (input ANYTYPE)
OUTPUT (output NDARRAY STR(ANYDIM))
IMPL '{}/functions/helpers/upper.py';
""".format(
EvaDB_INSTALLATION_DIR
)

Lower_function_query = """CREATE FUNCTION IF NOT EXISTS LOWER
INPUT (input ANYTYPE)
OUTPUT (output NDARRAY STR(ANYDIM))
IMPL '{}/functions/helpers/lower.py';
""".format(
EvaDB_INSTALLATION_DIR
)

Concat_function_query = """CREATE FUNCTION IF NOT EXISTS CONCAT
INPUT (input ANYTYPE)
OUTPUT (output NDARRAY STR(ANYDIM))
IMPL '{}/functions/helpers/concat.py';
""".format(
EvaDB_INSTALLATION_DIR
)


def init_builtin_functions(db: EvaDBDatabase, mode: str = "debug") -> None:
"""Load the built-in functions into the system during system bootstrapping.
Expand Down Expand Up @@ -285,9 +264,6 @@ def init_builtin_functions(db: EvaDBDatabase, mode: str = "debug") -> None:
Yolo_function_query,
stablediffusion_function_query,
dalle_function_query,
Upper_function_query,
Lower_function_query,
Concat_function_query,
]

# if mode is 'debug', add debug functions
Expand All @@ -306,6 +282,11 @@ def init_builtin_functions(db: EvaDBDatabase, mode: str = "debug") -> None:
# ignore exceptions during the bootstrapping phase due to missing packages
for query in queries:
try:
#Uncomment to force pip installs onto local device
#subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'norfair'])
#subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'ultralytics'])
#subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'facenet-pytorch'])
#cursor.query("DROP FUNCTION IF EXISTS NorFairTracker;").df()
execute_query_fetch_all(
db, query, do_not_print_exceptions=False, do_not_raise_exceptions=True
)
Expand Down
37 changes: 37 additions & 0 deletions evadb/functions/norfair/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
"""
A customizable lightweight Python library for real-time multi-object tracking.

Examples
--------
>>> from norfair import Detection, Tracker, Video, draw_tracked_objects
>>> detector = MyDetector() # Set up a detector
>>> video = Video(input_path="video.mp4")
>>> tracker = Tracker(distance_function="euclidean", distance_threshold=50)
>>> for frame in video:
>>> detections = detector(frame)
>>> norfair_detections = [Detection(points) for points in detections]
>>> tracked_objects = tracker.update(detections=norfair_detections)
>>> draw_tracked_objects(frame, tracked_objects)
>>> video.write(frame)
"""
import sys

from .distances import *
from .drawing import *
from .filter import (
FilterPyKalmanFilterFactory,
NoFilterFactory,
OptimizedKalmanFilterFactory,
)
from .tracker import Detection, Tracker
from .utils import get_cutout, print_objects_as_table
from .video import Video

if sys.version_info >= (3, 8):
import importlib.metadata

__version__ = importlib.metadata.version(__name__)
elif sys.version_info < (3, 8):
import importlib_metadata

__version__ = importlib_metadata.version(__name__)
Loading