
Commit

initial eval server commit
veekaybee committed Jan 29, 2024
1 parent 3f9fabb commit 6e68d13
Showing 2 changed files with 29 additions and 11 deletions.
8 changes: 7 additions & 1 deletion src/flamingo/jobs/lm_harness/config.py
@@ -23,11 +23,17 @@ class LMHarnessEvaluatorConfig(BaseFlamingoConfig):
    num_fewshot: int | None = None
    limit: int | float | None = None

class InferenceServerConfig(BaseFlamingoConfig):
    """Base URL of an inference server endpoint."""

    base_url: str | None



class LMHarnessJobConfig(BaseFlamingoConfig):
    """Configuration to run an lm-evaluation-harness evaluation job."""

    model: AutoModelConfig
    model: AutoModelConfig | InferenceServerConfig
    evaluator: LMHarnessEvaluatorConfig
    quantization: QuantizationConfig | None = None
    tracking: WandbRunConfig | None = None
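With this change, the model field of LMHarnessJobConfig accepts either an AutoModelConfig or the new InferenceServerConfig. A minimal sketch of building a job config that targets a hosted endpoint instead of a local HF checkpoint, assuming the flamingo configs construct like ordinary pydantic models; the tasks field on LMHarnessEvaluatorConfig is an assumption, since only num_fewshot and limit are visible in this diff:

from flamingo.jobs.lm_harness.config import (
    InferenceServerConfig,
    LMHarnessEvaluatorConfig,
    LMHarnessJobConfig,
)

# Point the evaluation at an OpenAI-compatible completions endpoint.
server = InferenceServerConfig(base_url="http://localhost:8000/v1")

job_config = LMHarnessJobConfig(
    model=server,  # previously this field only accepted an AutoModelConfig
    evaluator=LMHarnessEvaluatorConfig(tasks=["hellaswag"], num_fewshot=0),  # "tasks" is assumed
)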
32 changes: 22 additions & 10 deletions src/flamingo/jobs/lm_harness/entrypoint.py
@@ -4,6 +4,7 @@
import ray
import wandb
from lm_eval.models.huggingface import HFLM
from lm_eval.models.openai_completions import OpenaiCompletionsLM
from peft import PeftConfig

from flamingo.integrations.huggingface import resolve_loadable_path
@@ -25,19 +26,30 @@ def build_evaluation_artifact(run_name: str, results: dict[str, dict[str, Any]])
    return artifact


def load_harness_model(config: LMHarnessJobConfig) -> HFLM:
def load_harness_model(config: LMHarnessJobConfig) -> HFLM | OpenaiCompletionsLM:
    # Helper method to return lm-harness model wrapper
    def _loader(pretrained: str, tokenizer: str, peft: str | None):
        quantization_kwargs = config.quantization.dict() if config.quantization else {}
        return HFLM(
            pretrained=pretrained,
            tokenizer=tokenizer,
            peft=peft,
            device="cuda" if config.ray.num_gpus > 0 else None,
            trust_remote_code=config.model.trust_remote_code,
            dtype=config.model.torch_dtype if config.model.torch_dtype else "auto",
            **quantization_kwargs,
        )

"""Load model directly from HF if HF path, otherwise from an inference server URL"""

        # Load weights locally via the lm-harness HF wrapper
        if isinstance(config.model, AutoModelConfig):
            return HFLM(
                pretrained=pretrained,
                tokenizer=tokenizer,
                peft=peft,
                device="cuda" if config.ray.num_gpus > 0 else None,
                trust_remote_code=config.model.trust_remote_code,
                dtype=config.model.torch_dtype if config.model.torch_dtype else "auto",
                **quantization_kwargs,
            )
        # Otherwise evaluate against an OpenAI-compatible inference server endpoint
        elif isinstance(config.model, InferenceServerConfig):
            return OpenaiCompletionsLM(
                model=pretrained,
                base_url=config.model.base_url,
                tokenizer=tokenizer,
            )


    # We don't know if the checkpoint is adapter weights or merged model weights
    # Try to load as an adapter and fall back to the checkpoint containing the full model
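For reference, the type-based dispatch in _loader hinges on isinstance taking the object and the class as two arguments. A self-contained sketch of the same pattern, using stand-in dataclasses rather than the real flamingo config classes:

from dataclasses import dataclass


@dataclass
class AutoModelConfig:
    path: str


@dataclass
class InferenceServerConfig:
    base_url: str | None = None


def describe_backend(model_config) -> str:
    # isinstance(obj, Class) is the correct form; isinstance(obj) == Class raises a TypeError.
    if isinstance(model_config, AutoModelConfig):
        return f"local HF checkpoint at {model_config.path}"
    elif isinstance(model_config, InferenceServerConfig):
        return f"inference server at {model_config.base_url}"
    raise ValueError(f"unexpected model config type: {type(model_config)}")


print(describe_backend(InferenceServerConfig(base_url="http://localhost:8000/v1")))
# -> inference server at http://localhost:8000/v1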
