Skip to content

Commit

Permalink
Merge pull request #232 from shalousun/main
Browse files Browse the repository at this point in the history
feat: support the `--outputs-dir` option in the evalscope perf command for configuring the output directory.
  • Loading branch information
Yunnglin authored Dec 9, 2024
2 parents 365aa59 + 856331e commit 5aa03b4
Show file tree
Hide file tree
Showing 3 changed files with 11 additions and 5 deletions.
9 changes: 8 additions & 1 deletion evalscope/perf/arguments.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,9 @@ class Arguments:
wandb_api_key: Optional[str] = None # WandB API key for logging
name: Optional[str] = None # Name for the run

# Output settings
outputs_dir: str = 'outputs'

# Prompt settings
max_prompt_length: int = sys.maxsize # Maximum length of the prompt
min_prompt_length: int = 0 # Minimum length of the prompt
Expand All @@ -57,7 +60,6 @@ class Arguments:

@staticmethod
def from_args(args):

return Arguments(
model=args.model,
attn_implementation=args.attn_implementation,
Expand All @@ -72,6 +74,7 @@ def from_args(args):
headers=args.headers,
wandb_api_key=args.wandb_api_key,
name=args.name,
outputs_dir=args.outputs_dir,
debug=args.debug,
tokenizer_path=args.tokenizer_path,
api=args.api,
Expand Down Expand Up @@ -152,6 +155,9 @@ def add_argument(parser: argparse.ArgumentParser):
parser.add_argument('--prompt', type=str, required=False, default=None, help='Specified the request prompt')
parser.add_argument('--query-template', type=str, default=None, help='Specify the query template')

# Output settings
parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')

# Dataset settings
parser.add_argument('--dataset', type=str, default='openqa', help='Specify the dataset')
parser.add_argument('--dataset-path', type=str, required=False, help='Path to the dataset file')
Expand All @@ -170,6 +176,7 @@ def add_argument(parser: argparse.ArgumentParser):
parser.add_argument('--stream', action='store_true', help='Stream output with SSE', default=None)
parser.add_argument('--temperature', type=float, help='The sample temperature', default=None)
parser.add_argument('--top-p', type=float, help='Sampling top p', default=None)

# yapf: enable


Expand Down
4 changes: 2 additions & 2 deletions evalscope/perf/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ async def statistic_benchmark_metric_worker(benchmark_data_queue: asyncio.Queue,
api_plugin_class = ApiRegistry(args.api)
api_plugin = api_plugin_class(args.tokenizer_path)

result_db_path = get_result_db_path(args.name, args.model)
result_db_path = get_result_db_path(args.name, args.model, args.outputs_dir)
# Initialize wandb
if args.wandb_api_key:
import wandb
Expand Down Expand Up @@ -192,7 +192,7 @@ async def statistic_benchmark_metric_worker(benchmark_data_queue: asyncio.Queue,
async def start_server(args: Arguments) -> bool:
if args.api.startswith('local'):
# start local server
server = threading.Thread(target=start_app, args=(copy.deepcopy(args), ), daemon=True)
server = threading.Thread(target=start_app, args=(copy.deepcopy(args),), daemon=True)
server.start()

if args.dataset.startswith('speed_benchmark'):
Expand Down
3 changes: 1 addition & 2 deletions evalscope/perf/utils/db_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,9 +88,8 @@ def insert_benchmark_data(cursor: sqlite3.Cursor, benchmark_data: BenchmarkData)
cursor.execute(query, common_columns)


def get_result_db_path(name, model):
def get_result_db_path(name, model, output_dir):
current_time = datetime.now().strftime('%Y%m%d_%H%M%S')
output_dir = './outputs'
result_db_path = os.path.join(output_dir, f'{name or model}_perf', current_time, 'benchmark_data.db')

if not os.path.exists(os.path.dirname(result_db_path)):
Expand Down

0 comments on commit 5aa03b4

Please sign in to comment.