# arguments.py

from typing import Dict, Optional, List
from dataclasses import dataclass, field

import transformers

from supported_models import MODEL_HF_PATH, MODEL_FAMILIES


@dataclass
class ModelArguments:
    """Selects the model to fine-tune and resolves its HF path and family."""

    model_id: str = field(default="llava-1.5-7b")
    model_local_path: Optional[str] = field(default=None)

    def __post_init__(self):
        # Resolve the short model_id to its Hugging Face Hub path and model
        # family; fail fast on unknown ids.
        assert self.model_id in MODEL_HF_PATH, f"Unknown model_id: {self.model_id}"
        self.model_hf_path: str = MODEL_HF_PATH[self.model_id]
        assert self.model_id in MODEL_FAMILIES, f"Unknown model_id: {self.model_id}"
        self.model_family_id: str = MODEL_FAMILIES[self.model_id]
        # Fall back to the hub path when no local checkpoint is given.
        if not self.model_local_path:
            self.model_local_path = self.model_hf_path

@dataclass
class DataArguments:
    """Paths and conversation-format options for the training/eval data."""

    data_path: str = field(
        default=None, metadata={"help": "Path to the training data json file."}
    )
    eval_data_path: Optional[str] = field(
        default=None, metadata={"help": "Path to the evaluation data json file."}
    )
    image_folder: Optional[str] = field(default=None)
    video_folder: Optional[str] = field(default=None)
    num_frames: Optional[int] = field(default=8)  # frames sampled per video
    # Keys identifying the user and assistant turns in the conversation json.
    user_key: Optional[str] = field(default="human")
    assistant_key: Optional[str] = field(default="gpt")

@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """Extends HF TrainingArguments with multimodal fine-tuning switches."""

    model_max_length: int = field(
        default=1024,
        metadata={
            "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
        },
    )
    use_flash_attn: bool = field(default=False)
    train_vision_encoder: bool = field(default=False)
    train_vision_projector: bool = field(default=False)
    mask_question_tokens: bool = field(default=True)  # loss only on answer tokens

    def __post_init__(self):
        super().__post_init__()
        # Keep extra dataset columns (e.g. image paths) that the collator needs.
        self.remove_unused_columns = False

@dataclass
class LoraArguments:
    """LoRA / QLoRA configuration for parameter-efficient fine-tuning."""

    use_lora: bool = field(default=True)
    use_vision_lora: bool = field(default=True)  # also apply LoRA to the vision tower
    q_lora: bool = field(default=False)  # quantize the backbone (QLoRA)
    lora_r: int = field(default=8)
    lora_alpha: int = field(default=16)
    lora_dropout: float = field(default=0.05)
    lora_weight_path: str = ""
    lora_bias: str = "none"
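

# Usage sketch (illustrative addition, not part of the original file): these
# four dataclasses are the kind typically parsed together with
# transformers.HfArgumentParser. The repo's real train script may wire them
# up differently; this only demonstrates the parsing step. Note that
# transformers.TrainingArguments usually requires --output_dir on the CLI.
if __name__ == "__main__":
    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments, LoraArguments)
    )
    model_args, data_args, training_args, lora_args = (
        parser.parse_args_into_dataclasses()
    )
    print(model_args, data_args, training_args, lora_args, sep="\n\n")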