# Config for multi-device full finetuning in full_finetune_distributed.py
# using a Mistral 7B model
#
# This config uses hyperparameters based on a small set of experiments and information
# available on various forums. These are not meant to replicate the numbers
# from the paper
#
# This config assumes that you've run the following command before launching
# this run:
# tune download mistralai/Mistral-7B-v0.1 --hf-token <HF_TOKEN> --output-dir /tmp/Mistral-7B-v0.1
#
# Run this config on 4 GPUs using the following:
# tune run --nnodes 1 --nproc_per_node 4 full_finetune_distributed --config mistral/7B_full
#
# You can add specific overrides through the command line. For example
# to override the checkpointer directory while launching training
# you can run:
# tune run --nnodes 1 --nproc_per_node 4 full_finetune_distributed --config mistral/7B_full checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
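#
# As a further illustration (values here are only examples), any other key in this
# file can be overridden the same way, e.g. the batch size and learning rate:
# tune run --nnodes 1 --nproc_per_node 4 full_finetune_distributed --config mistral/7B_full batch_size=4 optimizer.lr=2e-6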
#
# This config works best when the model is being fine-tuned on 2+ GPUs.
# Single-device full finetuning requires more memory optimizations; it's
# best to use 7B_full_single_device.yaml for those cases.

# Tokenizer
tokenizer:
  _component_: torchtune.models.mistral.mistral_tokenizer
  path: /tmp/Mistral-7B-v0.1/tokenizer.model

# Dataset
dataset:
  _component_: torchtune.datasets.alpaca_dataset
  train_on_input: True
seed: null
shuffle: True

# Model Arguments
model:
  _component_: torchtune.models.mistral.mistral_7b

checkpointer:
  _component_: torchtune.utils.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Mistral-7B-v0.1
  checkpoint_files: [
    pytorch_model-00001-of-00002.bin,
    pytorch_model-00002-of-00002.bin
  ]
  recipe_checkpoint: null
  output_dir: /tmp/Mistral-7B-v0.1/
  model_type: MISTRAL
resume_from_checkpoint: False
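
# Note: checkpoint_files above are the sharded HF weight files pulled by the
# `tune download` command in the header; the checkpointer reads them from
# checkpoint_dir and writes fine-tuned checkpoints in the same format to output_dir.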

# Fine-tuning arguments
batch_size: 2
epochs: 3
optimizer:
  _component_: torch.optim.AdamW
  lr: 5e-6
loss:
  _component_: torch.nn.CrossEntropyLoss
max_steps_per_epoch: null
gradient_accumulation_steps: 1
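
# Note: with the 4-GPU launch command above, the effective global batch size is
# batch_size * gradient_accumulation_steps * num_gpus = 2 * 1 * 4 = 8; raising
# gradient_accumulation_steps increases it without increasing per-device peak memory.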

# Training env
device: cuda

# Memory management
enable_activation_checkpointing: True

# Reduced precision
dtype: bf16

# Logging
metric_logger:
  _component_: torchtune.utils.metric_logging.DiskLogger
  log_dir: ${output_dir}
output_dir: /tmp/Mistral-7B-v0.1/
log_every_n_steps: null
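
# A possible alternative logger (sketch only; assumes torchtune's WandBLogger is
# available in this version, and the project name below is purely illustrative):
# metric_logger:
#   _component_: torchtune.utils.metric_logging.WandBLogger
#   project: mistral-7b-full-finetune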