From 8c183586334c7f0dfc986a5a661c34ed20c267cc Mon Sep 17 00:00:00 2001
From: David Hall
Date: Tue, 13 Feb 2024 13:59:40 -0800
Subject: [PATCH] reduce batch size for lora_llama2

---
 config/lora_llama2.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/lora_llama2.yaml b/config/lora_llama2.yaml
index f4241f44e..cf6592153 100644
--- a/config/lora_llama2.yaml
+++ b/config/lora_llama2.yaml
@@ -9,7 +9,7 @@ trainer:
     project: "levanter-lora"
     tags: ["lora", "llama2"]
   num_train_steps: 5000  # tune to suit your needs
-  train_batch_size: 128
+  train_batch_size: 64

   # if using model parallelism, this is useful:
   tensor_parallel_axes: ["mlp", "heads"]