chore: _
 config.py | 20 ++++++++++----------
@@ -8,8 +8,8 @@ class DataConfig:
 ---
 Translation:
 {}"""
-    train_split: float = 0.9
-    max_samples: int | None = 3000
+    train_split: float = 0.95
+    max_samples: int | None = 5000
 
 
 @dataclass
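The DataConfig hunk raises the held-in fraction and the sample cap together. A minimal sketch of how these two values would typically be applied, assuming the Hugging Face datasets library; the file name translations.jsonl and the seed are hypothetical, and only the two constants come from the diff:

from datasets import load_dataset

# Hedged sketch: apply DataConfig's new defaults to a raw corpus.
ds = load_dataset("json", data_files="translations.jsonl", split="train")  # hypothetical source

max_samples = 5000   # DataConfig.max_samples (was 3000)
train_split = 0.95   # DataConfig.train_split (was 0.9)

if max_samples is not None:
    ds = ds.select(range(min(max_samples, len(ds))))
split = ds.train_test_split(test_size=1 - train_split, seed=42)  # seed is an assumption
train_ds, eval_ds = split["train"], split["test"]
# 5000 rows at a 0.95 split -> 4750 train / 250 eval.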
@@ -31,7 +31,7 @@ class TrainingConfig:
     base_model: str = "unsloth/Qwen2.5-7B"
     max_seq_length: int = 6144
     dtype: str | None = None
-    load_in_4bit: bool = False
+    load_in_4bit: bool = True
 
     # LoRA
     lora_r: int = 64
@@ -48,7 +48,7 @@ class TrainingConfig:
             "down_proj",
         ]
     )
-    use_gradient_checkpointing: str = "unsloth"
+    use_gradient_checkpointing: str = True
     random_state: int = 3407
     use_rslora: bool = False
     loftq_config: dict | None = None
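These base-model and LoRA fields, together with load_in_4bit above, line up with unsloth's documented QLoRA setup. A sketch of how they would plausibly be consumed; lora_alpha, lora_dropout, and bias do not appear in this diff and are assumed from unsloth's stock example:

from unsloth import FastLanguageModel

# Sketch only: unsloth's QLoRA recipe wired with this commit's values.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/Qwen2.5-7B",   # base_model
    max_seq_length=6144,
    dtype=None,                        # let unsloth auto-pick bf16/fp16
    load_in_4bit=True,                 # new default: 4-bit base weights
)
model = FastLanguageModel.get_peft_model(
    model,
    r=64,                              # lora_r
    target_modules=[                   # only "down_proj" is visible in this hunk;
        "q_proj", "k_proj", "v_proj",  # the rest is unsloth's usual set (assumed)
        "o_proj", "gate_proj", "up_proj", "down_proj",
    ],
    lora_alpha=64,                     # assumed; not shown in the diff
    lora_dropout=0,                    # assumed
    bias="none",                       # assumed
    use_gradient_checkpointing=True,   # new default; the old "unsloth" value selects the offloaded variant
    random_state=3407,
    use_rslora=False,
    loftq_config=None,
)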
@@ -56,11 +56,11 @@ class TrainingConfig:
     # training args
     per_device_train_batch_size: int = 16
     gradient_accumulation_steps: int = 2
-    warmup_ratio: float = 0.05
+    warmup_ratio: float = 0.1
     max_grad_norm: float = 1.0
     num_train_epochs: float = 1
     learning_rate: float = 5e-4
-    weight_decay: float = 0
+    weight_decay: float = 0.01
     lr_scheduler_type: str = "cosine"
     logging_steps: int = 1
 
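With these defaults the effective optimization batch is per_device_train_batch_size × gradient_accumulation_steps = 16 × 2 = 32 sequences per optimizer step and device. A hedged sketch of the same hyperparameters expressed as TRL's SFTConfig (the packing and dataset_num_proc fields in the next hunk suggest the repo trains with TRL's SFTTrainer, but the trainer wiring is not part of this diff):

from trl import SFTConfig

# Sketch: this hunk's training args mapped field-for-field onto SFTConfig.
args = SFTConfig(
    output_dir="/workspace/output/",
    per_device_train_batch_size=16,
    gradient_accumulation_steps=2,   # effective batch 16 * 2 = 32
    warmup_ratio=0.1,                # new default, up from 0.05
    max_grad_norm=1.0,
    num_train_epochs=1,
    learning_rate=5e-4,
    weight_decay=0.01,               # new default, was 0
    lr_scheduler_type="cosine",
    logging_steps=1,
)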
@@ -70,15 +70,15 @@ class TrainingConfig:
     save_total_limit: int | None = 3
 
     # dataset
-    dataset_num_proc: int = 8
+    dataset_num_proc: int = 4
     packing: bool = True
 
     # eval
     fp16_full_eval: bool = True
-    per_device_eval_batch_size: int = 16
-    eval_accumulation_steps: int = 4
+    per_device_eval_batch_size: int = 64
+    eval_accumulation_steps: int = 1
     eval_strategy: str = "steps"
-    eval_steps: int = 5
+    eval_steps: int = 10
 
     # output
     output_dir: str = "/workspace/output/"
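The eval changes trade frequency for throughput: evaluation holds no gradients or optimizer state, so the batch can be 4× the training batch, eval_accumulation_steps=1 moves accumulated predictions off the GPU after every eval step, and evaluation now fires half as often. Continuing the hedged SFTConfig sketch from above with this hunk's fields:

from trl import SFTConfig

# Sketch, continued: eval/dataset/output fields from this hunk.
args = SFTConfig(
    output_dir="/workspace/output/",
    save_total_limit=3,               # keep only the three newest checkpoints
    dataset_num_proc=4,               # preprocessing workers, down from 8
    packing=True,                     # pack short samples into full 6144-token sequences
    fp16_full_eval=True,              # run eval in fp16 to cut activation memory
    per_device_eval_batch_size=64,    # was 16; no gradients held during eval
    eval_accumulation_steps=1,        # was 4; flush predictions to CPU every step
    eval_strategy="steps",
    eval_steps=10,                    # was 5; evaluate half as often
)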