support falcon model #72

hiyouga
2023-07-05 15:00:06 +08:00
parent 966b5c70fc
commit c136f362c1
5 changed files with 46 additions and 29 deletions


@@ -198,6 +198,7 @@ class FinetuningArguments:
         metadata={"help": "Number of decoder blocks in the model. \
                   LLaMA choices: [\"32\", \"40\", \"60\", \"80\"], \
                   BLOOM choices: [\"24\", \"30\", \"70\"], \
+                  Falcon choices: [\"32\", \"60\"], \
                   Baichuan choices: [\"32\"]"}
     )
     num_layer_trainable: Optional[int] = field(
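
For reference, the two new Falcon choices correspond to the public checkpoints: falcon-7b has 32 decoder blocks and falcon-40b has 60. A quick way to confirm the count (a minimal sketch, assuming access to the Hugging Face Hub; the model IDs are the public tiiuae repos):

from transformers import AutoConfig

for model_id in ("tiiuae/falcon-7b", "tiiuae/falcon-40b"):
    config = AutoConfig.from_pretrained(model_id, trust_remote_code=True)
    # Older remote-code Falcon configs expose n_layer; newer ones use num_hidden_layers.
    n_blocks = getattr(config, "num_hidden_layers", None) or getattr(config, "n_layer", None)
    print(model_id, n_blocks)  # expected: 32 and 60
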
@@ -208,7 +209,7 @@ class FinetuningArguments:
         default="mlp",
         metadata={"help": "Name of trainable modules for Freeze fine-tuning. \
                   LLaMA choices: [\"mlp\", \"self_attn\"], \
-                  BLOOM choices: [\"mlp\", \"self_attention\"], \
+                  BLOOM & Falcon choices: [\"mlp\", \"self_attention\"], \
                   Baichuan choices: [\"mlp\", \"self_attn\"]"}
     )
     lora_rank: Optional[int] = field(
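
To make the new option concrete, here is a minimal sketch of what Freeze fine-tuning with name_module_trainable="self_attention" amounts to, not this repo's exact implementation (num_layer_trainable=3 is an arbitrary illustration):

from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
num_hidden_layers = 32                    # Falcon-7B block count
num_layer_trainable = 3                   # illustrative: train only the last 3 blocks
name_module_trainable = "self_attention"  # the BLOOM & Falcon module name above

# Unfreeze only the chosen sub-module inside the last num_layer_trainable blocks;
# everything else stays frozen.
trainable_ids = range(num_hidden_layers - num_layer_trainable, num_hidden_layers)
trainable_tags = [f".{i}.{name_module_trainable}" for i in trainable_ids]
for name, param in model.named_parameters():
    param.requires_grad = any(tag in name for tag in trainable_tags)
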
@@ -227,7 +228,7 @@ class FinetuningArguments:
         default="q_proj,v_proj",
         metadata={"help": "Name(s) of target modules to apply LoRA. Use comma to separate multiple modules. \
                   LLaMA choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
-                  BLOOM choices: [\"query_key_value\", \"self_attention.dense\", \"mlp.dense\"], \
+                  BLOOM & Falcon choices: [\"query_key_value\", \"self_attention.dense\", \"mlp.dense\"], \
                   Baichuan choices: [\"W_pack\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"]"}
     )
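
The Falcon target modules listed above plug into LoRA in the usual way; sketched here with the standalone peft API rather than this repo's trainer (rank, alpha, and dropout values are illustrative):

from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
lora_config = LoraConfig(
    r=8,                                  # illustrative hyperparameters
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["query_key_value"],   # Falcon's fused q/k/v projection
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()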