-
Notifications
You must be signed in to change notification settings - Fork 23
Expand file tree
/
Copy pathencoder.py
More file actions
133 lines (99 loc) · 4.01 KB
/
encoder.py
File metadata and controls
133 lines (99 loc) · 4.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
from datasets import load_dataset, Dataset
import pandas as pd
import argparse
from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
from transformers import AutoTokenizer
import evaluate
import numpy as np
import transformers
# ---- Command-line configuration ----
parser = argparse.ArgumentParser(description='Configure training parameters.')
parser.add_argument('--batch_size', type=int, default=64, help='Batch size for training')
parser.add_argument('--num_train_epochs', type=int, default=30, help='Number of epochs for training')
parser.add_argument('--learning_rate', type=float, default=2e-5, help='Learning rate for the optimizer')
parser.add_argument('--model_checkpoint', type=str, default="VietAI/vit5-base", help='Model checkpoint to use')
args = parser.parse_args()

# Mapping between integer class ids and human-readable sentiment labels.
id2label = {0: "negative", 1: "neutral", 2: "positive"}
label2id = {"negative": 0, "neutral": 1, 'positive': 2}

# Assign variables from args
batch_size = args.batch_size
num_train_epochs = args.num_train_epochs
learning_rate = args.learning_rate
model_checkpoint = args.model_checkpoint

# 3-way sequence-classification head on top of the chosen checkpoint.
model = AutoModelForSequenceClassification.from_pretrained(
    model_checkpoint, num_labels=3, id2label=id2label, label2id=label2id
)
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model_name = model_checkpoint.split("/")[-1]

# Load the Sentiment-Reasoning dataset from Hugging Face
ds = load_dataset("leduckhai/Sentiment-Reasoning")
# Get train and test splits
train_dataset = ds['train']
test_dataset = ds['test']

def _encode_label(example):
    """Normalize the 'label' field to the integer class id the model expects.

    Accepts either an int id (0/1/2) or a sentiment string ("negative",
    "neutral", "positive").  The Trainer's cross-entropy loss and the data
    collator both require integer targets, so string labels would crash
    at batch-collation time.
    """
    raw = example['label']
    if isinstance(raw, str):
        # assumes string labels match the label2id vocabulary — TODO confirm
        # against the actual dataset schema.
        raw = label2id[raw.strip().lower()]
    return {'label': int(raw)}

# BUG FIX: the original cast every label to a *string* (str(x['label'])),
# which DataCollatorWithPadding cannot turn into tensors; encode to ints.
train_dataset = train_dataset.map(_encode_label)
test_dataset = test_dataset.map(_encode_label)

print("Train labels:", set(train_dataset['label']))
print("Test labels:", set(test_dataset['label']))
print(f"Train size: {len(train_dataset)}, Test size: {len(test_dataset)}")
def preprocess_function(examples):
    """Tokenize the 'text' column of a batch, truncating and padding."""
    texts = examples['text']
    return tokenizer(texts, truncation=True, padding=True)
# Tokenize both splits up front; batched=True feeds many rows per map call.
tokenized_dataset_train = train_dataset.map(preprocess_function, batched=True)
tokenized_dataset_test = test_dataset.map(preprocess_function, batched=True)

from transformers import DataCollatorWithPadding

# Dynamic padding: each batch is padded to its own longest sequence at
# collate time rather than to a global maximum.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

# Load the individual evaluation metrics.
# (Redundant duplicate `import evaluate` removed — the module is already
# imported at the top of the file.)
accuracy = evaluate.load("accuracy")
f1 = evaluate.load("f1")
precision = evaluate.load("precision")
recall = evaluate.load("recall")
def compute_metrics(eval_pred):
    """Compute accuracy plus macro and per-class F1 for the 3-way task.

    Args:
        eval_pred: (logits, labels) pair supplied by the Trainer; logits
            have shape (n_examples, 3), labels are integer class ids.

    Returns:
        dict with keys 'accuracy', 'macro_f1', 'f1_neg', 'f1_neu', 'f1_pos'.
    """
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    # Pin labels=[0, 1, 2]: without it, a class absent from this evaluation
    # pass yields fewer than three per-class scores and the unpacking below
    # raises ValueError.
    per_class = f1.compute(
        predictions=predictions, references=labels, average=None, labels=[0, 1, 2]
    )['f1']
    neg, neu, pos = per_class
    return {
        "accuracy": accuracy.compute(predictions=predictions, references=labels)['accuracy'],
        # Macro-F1 is the unweighted mean of the per-class scores over all
        # three classes, consistent with the labels pinned above.
        "macro_f1": float(np.mean(per_class)),
        "f1_neg": neg,
        "f1_neu": neu,
        "f1_pos": pos,
    }
## Train
# Hyperparameters come from the CLI args parsed above: cosine LR schedule
# with a short warmup, per-epoch evaluation/checkpointing, keeping only the
# two most recent checkpoints, and restoring the best one (by eval macro-F1)
# when training ends.
training_args = TrainingArguments(
    output_dir=f"results/{model_name}",
    num_train_epochs=num_train_epochs,
    learning_rate=learning_rate,
    lr_scheduler_type='cosine',
    warmup_ratio=0.05,
    weight_decay=0.01,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_strategy='epoch',
    save_total_limit=2,
    load_best_model_at_end=True,
    metric_for_best_model='eval_macro_f1',
    bf16=True,
    # push_to_hub=True,
)

# Stop early after 3 consecutive epochs without macro-F1 improvement.
early_stopping = transformers.EarlyStoppingCallback(early_stopping_patience=3)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset_train,
    eval_dataset=tokenized_dataset_test,
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
    callbacks=[early_stopping],
)

print('model_checkpoint', model_checkpoint)
trainer.train()
trainer.save_model()
trainer.evaluate()