@@ -28,29 +28,30 @@ def parse_vtt_files(directory):
def classify_misogyny(sentences, model_pipeline):
    """Classify each sentence as misogynist or non-misogynist.

    Args:
        sentences: Iterable of sentence strings to classify.
        model_pipeline: Callable (e.g. a HuggingFace text-classification
            pipeline) mapping a sentence to a list of
            ``{"label": str, "score": float}`` dicts — presumably one entry
            for the top label, or one per class when all scores are
            returned; TODO confirm against the pipeline configuration.

    Returns:
        A tuple of two parallel lists:
            misogyny_scores: signed confidence per sentence — negative for
                "misogynist", positive for "non-misogynist", 0 when the
                pipeline returned no recognized label.
            labels: the winning label string per sentence.
    """
    misogyny_scores = []
    labels = []

    with alive_bar(len(sentences), title="Processing Sentences") as bar:
        for sentence in sentences:
            result = model_pipeline(sentence)

            # Keep only the labels we understand.
            known = [
                entry for entry in result
                if entry["label"] in ("misogynist", "non-misogynist")
            ]

            if known:
                # Pick the highest-scoring entry. The previous loop
                # overwrote score and label on every iteration, so with a
                # multi-entry result the output depended on entry ORDER
                # rather than confidence — this fixes that while behaving
                # identically for single-entry results.
                best = max(known, key=lambda entry: entry["score"])
                label = best["label"]
                # Negative score encodes the misogynist class.
                if label == "misogynist":
                    misogynist_score = -best["score"]
                else:
                    misogynist_score = best["score"]
            else:
                # Defaults when no recognized label is present.
                label = "non-misogynist"
                misogynist_score = 0

            misogyny_scores.append(misogynist_score)
            labels.append(label)
            bar()

    return misogyny_scores, labels
5455
5556
5657def plot_pie_chart (labels , sentences , output_filename , title ):
@@ -61,7 +62,7 @@ def plot_pie_chart(labels, sentences, output_filename, title):
6162 non_misogynist_count = labels .count ("non-misogynist" )
6263
6364 # Data for pie chart.
64- values = [misogynist_count , non_misogynist_count ]
65+ values = [misogynist_count , non_misogynist_count ]
6566 labels = ["Misogyny" , "Non Misogyny" ]
6667 custom_colors = ["#ff1b6b" , "#45caff" ]
6768
0 commit comments