-
Notifications
You must be signed in to change notification settings - Fork 63
Expand file tree
/
Copy pathSubtitleTranslate - ChatGPT.as
More file actions
1237 lines (1136 loc) · 50.9 KB
/
SubtitleTranslate - ChatGPT.as
File metadata and controls
1237 lines (1136 loc) · 50.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
Real-time subtitle translation for PotPlayer using OpenAI ChatGPT API
*/
// Plugin Information Functions
// Localized plugin title shown in PotPlayer's translator list.
// $CP949 = Korean, $CP950 = Traditional Chinese, $CP936 = Simplified Chinese, $CP0 = default/English.
string GetTitle() {
    string title = "{$CP949=ChatGPT 번역$}";
    title += "{$CP950=ChatGPT 翻譯$}";
    title += "{$CP936=ChatGPT 翻译$}";
    title += "{$CP0=ChatGPT Translate$}";
    return title;
}
// Plugin version string reported to PotPlayer.
// The literal below is replaced by the installer during the installation process.
string GetVersion() {
return "1.9.2";
}
// Short plugin description shown in PotPlayer's translator list.
string GetDesc() {
return "Real-time subtitle translation using OpenAI ChatGPT.";
}
// Localized caption for the login/configuration dialog.
// $CP949 = Korean, $CP950 = Traditional Chinese, $CP936 = Simplified Chinese, $CP0 = default/English.
string GetLoginTitle() {
    string caption = "{$CP949=OpenAI 모델 및 API 키 구성$}";
    caption += "{$CP950=OpenAI 模型與 API 金鑰配置$}";
    caption += "{$CP936=OpenAI 模型与 API 密钥配置$}";
    caption += "{$CP0=OpenAI Model + API URL and API Key Configuration$}";
    return caption;
}
// Localized help text for the login dialog explaining the pipe-separated
// configuration format (model|apiUrl|nullkey|delay|retryN plus optional
// cache=/retention=/gcache= tokens). One string per codepage:
// $CP949 Korean, $CP950 Traditional Chinese, $CP936 Simplified Chinese, $CP0 default.
string GetLoginDesc() {
return "{$CP949=모델 이름, API 주소, 선택적 nullkey, 지연(ms) 및 재시도 모드(0-3)를 입력하십시오 (예: gpt-5-nano|https://api.openai.com/v1/chat/completions|nullkey|500|retry1).$}"
+ "{$CP949=\n\n설치 프로그램에서 미리 구성한 값이 있다면 PotPlayer 패널에서 다시 설정하기 전까지 해당 값을 사용하며, 패널에서 설정하면 해당 설정이 항상 우선 적용됩니다.$}"
+ "{$CP949=\n\n선택적으로 cache=auto 또는 cache=off 를 추가하여 문맥 캐시 모드를 제어할 수 있으며, auto 는 지원되지 않을 경우 chat 방식으로 자동 전환됩니다.$}"
+ "{$CP950=請輸入模型名稱、API 地址、可選的 nullkey、延遲毫秒與重試模式(0-3)(例如: gpt-5-nano|https://api.openai.com/v1/chat/completions|nullkey|500|retry1)。$}"
+ "{$CP950=\n\n如果安裝包已寫入預設配置,在 PotPlayer 面板中未重新設定之前會沿用這些配置;一旦在面板中調整,將始終以面板設定為準。$}"
+ "{$CP950=\n\n可選加上 cache=auto 或 cache=off 以控制上下文快取模式,auto 會在不支援時自動回退至 chat。$}"
+ "{$CP936=请输入模型名称、API 地址、可选的 nullkey、延迟毫秒和重试模式(0-3)(例如: gpt-5-nano|https://api.openai.com/v1/chat/completions|nullkey|500|retry1)。$}"
+ "{$CP936=\n\n如果安装包已经写入默认配置,在 PotPlayer 面板中没有重新设置之前会继续使用这些配置;一旦在面板中修改,将始终以面板设置为准。$}"
+ "{$CP936=\n\n可选追加 cache=auto 或 cache=off 用于控制上下文缓存模式,auto 在不支持时会自动回退到 chat。$}"
+ "{$CP0=Please enter the model name, API URL, optional 'nullkey', optional delay in ms, and retry mode 0-3 (e.g., gpt-5-nano|https://api.openai.com/v1/chat/completions|nullkey|500|retry1).$}"
+ "{$CP0=\n\nInstaller defaults will remain in effect until you update the settings in PotPlayer's panel, and any panel changes will always take priority.$}"
+ "{$CP0=\n\nOptionally append cache=auto or cache=off to control context caching. Auto falls back to chat when caching is unsupported.$}"
+ "{$CP0=\n\nFor OpenAI official API you can also append retention=24h (or cache24h) to extend prompt cache retention. For Gemini official API you can append gcache=cachedContents/... to reuse an explicit cache.$}";
}
// Label for the "user" input field of the login dialog; embeds the currently
// active settings so the user can see what is configured before editing.
string GetUserText() {
return "{$CP949=모델 이름|API 주소|nullkey|지연(ms)|재시도 모드|캐시 모드 (현재: " + GPT_selected_model + " | " + GPT_apiUrl + " | " + GPT_delay_ms + " | " + GPT_retry_mode + " | " + GPT_context_cache_mode + ")$}"
+ "{$CP950=模型名稱|API 地址|nullkey|延遲ms|重試模式|快取模式 (目前: " + GPT_selected_model + " | " + GPT_apiUrl + " | " + GPT_delay_ms + " | " + GPT_retry_mode + " | " + GPT_context_cache_mode + ")$}"
+ "{$CP936=模型名称|API 地址|nullkey|延迟ms|重试模式|缓存模式 (目前: " + GPT_selected_model + " | " + GPT_apiUrl + " | " + GPT_delay_ms + " | " + GPT_retry_mode + " | " + GPT_context_cache_mode + ")$}"
+ "{$CP0=Model Name|API URL|nullkey|Delay ms|Retry mode|Cache mode|Retention|Gemini cached_content (Current: " + GPT_selected_model + " | " + GPT_apiUrl + " | " + GPT_delay_ms + " | " + GPT_retry_mode + " | " + GPT_context_cache_mode + " | " + GPT_prompt_cache_retention + " | " + GPT_gemini_cached_content + ")$}";
}
// Localized label for the password (API key) input field.
string GetPasswordText() {
    string label = "{$CP949=API 키:$}";
    label += "{$CP950=API 金鑰:$}";
    label += "{$CP936=API 密钥:$}";
    label += "{$CP0=API Key:$}";
    return label;
}
// Global Variables
// ---------------------------------------------------------------------------
// "GPT_pre_*" values are installer-injected defaults; the placeholder literals
// below are replaced during installation. The live "GPT_*" settings start from
// these defaults and are overwritten from persisted storage by
// RefreshConfiguration().
string GPT_pre_api_key = ""; // will be replaced during installation
string GPT_pre_selected_model = "gpt-5-nano"; // will be replaced during installation
string GPT_pre_apiUrl = "https://api.openai.com/v1/chat/completions"; // will be replaced during installation
string GPT_pre_delay_ms = "0"; // per-request delay in ms; will be replaced during installation
string GPT_pre_retry_mode = "0"; // auto-retry mode 0-3; will be replaced during installation
string GPT_pre_context_token_budget = "6000"; // approx. tokens reserved for context (0 = auto)
string GPT_pre_context_truncation_mode = "drop_oldest"; // drop_oldest | smart_trim
string GPT_pre_context_cache_mode = "off"; // auto | off
string GPT_pre_prompt_cache_retention = ""; // ""(default), in-memory, 24h (OpenAI official only)
string GPT_pre_gemini_cached_content = ""; // optional cachedContents/... name for Gemini OpenAI-compatible endpoint
string GPT_pre_small_model = "0"; // 0 | 1
string GPT_pre_check_hallucination = "0"; // 0 | 1
string GPT_pre_model_token_limits_json = "{}"; // serialized token limit rules (injected by installer)
// Context-specific identifiers to prevent collisions with other subtitle translator scripts.
const string GPT_CTX_TRANSLATION_FAILURE_WARNING_PREFIX = "[Translation failed - please share a screenshot with the developer] ";
// Live settings (seeded from installer defaults, refreshed from host storage).
string GPT_api_key = GPT_pre_api_key;
string GPT_selected_model = GPT_pre_selected_model; // Default model
string GPT_apiUrl = GPT_pre_apiUrl; // Default API URL
string GPT_delay_ms = GPT_pre_delay_ms; // Request delay in ms
string GPT_retry_mode = GPT_pre_retry_mode; // Auto retry mode
string GPT_context_token_budget = GPT_pre_context_token_budget; // Approximate token budget for context
string GPT_context_truncation_mode = GPT_pre_context_truncation_mode; // Truncation mode when context exceeds budget
string GPT_context_cache_mode = GPT_pre_context_cache_mode; // auto | off
string GPT_prompt_cache_retention = GPT_pre_prompt_cache_retention; // "" | in-memory | 24h
string GPT_gemini_cached_content = GPT_pre_gemini_cached_content; // cachedContents/... (optional)
string GPT_small_model = GPT_pre_small_model;
string GPT_check_hallucination = GPT_pre_check_hallucination;
string GPT_UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64)";
array<string> GPT_subtitleHistory; // Global subtitle history (rolling context window)
bool GPT_context_cache_disabled_for_session = false; // set when caching fails for the current session
string GPT_context_cache_disable_key = "";
// Model token-limit rules, lazily loaded (see GetModelMaxTokens); the three
// parallel arrays are indexed together: type[i]/value[i] -> limits[i].
bool GPT_token_rules_initialized = false;
int GPT_default_model_token_limit = 4096;
array<string> GPT_token_rule_types;
array<string> GPT_token_rule_values;
array<int> GPT_token_rule_limits;
// Helper functions to load configuration while respecting installer defaults
// Builds a per-key sentinel default for HostLoadString so that a truly
// missing setting can be told apart from one stored as an empty string.
string BuildConfigSentinel(const string &in key) {
    string marker = "#__POTPLAYER_CFG_UNSET__#";
    return marker + key + "#__";
}
// Resolves a setting with precedence: user-saved value > legacy fallback key
// (e.g. old "wc_*" keys) > installer default. An empty stored value is treated
// as unset whenever the installer supplied a non-empty default.
string LoadInstallerConfig(const string &in key, const string &in installerValue, const string &in fallbackKey = "") {
    string unset = BuildConfigSentinel(key);
    string saved = HostLoadString(key, unset);
    bool missing = (saved == unset);
    if (missing && fallbackKey != "") {
        string fallbackUnset = BuildConfigSentinel(fallbackKey);
        string legacy = HostLoadString(fallbackKey, fallbackUnset);
        if (legacy != fallbackUnset && legacy != "")
            return legacy;
    }
    if (missing || (saved == "" && installerValue != ""))
        return installerValue;
    return saved;
}
// Writes `value` to persistent storage under `key` only when nothing is stored
// yet (or only an empty string is stored while the default is non-empty).
void EnsureConfigDefault(const string &in key, const string &in value) {
    string unset = BuildConfigSentinel(key);
    string current = HostLoadString(key, unset);
    bool shouldSeed = (current == unset) || (current == "" && value != "");
    if (shouldSeed)
        HostSaveString(key, value);
}
// Seeds persistent storage with the installer-injected defaults for every
// setting that has not been configured yet (see EnsureConfigDefault);
// existing user values are never overwritten.
void EnsureInstallerDefaultsPersisted() {
EnsureConfigDefault("gpt_api_key", GPT_pre_api_key);
EnsureConfigDefault("gpt_selected_model", GPT_pre_selected_model);
EnsureConfigDefault("gpt_apiUrl", GPT_pre_apiUrl);
EnsureConfigDefault("gpt_delay_ms", GPT_pre_delay_ms);
EnsureConfigDefault("gpt_retry_mode", GPT_pre_retry_mode);
EnsureConfigDefault("gpt_context_token_budget", GPT_pre_context_token_budget);
EnsureConfigDefault("gpt_context_truncation_mode", GPT_pre_context_truncation_mode);
EnsureConfigDefault("gpt_context_cache_mode", GPT_pre_context_cache_mode);
EnsureConfigDefault("gpt_prompt_cache_retention", GPT_pre_prompt_cache_retention);
EnsureConfigDefault("gpt_gemini_cached_content", GPT_pre_gemini_cached_content);
EnsureConfigDefault("gpt_small_model", GPT_pre_small_model);
EnsureConfigDefault("gpt_check_hallucination", GPT_pre_check_hallucination);
}
// Reloads all live GPT_* settings from persistent storage, seeding installer
// defaults first so every key resolves. Legacy "wc_*" keys are honored as
// fallbacks for settings that existed in older releases.
void RefreshConfiguration() {
EnsureInstallerDefaultsPersisted();
GPT_api_key = LoadInstallerConfig("gpt_api_key", GPT_pre_api_key, "wc_api_key");
GPT_selected_model = LoadInstallerConfig("gpt_selected_model", GPT_pre_selected_model, "wc_selected_model");
GPT_apiUrl = LoadInstallerConfig("gpt_apiUrl", GPT_pre_apiUrl, "wc_apiUrl");
GPT_delay_ms = LoadInstallerConfig("gpt_delay_ms", GPT_pre_delay_ms, "wc_delay_ms");
GPT_retry_mode = LoadInstallerConfig("gpt_retry_mode", GPT_pre_retry_mode, "wc_retry_mode");
GPT_context_token_budget = LoadInstallerConfig("gpt_context_token_budget", GPT_pre_context_token_budget);
GPT_context_truncation_mode = LoadInstallerConfig("gpt_context_truncation_mode", GPT_pre_context_truncation_mode);
// Cache-related values are normalized so downstream code only ever sees canonical forms.
GPT_context_cache_mode = NormalizeCacheMode(LoadInstallerConfig("gpt_context_cache_mode", GPT_pre_context_cache_mode));
GPT_prompt_cache_retention = NormalizePromptCacheRetention(LoadInstallerConfig("gpt_prompt_cache_retention", GPT_pre_prompt_cache_retention));
GPT_gemini_cached_content = LoadInstallerConfig("gpt_gemini_cached_content", GPT_pre_gemini_cached_content).Trim();
GPT_small_model = LoadInstallerConfig("gpt_small_model", GPT_pre_small_model);
GPT_check_hallucination = LoadInstallerConfig("gpt_check_hallucination", GPT_pre_check_hallucination);
}
// Supported Language List
// Language codes offered for both source and destination dropdowns.
// First entry (empty string) means "Auto Detect". The codes look like
// ISO-639-style identifiers with regional variants for Chinese —
// NOTE(review): presumably matched against PotPlayer's language picker; confirm before editing.
array<string> LangTable =
{
"", // Auto Detect
"af", // Afrikaans
"sq", // Albanian
"am", // Amharic
"ar", // Arabic
"hy", // Armenian
"az", // Azerbaijani
"eu", // Basque
"be", // Belarusian
"bn", // Bengali
"bs", // Bosnian
"bg", // Bulgarian
"ca", // Catalan
"ceb", // Cebuano
"ny", // Chichewa
"zh-CN", // Chinese (Simplified)
"zh-TW", // Chinese (Traditional)
"co", // Corsican
"hr", // Croatian
"cs", // Czech
"da", // Danish
"nl", // Dutch
"en", // English
"eo", // Esperanto
"et", // Estonian
"tl", // Filipino
"fi", // Finnish
"fr", // French
"fy", // Frisian
"gl", // Galician
"ka", // Georgian
"de", // German
"el", // Greek
"gu", // Gujarati
"ht", // Haitian Creole
"ha", // Hausa
"haw", // Hawaiian
"he", // Hebrew
"hi", // Hindi
"hmn", // Hmong
"hu", // Hungarian
"is", // Icelandic
"ig", // Igbo
"id", // Indonesian
"ga", // Irish
"it", // Italian
"ja", // Japanese
"jw", // Javanese
"kn", // Kannada
"kk", // Kazakh
"km", // Khmer
"ko", // Korean
"ku", // Kurdish (Kurmanji)
"ky", // Kyrgyz
"lo", // Lao
"la", // Latin
"lv", // Latvian
"lt", // Lithuanian
"lb", // Luxembourgish
"mk", // Macedonian
"ms", // Malay
"mg", // Malagasy
"ml", // Malayalam
"mt", // Maltese
"mi", // Maori
"mr", // Marathi
"mn", // Mongolian
"my", // Myanmar (Burmese)
"ne", // Nepali
"no", // Norwegian
"ps", // Pashto
"fa", // Persian
"pl", // Polish
"pt", // Portuguese
"pa", // Punjabi
"ro", // Romanian
"ru", // Russian
"sm", // Samoan
"gd", // Scots Gaelic
"sr", // Serbian
"st", // Sesotho
"sn", // Shona
"sd", // Sindhi
"si", // Sinhala
"sk", // Slovak
"sl", // Slovenian
"so", // Somali
"es", // Spanish
"su", // Sundanese
"sw", // Swahili
"sv", // Swedish
"tg", // Tajik
"ta", // Tamil
"te", // Telugu
"th", // Thai
"tr", // Turkish
"uk", // Ukrainian
"ur", // Urdu
"uz", // Uzbek
"vi", // Vietnamese
"cy", // Welsh
"xh", // Xhosa
"yi", // Yiddish
"yo", // Yoruba
"zu" // Zulu
};
// PotPlayer queries this for the source-language dropdown; hand back a copy
// of the shared table so callers cannot mutate it.
array<string> GetSrcLangs() {
    array<string> langs = LangTable;
    return langs;
}
// PotPlayer queries this for the target-language dropdown; same list as the
// source languages, returned as a copy.
array<string> GetDstLangs() {
    array<string> langs = LangTable;
    return langs;
}
// True only for non-empty strings made entirely of ASCII digits '0'-'9'.
bool IsDigits(const string &in s) {
    uint n = s.length();
    if (n == 0)
        return false;
    for (uint i = 0; i < n; i++) {
        uint8 ch = s[i];
        bool isDigit = (ch >= 48 && ch <= 57); // '0'..'9'
        if (!isDigit)
            return false;
    }
    return true;
}
// Base-10 parse of a non-negative integer. Any non-digit character makes the
// whole parse yield 0 (callers treat 0 as "unset/invalid"); empty input also
// yields 0. No sign or whitespace handling by design.
int ParseInt(const string &in s) {
    int result = 0;
    for (uint i = 0; i < s.length(); i++) {
        uint8 ch = s[i];
        if (ch < 48 || ch > 57)
            return 0; // reject the entire string on the first non-digit
        result = result * 10 + int(ch - 48);
    }
    return result;
}
// ASCII-only case-insensitive string comparison (no locale handling).
bool EqualsIgnoreCase(const string &in a, const string &in b) {
    uint len = a.length();
    if (len != b.length())
        return false;
    for (uint i = 0; i < len; i++) {
        uint8 x = a[i];
        uint8 y = b[i];
        if (x >= 65 && x <= 90) x += 32; // fold 'A'-'Z' to 'a'-'z'
        if (y >= 65 && y <= 90) y += 32;
        if (x != y)
            return false;
    }
    return true;
}
// Builds the HTTP headers for an API request. The Authorization header is
// omitted when the key is empty or the literal placeholder "nullkey"
// (case-insensitive), which keyless local endpoints use.
string BuildAuthHeaders(const string &in key) {
    string cleaned = key.Trim();
    bool hasRealKey = (cleaned != "" && string(cleaned).MakeLower() != "nullkey");
    if (!hasRealKey)
        return "Content-Type: application/json";
    return "Authorization: Bearer " + cleaned + "\nContent-Type: application/json";
}
// API Key and API Base verification process
// Persists a verified configuration set and resets per-session cache state.
// Shared by the direct and auto-corrected verification success paths in ServerLogin.
void GPT_PersistVerifiedConfig(const string &in model, const string &in apiKey, const string &in apiUrl,
                               const string &in cacheMode, const string &in retention,
                               const string &in cachedContent) {
    GPT_selected_model = model;
    GPT_api_key = apiKey;
    HostSaveString("gpt_api_key", GPT_api_key);
    HostSaveString("gpt_selected_model", GPT_selected_model);
    HostSaveString("gpt_apiUrl", apiUrl);
    HostSaveString("gpt_delay_ms", GPT_delay_ms);
    HostSaveString("gpt_retry_mode", GPT_retry_mode);
    GPT_context_cache_mode = cacheMode;
    HostSaveString("gpt_context_cache_mode", GPT_context_cache_mode);
    GPT_prompt_cache_retention = retention;
    HostSaveString("gpt_prompt_cache_retention", GPT_prompt_cache_retention);
    GPT_gemini_cached_content = cachedContent;
    HostSaveString("gpt_gemini_cached_content", GPT_gemini_cached_content);
    HostSaveString("gpt_small_model", GPT_small_model);
    HostSaveString("gpt_check_hallucination", GPT_check_hallucination);
    GPT_context_cache_disabled_for_session = false;
    GPT_context_cache_disable_key = "";
}
// API Key and API Base verification process.
// `User` carries a pipe-separated option string (model|apiUrl|nullkey|delayMs|retryN|cache=...|...),
// `Pass` the API key. Sends a test chat completion; on failure, retries with
// "/chat/completions" appended, then (official API only) cross-checks the model list.
// Returns "200 ok" (optionally prefixed with a warning) on success, otherwise an
// accumulated error report.
// FIX: the "cachedcontent=" token was previously compared with substr(0,15)
// against a 14-character literal (never matched) and sliced at offset 15,
// so such tokens fell through and were misparsed as the API URL.
string ServerLogin(string User, string Pass) {
    RefreshConfiguration();
    string errorAccum = "";
    User = User.Trim();
    Pass = Pass.Trim();
    string lowerPass = string(Pass).MakeLower();
    // Split the user field on '|' into individual option tokens.
    array<string> tokens;
    int start = 0;
    for (int i = 0; i <= int(User.length()); i++) {
        if (i == int(User.length()) || User.substr(i, 1) == "|") {
            string token = User.substr(start, i - start).Trim();
            tokens.insertLast(token);
            start = i + 1;
        }
    }
    string userModel = "";
    string customApiUrl = "";
    bool allowNullApiKey = (Pass == "" || lowerPass == "nullkey");
    string delayToken = "";
    string retryToken = "";
    string cacheToken = "";
    string promptCacheRetentionToken = GPT_prompt_cache_retention;
    string geminiCachedContentToken = GPT_gemini_cached_content;
    string smallModelToken = "";
    string halluToken = "";
    string normalizedCacheMode = GPT_context_cache_mode;
    if (tokens.length() >= 1) {
        userModel = tokens[0];
    }
    // Classify each optional token; the first unrecognized token becomes the API URL.
    for (int i = 1; i < int(tokens.length()); i++) {
        string t = tokens[i];
        string lowered = ToLower(t);
        if (lowered == "nullkey")
            allowNullApiKey = true;
        else if (lowered.length() >= 5 && lowered.substr(0,5) == "retry" && IsDigits(t.substr(5)))
            retryToken = t.substr(5);
        else if (IsDigits(t))
            delayToken = t;
        else if (lowered.length() >= 6 && lowered.substr(0,6) == "cache=")
            cacheToken = lowered.substr(6);
        else if (lowered == "cacheauto" || lowered == "cacheon" || lowered == "cache")
            cacheToken = "auto";
        else if (lowered == "cacheoff" || lowered == "nocache")
            cacheToken = "off";
        else if (lowered.length() >= 10 && lowered.substr(0,10) == "retention=")
            promptCacheRetentionToken = lowered.substr(10);
        else if (lowered == "cache24h")
            promptCacheRetentionToken = "24h";
        else if (lowered == "cachemem" || lowered == "cachememory")
            promptCacheRetentionToken = "in-memory";
        else if (lowered.length() >= 14 && lowered.substr(0,14) == "cachedcontent=") // "cachedcontent=" is 14 chars
            geminiCachedContentToken = t.substr(14).Trim();
        else if (lowered.length() >= 15 && lowered.substr(0,15) == "cached_content=")
            geminiCachedContentToken = t.substr(15).Trim();
        else if (lowered.length() >= 7 && lowered.substr(0,7) == "gcache=")
            geminiCachedContentToken = t.substr(7).Trim();
        else if (lowered == "gcacheoff" || lowered == "nocachedcontent")
            geminiCachedContentToken = "";
        else if (lowered == "smallmodel=1" || lowered == "smallmodel")
            smallModelToken = "1";
        else if (lowered == "smallmodel=0")
            smallModelToken = "0";
        else if (lowered == "checkhallucination=1" || lowered == "hallucination=1")
            halluToken = "1";
        else if (lowered == "checkhallucination=0" || lowered == "hallucination=0")
            halluToken = "0";
        else if (customApiUrl == "")
            customApiUrl = t;
    }
    if (retryToken != "")
        GPT_retry_mode = retryToken;
    if (delayToken != "")
        GPT_delay_ms = delayToken;
    if (cacheToken != "")
        normalizedCacheMode = NormalizeCacheMode(cacheToken);
    else
        normalizedCacheMode = NormalizeCacheMode(normalizedCacheMode);
    promptCacheRetentionToken = NormalizePromptCacheRetention(promptCacheRetentionToken);
    if (smallModelToken != "")
        GPT_small_model = smallModelToken;
    if (halluToken != "")
        GPT_check_hallucination = halluToken;
    if (userModel == "")
        userModel = GPT_selected_model;
    if (userModel == "") {
        errorAccum += "Model name not entered. Please enter a valid model name.\n";
        return errorAccum;
    }
    // Resolve the API URL, stripping trailing slashes from a user-supplied one.
    string apiUrlLocal = "";
    if (customApiUrl != "") {
        apiUrlLocal = customApiUrl;
        while (apiUrlLocal != "" && apiUrlLocal.substr(apiUrlLocal.length()-1, 1) == "/")
            apiUrlLocal = apiUrlLocal.substr(0, apiUrlLocal.length()-1);
    } else {
        apiUrlLocal = GPT_apiUrl;
    }
    // Fall back to the stored key when the password field is left blank.
    if (Pass == "")
        Pass = GPT_api_key;
    if (string(Pass).MakeLower() == "nullkey")
        allowNullApiKey = true;
    if (!allowNullApiKey && Pass == "") {
        errorAccum += "API Key not configured. Please enter a valid API Key.\n";
        return errorAccum;
    }
    string storedApiKey = (lowerPass == "nullkey" || (allowNullApiKey && Pass == "")) ? "nullkey" : Pass;
    bool isOfficial = IsOpenAIOfficialApiUrl(apiUrlLocal);
    bool isGemini = IsGeminiApiUrl(apiUrlLocal);
    string verifyHeaders = BuildAuthHeaders(Pass);
    // First attempt: send a minimal chat completion to the configured URL.
    string testSystemMsg = "You are a test assistant.";
    string testUserMsg = "Hello";
    string testPromptCacheKey = BuildPromptCacheKey(userModel, "verify", "verify");
    string testRequestData = BuildChatPayload(
        userModel,
        testSystemMsg,
        testUserMsg,
        isOfficial,
        testPromptCacheKey,
        promptCacheRetentionToken,
        isGemini,
        geminiCachedContentToken
    );
    string testResponse = HostUrlGetString(apiUrlLocal, GPT_UserAgent, verifyHeaders, testRequestData);
    if (testResponse != "") {
        JsonReader testReader;
        JsonValue testRoot;
        if (testReader.parse(testResponse, testRoot)) {
            if (testRoot.isObject() && testRoot["choices"].isArray() && testRoot["choices"].size() > 0) {
                GPT_PersistVerifiedConfig(userModel, storedApiKey, apiUrlLocal,
                                          normalizedCacheMode, promptCacheRetentionToken,
                                          geminiCachedContentToken);
                return "200 ok";
            } else {
                if (testRoot.isObject() && testRoot["error"].isObject() && testRoot["error"]["message"].isString())
                    errorAccum += "Test message error: " + testRoot["error"]["message"].asString() + "\n";
                else
                    errorAccum += "Test message response invalid.\n";
            }
        } else {
            errorAccum += "Failed to parse test message response.\n";
        }
    } else {
        errorAccum += "No response from server when sending test message.\n";
    }
    // Second attempt: auto-correct a base URL missing the /chat/completions suffix.
    if (apiUrlLocal.find("chat/completions") == -1) {
        string correctedApiUrl = apiUrlLocal + "/chat/completions";
        bool correctedIsOfficial = IsOpenAIOfficialApiUrl(correctedApiUrl);
        bool correctedIsGemini = IsGeminiApiUrl(correctedApiUrl);
        string correctedTestRequestData = BuildChatPayload(
            userModel,
            testSystemMsg,
            testUserMsg,
            correctedIsOfficial,
            testPromptCacheKey,
            promptCacheRetentionToken,
            correctedIsGemini,
            geminiCachedContentToken
        );
        string correctedTestResponse = HostUrlGetString(correctedApiUrl, GPT_UserAgent, verifyHeaders, correctedTestRequestData);
        if (correctedTestResponse != "") {
            JsonReader correctedReader;
            JsonValue correctedRoot;
            if (correctedReader.parse(correctedTestResponse, correctedRoot)) {
                if (correctedRoot.isObject() && correctedRoot["choices"].isArray() && correctedRoot["choices"].size() > 0) {
                    apiUrlLocal = correctedApiUrl;
                    GPT_PersistVerifiedConfig(userModel, storedApiKey, apiUrlLocal,
                                              normalizedCacheMode, promptCacheRetentionToken,
                                              geminiCachedContentToken);
                    return "Warning: Your API base was auto-corrected to: " + apiUrlLocal + "\n200 ok";
                } else {
                    if (correctedRoot.isObject() && correctedRoot["error"].isObject() && correctedRoot["error"]["message"].isString())
                        errorAccum += "Auto-correction test error: " + correctedRoot["error"]["message"].asString() + "\n";
                    else
                        errorAccum += "Auto-correction test response invalid.\n";
                }
            } else {
                errorAccum += "Failed to parse auto-correction test response.\n";
            }
        } else {
            errorAccum += "No response from server after auto-correction.\n";
        }
    }
    // Diagnostics: for the official API, check whether the model exists at all.
    if (isOfficial) {
        string verifyUrl = "";
        int pos = apiUrlLocal.find("chat/completions");
        if (pos != -1)
            verifyUrl = apiUrlLocal.substr(0, pos) + "models";
        else
            verifyUrl = "https://api.openai.com/v1/models";
        string verifyResponse = HostUrlGetString(verifyUrl, GPT_UserAgent, verifyHeaders, "");
        if (verifyResponse == "")
            errorAccum += "Server connection failed: Unable to retrieve model list. Check network and API Base.\n";
        else {
            JsonReader reader;
            JsonValue root;
            if (!reader.parse(verifyResponse, root))
                errorAccum += "Failed to parse model list response. Check API Base and API Key.\n";
            else {
                if (root.isObject() && root["error"].isObject() && root["error"]["message"].isString())
                    errorAccum += "API error during model list retrieval: " + root["error"]["message"].asString() + "\n";
                else if (root.isObject() && root["data"].isArray()) {
                    bool modelFound = false;
                    int dataSize = root["data"].size();
                    for (int i = 0; i < dataSize; i++) {
                        JsonValue element = root["data"][i];
                        if (element.isObject() && element["id"].isString()) {
                            if (element["id"].asString() == userModel) {
                                modelFound = true;
                                break;
                            }
                        }
                    }
                    if (!modelFound)
                        errorAccum += "The specified model '" + userModel + "' is not available in the official API.\n";
                } else
                    errorAccum += "Invalid format of model list response.\n";
            }
        }
    } else {
        errorAccum += "API verification via model list skipped for third-party API Base.\n";
    }
    if (!allowNullApiKey && Pass.length() < 20)
        errorAccum += "API Key verification failed: API Key length may be too short. Please verify your API Key.\n";
    if (errorAccum != "")
        return "API verification failed with the following issues:\n\n" + errorAccum;
    return "Unknown error during API verification. Please check your network, API Key, and API Base settings.\n";
}
// Logout Interface to clear model name and API Key
// Logout: clears the API key and restores every setting to its installer
// default, both in memory and in persistent storage.
// FIX: GPT_check_hallucination was previously the only setting NOT reset to
// its installer default, yet its (stale) current value was still persisted;
// it is now reset like its siblings.
void ServerLogout() {
    GPT_api_key = "";
    GPT_selected_model = GPT_pre_selected_model;
    GPT_apiUrl = GPT_pre_apiUrl;
    GPT_delay_ms = GPT_pre_delay_ms;
    GPT_retry_mode = GPT_pre_retry_mode;
    GPT_context_token_budget = GPT_pre_context_token_budget;
    GPT_context_truncation_mode = GPT_pre_context_truncation_mode;
    GPT_context_cache_mode = GPT_pre_context_cache_mode;
    GPT_prompt_cache_retention = GPT_pre_prompt_cache_retention;
    GPT_gemini_cached_content = GPT_pre_gemini_cached_content;
    GPT_small_model = GPT_pre_small_model;
    GPT_check_hallucination = GPT_pre_check_hallucination;
    // Per-session cache state starts fresh after logout.
    GPT_context_cache_disabled_for_session = false;
    GPT_context_cache_disable_key = "";
    // Persist the reset values (empty string clears the stored API key).
    HostSaveString("gpt_api_key", "");
    HostSaveString("gpt_selected_model", GPT_selected_model);
    HostSaveString("gpt_apiUrl", GPT_apiUrl);
    HostSaveString("gpt_delay_ms", GPT_delay_ms);
    HostSaveString("gpt_retry_mode", GPT_retry_mode);
    HostSaveString("gpt_context_token_budget", GPT_context_token_budget);
    HostSaveString("gpt_context_truncation_mode", GPT_context_truncation_mode);
    HostSaveString("gpt_context_cache_mode", GPT_context_cache_mode);
    HostSaveString("gpt_prompt_cache_retention", GPT_prompt_cache_retention);
    HostSaveString("gpt_gemini_cached_content", GPT_gemini_cached_content);
    HostSaveString("gpt_small_model", GPT_small_model);
    HostSaveString("gpt_check_hallucination", GPT_check_hallucination);
    HostPrintUTF8("Successfully logged out.\n");
}
// JSON String Escape Function
string JsonEscape(const string &in input) {
string output = input;
output.replace("\\", "\\\\");
output.replace("\"", "\\\"");
output.replace("\n", "\\n");
output.replace("\r", "\\r");
output.replace("\t", "\\t");
output.replace("/", "\\/");
return output;
}
// Rough token estimate using the common ~4-characters-per-token heuristic.
// Multiplying by 0.25f is bit-exact equivalent to dividing by 4 in float.
int EstimateTokenCount(const string &in text) {
    float approx = float(text.length()) * 0.25f;
    return int(approx);
}
// Resolves the context-window size (in tokens) for a model name using the
// installer-provided match rules; the first matching rule wins. Falls back to
// GPT_default_model_token_limit for empty names or when no rule matches.
int GetModelMaxTokens(const string &in modelName) {
    EnsureTokenRulesLoaded();
    string model = modelName.Trim();
    if (model == "")
        return GPT_default_model_token_limit;
    for (uint i = 0; i < GPT_token_rule_types.length(); i++) {
        string kind = GPT_token_rule_types[i];
        string pattern = GPT_token_rule_values[i];
        bool matched = false;
        if (kind == "prefix")
            matched = (model.length() >= pattern.length() && model.substr(0, pattern.length()) == pattern);
        else if (kind == "contains")
            matched = (model.find(pattern) != -1);
        else if (kind == "equals")
            matched = (model == pattern);
        if (matched)
            return GPT_token_rule_limits[i];
    }
    return GPT_default_model_token_limit;
}
// Translation Function
string Translate(string Text, string &in SrcLang, string &in DstLang) {
RefreshConfiguration();
if (GPT_api_key == "") {
HostPrintUTF8("API Key not configured. Please enter it in the settings menu.\n");
return "";
}
if (DstLang == "" || DstLang == "Auto Detect") {
HostPrintUTF8("Target language not specified. Please select a target language.\n");
return "";
}
if (SrcLang == "" || SrcLang == "Auto Detect") {
SrcLang = "";
}
GPT_subtitleHistory.insertLast(Text);
int maxTokens = GetModelMaxTokens(GPT_selected_model);
int safeBudget = maxTokens - 1000;
if (safeBudget < 0)
safeBudget = maxTokens;
if (safeBudget < 0)
safeBudget = 0;
int configuredBudget = ParseInt(GPT_context_token_budget);
if (configuredBudget <= 0 || configuredBudget > safeBudget)
configuredBudget = safeBudget;
string truncMode = GPT_context_truncation_mode;
bool useSmartTrim = EqualsIgnoreCase(truncMode, "smart_trim");
int currentTokens = EstimateTokenCount(Text);
if (currentTokens < 0)
currentTokens = 0;
int availableForContext = safeBudget - currentTokens;
if (availableForContext < 0)
availableForContext = 0;
if (availableForContext > configuredBudget)
availableForContext = configuredBudget;
array<string> contextSegments;
int usedContextTokens = 0;
int idx = int(GPT_subtitleHistory.length()) - 2;
while (idx >= 0 && usedContextTokens < availableForContext) {
string subtitle = GPT_subtitleHistory[idx];
int subtitleTokens = EstimateTokenCount(subtitle);
if (subtitleTokens <= 0) {
idx--;
continue;
}
if (usedContextTokens + subtitleTokens <= availableForContext) {
contextSegments.insertAt(0, subtitle);
usedContextTokens += subtitleTokens;
} else if (useSmartTrim) {
int remainingTokens = availableForContext - usedContextTokens;
if (remainingTokens > 0) {
int charBudget = remainingTokens * 4;
int subtitleLength = int(subtitle.length());
if (charBudget < subtitleLength)
subtitle = subtitle.substr(subtitleLength - charBudget, charBudget);
contextSegments.insertAt(0, subtitle);
}
usedContextTokens = availableForContext;
break;
} else {
break;
}
idx--;
}
string context = "";
for (uint ctxIndex = 0; ctxIndex < contextSegments.length(); ctxIndex++) {
if (ctxIndex > 0)
context += "\n";
context += contextSegments[ctxIndex];
}
int historyBudget = configuredBudget;
if (historyBudget <= 0)
historyBudget = safeBudget;
if (historyBudget < 0)
historyBudget = 0;
int historyTarget = historyBudget > 0 ? int(historyBudget / 16) : 0;
if (historyTarget < 96)
historyTarget = 96;
if (historyTarget > 2048)
historyTarget = 2048;
int shrinkTarget = historyTarget - 64;
if (shrinkTarget < 64)
shrinkTarget = historyTarget / 2;
if (shrinkTarget < 32)
shrinkTarget = 32;
const uint historyTargetCount = historyTarget > 0 ? uint(historyTarget) : 0;
const uint shrinkTargetCount = shrinkTarget > 0 ? uint(shrinkTarget) : 0;
if (GPT_subtitleHistory.length() > historyTargetCount) {
while (GPT_subtitleHistory.length() > shrinkTargetCount) {
GPT_subtitleHistory.removeAt(0);
}
}
string sourceLabel = (SrcLang == "" ? "Auto Detect" : SrcLang);
string targetLangCode = DstLang;
string targetLabel = targetLangCode;
string systemMsg =
"You are an expert subtitle translate tool with a deep understanding of both language and culture. "
"Based on contextual clues, you provide translations that capture not only the literal meaning but also the nuanced metaphors, euphemisms, and cultural symbols embedded in the dialogue. "
"Your translations reflect the intended tone and cultural context, ensuring that every subtle reference and idiomatic expression is accurately conveyed. "
"I will provide you with relevant context when available; never echo that context in the output.\n\n"
"Rules:\n"
"1. Output the translation only.\n"
"2. Do NOT output extra comments or explanations.\n"
"3. Do NOT use any special characters or formatting in the translation.\n\n"
"Source language: " + sourceLabel + "\n"
"Target language: " + targetLabel + "\n";
if (context != "") {
systemMsg += "\nSubtitle context (older to newer):\n" + context + "\n\nDo not translate or repeat any context entries.";
}
string userMsg = Text;
bool isOpenAIOfficial = IsOpenAIOfficialApiUrl(GPT_apiUrl);
bool isGeminiApi = IsGeminiApiUrl(GPT_apiUrl);
bool enableOpenAIPromptCacheControls = isOpenAIOfficial;
string promptCacheRetention = isOpenAIOfficial ? NormalizePromptCacheRetention(GPT_prompt_cache_retention) : "";
string promptCacheKey = isOpenAIOfficial ? BuildPromptCacheKey(GPT_selected_model, sourceLabel, targetLabel) : "";
string requestData = BuildChatPayload(
GPT_selected_model,
systemMsg,
userMsg,
enableOpenAIPromptCacheControls,
promptCacheKey,
promptCacheRetention,
isGeminiApi,
GPT_gemini_cached_content
);
string headers = BuildAuthHeaders(GPT_api_key);
int delayInt = ParseInt(GPT_delay_ms);
int retryModeInt = ParseInt(GPT_retry_mode);
string cacheSessionKey = GPT_context_cache_mode + "|" + GPT_apiUrl + "|" + GPT_selected_model + "|" + promptCacheRetention + "|" + GPT_gemini_cached_content;
if (cacheSessionKey != GPT_context_cache_disable_key)
GPT_context_cache_disabled_for_session = false;
GPT_context_cache_disable_key = cacheSessionKey;
string translation = "";
string response = "";
// Unified Retry Loop: Handles Network, API Errors, and Hallucinations
int maxRetries = retryModeInt;
if (maxRetries < 0) maxRetries = 0;
if (retryModeInt == 2) maxRetries = 999999; // retry until success (infinite-ish)
if (retryModeInt == 3) maxRetries = 999999; // retry until success delayed
int attempts = 0;
while (attempts <= maxRetries) {
if (attempts > 0) {
if (delayInt > 0) HostSleep(delayInt);
}
translation = "";
response = "";
// Try Context Caching first if enabled
if (GPT_context_cache_mode != "off" && !GPT_context_cache_disabled_for_session && ShouldTryResponsesEndpoint(GPT_apiUrl)) {
string responsesUrl = DeriveResponsesUrl(GPT_apiUrl);
string cacheFailure = "";
if (responsesUrl != "") {
translation = TranslateWithResponses(responsesUrl, headers, GPT_selected_model, systemMsg, Text, promptCacheKey, promptCacheRetention, cacheFailure);
} else {
cacheFailure = "Unable to resolve responses endpoint from current API URL.";
}
if (translation != "") {
// Check for Hallucination
if (GPT_check_hallucination == "1" && IsOverlongTranslation(translation, Text)) {
HostPrintUTF8("Hallucination detected (Length > 5x). Retrying...\n");
attempts++;
continue;
} break; // Success
}
if (!GPT_context_cache_disabled_for_session) {
string fallbackMessage = cacheFailure == "" ? "Context caching failed." : "Context caching failed: " + cacheFailure;
if (ShouldDisableContextCacheForSession(cacheFailure)) {
// Permanent capability errors should disable responses attempts for this session.
GPT_context_cache_disabled_for_session = true;
HostPrintUTF8(fallbackMessage + "\nUsing chat completions for this session.\n");
} else {
HostPrintUTF8(fallbackMessage + "\nUsing chat completions for this request.\n");
}
}
}
// Standard Chat Completion
response = ExecuteSimple(GPT_apiUrl, headers, requestData);
if (response == "") {
// Network failure
attempts++;
continue;
}
JsonReader Reader;
JsonValue Root;
if (!Reader.parse(response, Root)) {
HostPrintUTF8("Failed to parse API response.\n");
attempts++;
continue;
}
JsonValue choices = Root["choices"];
if (choices.isArray() && choices.size() > 0 &&
choices[0].isObject() &&
choices[0]["message"].isObject() &&
choices[0]["message"]["content"].isString()) {
translation = choices[0]["message"]["content"].asString();
// Check for Hallucination
if (GPT_check_hallucination == "1" && IsOverlongTranslation(translation, Text)) {
HostPrintUTF8("Hallucination detected (Length > 5x). Retrying...\n");
attempts++;
continue;
}
break; // Success
} else if (Root.isObject() &&
Root["error"].isObject() &&
Root["error"]["message"].isString()) {
string errorMessage = Root["error"]["message"].asString();
HostPrintUTF8("API Error: " + errorMessage + "\n");
string loweredError = ToLower(errorMessage);
if (enableOpenAIPromptCacheControls && loweredError.find("prompt_cache") != -1) {
enableOpenAIPromptCacheControls = false;
requestData = BuildChatPayload(
GPT_selected_model,
systemMsg,
userMsg,
false,
promptCacheKey,
promptCacheRetention,
isGeminiApi,
GPT_gemini_cached_content
);
HostPrintUTF8("Prompt cache control fields are unsupported on this endpoint. Retrying without them.\n");
}
// API returned an error (e.g. rate limit, context length).
// Should we retry? Usually yes for rate limits, maybe no for invalid request.
// For simplicity and robustness, we retry.
attempts++;
continue;
} else {
HostPrintUTF8("Translation failed. Unknown response format.\n");
attempts++;
continue;
}
}
if (translation == "") {
string failureMessage = "Translation failed after retries.";
if (response != "") failureMessage += " Last response: " + response;
HostPrintUTF8(failureMessage + "\n");
return FormatFailureTranslation(response, "Translation failed. Check settings and network.");
}
bool isFailureTranslation = translation.length() >= GPT_CTX_TRANSLATION_FAILURE_WARNING_PREFIX.length() &&
translation.substr(0, GPT_CTX_TRANSLATION_FAILURE_WARNING_PREFIX.length()) == GPT_CTX_TRANSLATION_FAILURE_WARNING_PREFIX;
if (!isFailureTranslation && GPT_selected_model.find("gemini") != -1) {
while (translation.length() > 0 && translation.substr(translation.length() - 1, 1) == "\n") {
translation = translation.substr(0, translation.length() - 1);
}
}
if (!isFailureTranslation && (targetLangCode == "fa" || targetLangCode == "ar" || targetLangCode == "he")) {
string UNICODE_RLE = "\u202B";
translation = UNICODE_RLE + translation;
}
SrcLang = "UTF8";
DstLang = "UTF8";
return translation.Trim();
}
// Builds the sentinel "failure translation" shown to the user when no usable
// translation was produced. The warning prefix lets the caller (and later
// passes) recognize the string as a failure marker rather than real output.
//   rawResponse     - last raw API response body; may be empty.
//   fallbackMessage - generic text used when rawResponse has no content.
string FormatFailureTranslation(const string &in rawResponse, const string &in fallbackMessage) {
    string body = rawResponse.Trim();
    // Prefer the concrete API detail; fall back to the generic message.
    return GPT_CTX_TRANSLATION_FAILURE_WARNING_PREFIX + (body == "" ? fallbackMessage : body);
}
// Plugin Initialization
// Called by the host when the plugin is loaded: restores saved settings and
// resets the per-session context-cache state so a fresh session never starts
// with caching disabled by a previous run.
void OnInitialize() {
    HostPrintUTF8("ChatGPT translation plugin loaded.\n");
    RefreshConfiguration();
    // New session: clear the cache key and re-enable context caching.
    GPT_context_cache_disable_key = "";
    GPT_context_cache_disabled_for_session = false;
    if (GPT_api_key == "")
        return; // no saved credentials yet; skip the confirmation message
    HostPrintUTF8("Saved API Key, model name, and API URL loaded.\n");
}
// Plugin Finalization
// Called by the host when the plugin is unloaded; only logs the event.
// No state is persisted here — configuration is saved elsewhere by the host.
void OnFinalize() {
HostPrintUTF8("ChatGPT translation plugin unloaded.\n");
}
// Lower-cases a string via the script string helper. Centralized here so every
// case-insensitive comparison in this script goes through the same routine.
string ToLower(const string &in s) {
    string lowered = s;
    return lowered.MakeLower();
}
// Maps a user-supplied cache-mode setting onto the two supported values.
// Any recognized "disable" spelling yields "off"; everything else — including
// an empty or unknown value — falls back to "auto".
string NormalizeCacheMode(const string &in mode) {
    string value = ToLower(mode.Trim());
    if (value == "off" || value == "disable" || value == "disabled" || value == "chat")
        return "off";
    return "auto";
}
// Normalizes the prompt-cache retention setting to one of the values the API
// accepts: "24h", "in-memory", or "" (meaning no retention control).
// Several informal spellings are accepted for the in-memory option.
string NormalizePromptCacheRetention(const string &in retention) {
    string value = ToLower(retention.Trim());
    if (value == "24h")
        return "24h";
    bool wantsInMemory =
        value == "in_memory" || value == "in-memory" ||
        value == "memory" || value == "mem" || value == "default";
    // Unknown or empty input disables retention controls entirely.
    return wantsInMemory ? "in-memory" : "";
}
// True when the configured endpoint targets the official OpenAI API host
// (substring match, case-insensitive).
bool IsOpenAIOfficialApiUrl(const string &in apiUrl) {
    string lowered = ToLower(apiUrl);
    return lowered.find("api.openai.com") >= 0;
}
bool IsGeminiApiUrl(const string &in apiUrl) {
string lower = ToLower(apiUrl);
return lower.find("generativelanguage.googleapis.com") != -1;