Skip to content

Commit c025253

Browse files
myelinated-wackerow, claude, and wackerow
committed
refactor: upgrade to gemini-3.1-pro-preview, fresh run
- Model: gemini-3.1-pro-preview (best available)
- Safety: BLOCK_NONE on all harm categories
- Token tracking + cost estimate in summary
- Cleared all Tier 1 flash translations for full re-generation with pro model
- 519 terms x 24 languages from scratch

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Co-Authored-By: wackerow <54227730+wackerow@users.noreply.github.com>
1 parent 32d9722 commit c025253

26 files changed

Lines changed: 34 additions & 117506 deletions

scripts/generate-translations-sdk.ts

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -417,6 +417,14 @@ interface FailedBatch {
417417
reason: string
418418
}
419419

420+
// Token tracking for cost estimation
421+
// Gemini 3.1 Pro pricing (approximate as of 2026-03):
422+
// Input: ~$1.25 / 1M tokens, Output: ~$10.00 / 1M tokens
423+
const TOKEN_COST_INPUT_PER_M = 1.25
424+
const TOKEN_COST_OUTPUT_PER_M = 10.0
425+
let totalInputTokens = 0
426+
let totalOutputTokens = 0
427+
420428
/**
421429
* Semaphore-based concurrency limiter.
422430
* All languages share one pool of CONCURRENCY slots.
@@ -491,7 +499,7 @@ async function processBatch(
491499
for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
492500
try {
493501
const response = await ai.models.generateContent({
494-
model: "gemini-2.5-pro",
502+
model: "gemini-3.1-pro-preview",
495503
contents: prompt,
496504
config: {
497505
safetySettings: [
@@ -515,6 +523,13 @@ async function processBatch(
515523
},
516524
})
517525

526+
// Track token usage
527+
const usage = response.usageMetadata
528+
if (usage) {
529+
totalInputTokens += usage.promptTokenCount ?? 0
530+
totalOutputTokens += usage.candidatesTokenCount ?? 0
531+
}
532+
518533
const text = response.text ?? ""
519534

520535
if (!text.trim()) {
@@ -703,13 +718,31 @@ async function main(): Promise<void> {
703718
}
704719
}
705720

721+
// Cost estimate
722+
const inputCost = (totalInputTokens / 1_000_000) * TOKEN_COST_INPUT_PER_M
723+
const outputCost = (totalOutputTokens / 1_000_000) * TOKEN_COST_OUTPUT_PER_M
724+
const totalCost = inputCost + outputCost
725+
726+
log("")
727+
log("TOKEN USAGE:")
728+
log(` Input tokens: ${totalInputTokens.toLocaleString()}`)
729+
log(` Output tokens: ${totalOutputTokens.toLocaleString()}`)
730+
log(
731+
` Estimated cost: $${totalCost.toFixed(2)} (input: $${inputCost.toFixed(2)}, output: $${outputCost.toFixed(2)})`
732+
)
733+
706734
// Write summary JSON
707735
const summary = {
708736
timestamp: new Date().toISOString(),
709737
totalGenerated,
710738
totalFailed,
711739
totalSkipped,
712740
failedBatches,
741+
tokens: {
742+
input: totalInputTokens,
743+
output: totalOutputTokens,
744+
estimatedCostUsd: Math.round(totalCost * 100) / 100,
745+
},
713746
languageStatus: Object.fromEntries(
714747
LANGUAGES.map((l) => {
715748
const count = Object.keys(loadExistingTranslations(l.code)).length

0 commit comments

Comments (0)