-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbackground.js
More file actions
216 lines (186 loc) · 7.15 KB
/
background.js
File metadata and controls
216 lines (186 loc) · 7.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
/* global chrome */
// Chat Completions model used for every request (see generatePrompt).
const MODEL = "gpt-4o-mini"; // or gpt-3.5-turbo-0125, etc.
// Array of possible instructions to randomize from.
// generateRandomPrompt samples 3-10 of these per request so repeated runs
// on the same post (after a cache miss) produce varied prompts.
const POSSIBLE_INSTRUCTIONS = [
"Make the tone playful or tongue‑in‑cheek.",
"Add a random quip at the end.",
"Use corporate buzzwords ironically.",
"Include a motivational quote reference.",
"Make it sound like a LinkedIn influencer.",
"Add a hashtag suggestion.",
"Include a call-to-action phrase.",
"Use emoji in the prompt.",
"Make it sound like a thought leadership piece.",
"Include a productivity tip.",
"Add a networking reference.",
"Use startup jargon.",
"Include a career advice angle.",
"Make it sound like a personal story.",
"Add a professional development theme.",
"Include a team collaboration reference.",
"Use industry-specific terminology.",
"Add a mentorship angle.",
"Include a work-life balance theme.",
"Make it sound like a company announcement."
];
/**
 * Build the user-message prompt sent to OpenAI: it asks the model to invent
 * a plausible "Dear LLM, ..." prompt that could have produced `post`.
 *
 * Randomizes the target word count, the persona line, and which guideline
 * instructions are included, so repeated calls yield varied prompts.
 *
 * @param {string} post - The LinkedIn post text to reverse-engineer.
 * @returns {string} The full prompt text (template literal, leading newline included).
 */
function generateRandomPrompt(post) {
  // Random word count between 10-50 words.
  const minWords = 10;
  const maxWords = 50;
  const targetWords = Math.floor(Math.random() * (maxWords - minWords + 1)) + minWords;

  // Random number of guideline instructions (3-10).
  const minInstructions = 3;
  const maxInstructions = 10;
  const numInstructions = Math.floor(Math.random() * (maxInstructions - minInstructions + 1)) + minInstructions;

  // Fisher–Yates shuffle on a copy, then take the first numInstructions.
  // (The previous `sort(() => Math.random() - 0.5)` used an inconsistent
  // comparator, which yields a biased, engine-dependent "shuffle".)
  const shuffledInstructions = [...POSSIBLE_INSTRUCTIONS];
  for (let i = shuffledInstructions.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [shuffledInstructions[i], shuffledInstructions[j]] = [shuffledInstructions[j], shuffledInstructions[i]];
  }
  const selectedInstructions = shuffledInstructions.slice(0, numInstructions);

  // Random persona for the system-style opening line of the prompt.
  const toneVariations = [
    "You are an expert prompt engineer.",
    "You are a creative AI prompt designer.",
    "You are a social media content strategist.",
    "You are a LinkedIn ghostwriter.",
    "You are a corporate communications expert."
  ];
  const randomTone = toneVariations[Math.floor(Math.random() * toneVariations.length)];

  debugLog('Generating random prompt configuration', {
    targetWords,
    numInstructions,
    selectedInstructions: selectedInstructions.map(i => i.substring(0, 30) + '...'),
    tone: randomTone
  });

  // Template lines stay at column 0: this text is sent verbatim to the API.
  return `
${randomTone}
Given the following LinkedIn post, invent ONE short but plausible prompt that
*could* have produced it. It must start exactly with: Dear LLM, Write a post ...
Guidelines:
- Keep it between ${minWords}-${maxWords} words TOTAL (aim for ${targetWords} words).
${selectedInstructions.map(instruction => `- ${instruction}`).join('\n')}
- Do not quote the post verbatim; paraphrase if needed.
- Return plain text only (no code fences, no quotation marks around the result).
- Hashtags and emojis are awesome.
LinkedIn post:
"""
${post}
"""
`;
}
/**
 * Timestamped debug logger for the background service worker.
 *
 * @param {string} message - Human-readable description of the event.
 * @param {*} [data=null] - Optional structured payload logged alongside.
 * @returns {void}
 */
function debugLog(message, data = null) {
  const timestamp = new Date().toISOString();
  const logMessage = `[DearLLM Background] ${timestamp}: ${message}`;
  // `??` (not `||`) so falsy-but-meaningful payloads (0, '', false) still print;
  // only null/undefined fall back to the empty placeholder.
  console.log(logMessage, data ?? '');
}
/**
 * Tiny in‑memory cache (resets when the SW goes to sleep).
 * key = SHA‑256 hash of post text, value = prompt string
 * Unbounded by design: MV3 service-worker teardown acts as the eviction policy.
 */
const cache = new Map();
/**
 * Compute the SHA-256 digest of a UTF-8 string via WebCrypto.
 *
 * @param {string} str - Text to hash.
 * @returns {Promise<string>} Lowercase hex digest (64 chars).
 */
async function sha256(str) {
  debugLog('Generating SHA-256 hash for text', { textLength: str.length });
  const encoded = new TextEncoder().encode(str);
  const digest = await crypto.subtle.digest("SHA-256", encoded);
  // Render each byte as two lowercase hex digits.
  let hex = "";
  for (const byte of new Uint8Array(digest)) {
    hex += byte.toString(16).padStart(2, "0");
  }
  debugLog('Hash generated', { hash: hex.substring(0, 8) + '...' });
  return hex;
}
/**
 * Ask OpenAI for a satirical "reverse prompt" for a LinkedIn post.
 * Results are memoized in the in-memory `cache`, keyed by the post's SHA-256.
 *
 * @param {string} postText - The LinkedIn post to reverse-engineer.
 * @param {string} apiKey - OpenAI API key (Bearer token).
 * @returns {Promise<string>} The generated prompt text.
 * @throws {Error} On a non-2xx API response or network failure.
 */
async function generatePrompt(postText, apiKey) {
  debugLog('Starting prompt generation', { textLength: postText.length });

  const hash = await sha256(postText);
  // Cached values are always non-empty strings, so `undefined` means "miss".
  const cachedPrompt = cache.get(hash);
  if (cachedPrompt !== undefined) {
    debugLog('Cache hit! Returning cached prompt', { hash: hash.substring(0, 8) + '...' });
    return cachedPrompt;
  }

  debugLog('Cache miss, calling OpenAI API', {
    model: MODEL,
    textLength: postText.length,
    hash: hash.substring(0, 8) + '...'
  });

  const requestBody = {
    model: MODEL,
    messages: [
      { role: "system", content: "You are ChatGPT." },
      { role: "user", content: generateRandomPrompt(postText) }
    ],
    temperature: 0.8, // higher temperature => more varied prompts
    max_tokens: 150 // room for longer prompts
  };

  try {
    const response = await fetch("https://api.openai.com/v1/chat/completions", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${apiKey}`
      },
      body: JSON.stringify(requestBody)
    });

    debugLog('OpenAI API response received', {
      status: response.status,
      ok: response.ok
    });

    if (!response.ok) {
      const errorText = await response.text();
      debugLog('OpenAI API error', { status: response.status, error: errorText });
      throw new Error(`OpenAI error: ${response.status} - ${errorText}`);
    }

    const payload = await response.json();
    const prompt = payload.choices?.[0]?.message?.content?.trim() || "(prompt‑error)";

    debugLog('Prompt generated successfully', {
      promptLength: prompt.length,
      prompt: prompt.substring(0, 50) + (prompt.length > 50 ? '...' : '')
    });

    cache.set(hash, prompt);
    debugLog('Prompt cached', { cacheSize: cache.size });
    return prompt;
  } catch (error) {
    debugLog('Error generating prompt', { error: error.message });
    throw error;
  }
}
/**
 * Message bridge between content scripts and the service worker.
 * Handles the "reversePrompt" action: reads the OpenAI key from
 * chrome.storage.local, generates (or fetches a cached) prompt, and replies
 * via sendResponse. `return true` keeps the response channel open across the
 * async work — removing it would close the channel before sendResponse runs.
 */
chrome.runtime.onMessage.addListener((msg, sender, sendResponse) => {
debugLog('Message received from content script', {
action: msg.action,
sender: sender.tab?.url,
textLength: msg.text?.length || 0
});
// Ignore unrelated messages; returning undefined leaves the channel closed.
if (msg.action !== "reversePrompt") {
debugLog('Ignoring message - not a reversePrompt action');
return;
}
// Callback-style storage read; sendResponse is invoked asynchronously inside.
chrome.storage.local.get("openai_api_key", async ({ openai_api_key }) => {
if (!openai_api_key) {
debugLog('Missing API key in storage');
sendResponse({ error: "Missing API key. Set it in the extension options." });
return;
}
debugLog('API key found, generating prompt');
generatePrompt(msg.text, openai_api_key)
.then(prompt => {
debugLog('Sending prompt response to content script', { promptLength: prompt.length });
sendResponse({ prompt });
})
.catch(err => {
debugLog('Error in prompt generation, sending error response', { error: err.message });
sendResponse({ error: err.message });
});
});
// indicate we'll reply asynchronously
return true;
});
// Service worker startup logging (fires on every MV3 worker wake, not just install).
debugLog('Background service worker started');