Skip to content

Commit 37485dc

Browse files
fix: improve OpenAI proxy request handling and add Claude PR workflow
- Fix hyper request cloning by manually reconstructing requests
- Remove usage of private error.kind() method
- Optimize request retry logic with exponential backoff
- Add GitHub Actions workflow for Claude PR assistant
- Apply proper code formatting

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>
1 parent db40038 commit 37485dc

File tree

2 files changed

+118
-8
lines changed

2 files changed

+118
-8
lines changed

.github/workflows/claude.yml

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
name: Claude Code
2+
3+
on:
4+
issue_comment:
5+
types: [created]
6+
pull_request_review_comment:
7+
types: [created]
8+
issues:
9+
types: [opened, assigned]
10+
pull_request_review:
11+
types: [submitted]
12+
13+
jobs:
14+
claude:
15+
if: |
16+
(github.event_name == 'issue_comment' && contains(github.event.comment.body, '@claude')) ||
17+
(github.event_name == 'pull_request_review_comment' && contains(github.event.comment.body, '@claude')) ||
18+
(github.event_name == 'pull_request_review' && contains(github.event.review.body, '@claude')) ||
19+
(github.event_name == 'issues' && (contains(github.event.issue.body, '@claude') || contains(github.event.issue.title, '@claude')))
20+
runs-on: ubuntu-latest
21+
permissions:
22+
contents: read
23+
pull-requests: read
24+
issues: read
25+
id-token: write
26+
steps:
27+
- name: Checkout repository
28+
uses: actions/checkout@v4
29+
with:
30+
fetch-depth: 1
31+
32+
- name: Run Claude Code
33+
id: claude
34+
uses: anthropics/claude-code-action@beta
35+
with:
36+
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
37+

src/web/openai.rs

Lines changed: 81 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -98,10 +98,15 @@ async fn proxy_openai(
9898
};
9999
let openai_api_base = &state.openai_api_base;
100100

101-
// Create a new hyper client
101+
// Create a new hyper client with extended timeouts
102102
let https = HttpsConnector::new();
103103
let client = Client::builder()
104-
.pool_idle_timeout(Duration::from_secs(15))
104+
.pool_idle_timeout(Duration::from_secs(60)) // Increased from 15 to 60 seconds
105+
.pool_max_idle_per_host(32) // Increased connection pool size
106+
.http2_keep_alive_interval(Some(Duration::from_secs(20))) // Keep connections alive
107+
.http2_keep_alive_timeout(Duration::from_secs(10)) // Timeout for keep-alive pings
108+
.http1_read_buf_exact_size(65_536) // Increased buffer size
109+
.http1_title_case_headers(true) // Improved compatibility
105110
.build::<_, Body>(https);
106111

107112
// Prepare the request to OpenAI
@@ -137,11 +142,73 @@ async fn proxy_openai(
137142
})?;
138143

139144
debug!("Sending request to OpenAI");
140-
// Send the request to OpenAI
141-
let res = client.request(req).await.map_err(|e| {
142-
error!("Failed to send request to OpenAI: {:?}", e);
143-
ApiError::InternalServerError
144-
})?;
145+
// Send the request to OpenAI with retry logic
146+
let mut attempts = 0;
147+
let max_attempts = 3;
148+
let mut _last_error = None;
149+
150+
// We need to manually clone the request since hyper::Request doesn't have try_clone
151+
// Extract the components we need for retrying
152+
let (parts, body) = req.into_parts();
153+
let method = parts.method.clone();
154+
let uri = parts.uri.clone();
155+
let headers = parts.headers.clone();
156+
let version = parts.version;
157+
158+
// Convert body to bytes so we can reuse it
159+
let body_bytes = match hyper::body::to_bytes(body).await {
160+
Ok(bytes) => bytes,
161+
Err(e) => {
162+
error!("Failed to read request body: {:?}", e);
163+
return Err(ApiError::InternalServerError);
164+
}
165+
};
166+
167+
let res = loop {
168+
attempts += 1;
169+
170+
// Rebuild the request from the saved components
171+
let mut req_builder = hyper::Request::builder()
172+
.method(method.clone())
173+
.uri(uri.clone())
174+
.version(version);
175+
176+
// Add all headers
177+
for (name, value) in headers.iter() {
178+
req_builder = req_builder.header(name, value);
179+
}
180+
181+
// Build the request with the body
182+
let request = req_builder
183+
.body(hyper::Body::from(body_bytes.clone()))
184+
.map_err(|e| {
185+
error!("Failed to build request: {:?}", e);
186+
ApiError::InternalServerError
187+
})?;
188+
189+
match client.request(request).await {
190+
Ok(response) => break response,
191+
Err(e) => {
192+
error!(
193+
"Attempt {}/{}: Failed to send request to OpenAI: {:?}",
194+
attempts, max_attempts, e
195+
);
196+
_last_error = Some(e);
197+
198+
// If we've reached max attempts, return the error
199+
if attempts >= max_attempts {
200+
error!("Max retry attempts reached. Giving up.");
201+
return Err(ApiError::InternalServerError);
202+
}
203+
204+
// Wait before retrying (exponential backoff with max delay cap of 3 seconds)
205+
let backoff_ms = 500 * 2_u64.pow(attempts as u32 - 1);
206+
let capped_backoff_ms = std::cmp::min(backoff_ms, 3000); // Cap at 3 seconds
207+
let delay = std::time::Duration::from_millis(capped_backoff_ms);
208+
tokio::time::sleep(delay).await;
209+
}
210+
}
211+
};
145212

146213
// Check if the response is successful
147214
if !res.status().is_success() {
@@ -201,12 +268,18 @@ async fn proxy_openai(
201268
processed_events
202269
}
203270
Err(e) => {
271+
// Log the error with more details to help debug
204272
error!(
205273
"Error reading response body: {:?}. Current buffer: {}",
206274
e,
207275
buffer.lock().unwrap()
208276
);
209-
vec![Ok(Event::default().data("Error reading response"))]
277+
278+
// Return a more specific error message that can help with debugging
279+
let error_msg = format!("Error reading response: {:?}", e);
280+
281+
// Send an error event to the client
282+
vec![Ok(Event::default().data(error_msg))]
210283
}
211284
}
212285
}

0 commit comments

Comments (0)