Skip to content

Commit 9e05e40

Browse files
committed
refactor: ruff linting
1 parent 6aff1d9 commit 9e05e40

File tree

14 files changed

+77
-192
lines changed

14 files changed

+77
-192
lines changed

api.py

Lines changed: 14 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -183,9 +183,7 @@ def get_collection():
183183
try:
184184
collection = client.get_collection(COLLECTION_NAME)
185185
doc_count = collection.count()
186-
logger.info(
187-
f"Connected to collection '{COLLECTION_NAME}' with {doc_count} documents"
188-
)
186+
logger.info(f"Connected to collection '{COLLECTION_NAME}' with {doc_count} documents")
189187
except ValueError:
190188
logger.info(f"Creating new collection '{COLLECTION_NAME}'")
191189
collection = client.create_collection(COLLECTION_NAME)
@@ -361,9 +359,7 @@ async def query(request: QueryRequest):
361359
"metadata": {
362360
"total_chunks": collection.count(),
363361
"query": request.query,
364-
"embedding_type": (
365-
"mock" if MOCK_EMBEDDINGS or not openai_client else "openai"
366-
),
362+
"embedding_type": ("mock" if MOCK_EMBEDDINGS or not openai_client else "openai"),
367363
},
368364
}
369365

@@ -401,14 +397,10 @@ class AgentQueryResponse(BaseModel):
401397

402398

403399
@app.post("/agent/query", response_model=AgentQueryResponse)
404-
async def agent_query(
405-
req: AgentQueryRequest, deps: None = Depends(verify_dependencies)
406-
):
400+
async def agent_query(req: AgentQueryRequest, deps: None = Depends(verify_dependencies)):
407401
"""Agent-optimized query endpoint for Cursor integration."""
408402
start_time = time.time()
409-
logger.info(
410-
f"Agent query received: '{req.query}', top_k={req.top_k}, context={req.context}"
411-
)
403+
logger.info(f"Agent query received: '{req.query}', top_k={req.top_k}, context={req.context}")
412404

413405
# Check if mock mode is requested for this query
414406
use_mock = MOCK_EMBEDDINGS
@@ -523,9 +515,7 @@ async def agent_query(
523515
suggested_prompt += f"\n--- Context {i} ({chunk['source']}) ---\n"
524516
suggested_prompt += f"{chunk['content']}\n"
525517

526-
suggested_prompt += (
527-
"\nBased on the above context, please help with the query."
528-
)
518+
suggested_prompt += "\nBased on the above context, please help with the query."
529519
logger.debug(f"Generated prompt in {time.time() - prompt_start:.2f}s")
530520

531521
response_time = time.time() - start_time
@@ -623,9 +613,7 @@ class IndexRequest(BaseModel):
623613
project_path: str = Field(
624614
"./whk-ignition-scada", description="Path to the Ignition project directory"
625615
)
626-
rebuild: bool = Field(
627-
False, description="Whether to rebuild the index from scratch"
628-
)
616+
rebuild: bool = Field(False, description="Whether to rebuild the index from scratch")
629617
skip_rate_limiting: bool = Field(
630618
False, description="Skip rate limiting for faster processing (use with caution)"
631619
)
@@ -713,9 +701,7 @@ async def generate_embedding_with_backoff(text, max_retries=5, initial_backoff=1
713701
):
714702
retries += 1
715703
if retries > max_retries:
716-
logger.error(
717-
f"Max retries reached for rate limit. Final error: {e!s}"
718-
)
704+
logger.error(f"Max retries reached for rate limit. Final error: {e!s}")
719705
raise
720706

721707
logger.info(
@@ -781,7 +767,7 @@ def chunk_by_characters(text, max_chunk_size):
781767
for file_index, file_path in enumerate(json_files):
782768
file_start_time = time.time()
783769
try:
784-
logger.info(f"Processing {file_path}... [{file_index+1}/{total_files}]")
770+
logger.info(f"Processing {file_path}... [{file_index + 1}/{total_files}]")
785771
with open(file_path, encoding="utf-8") as f:
786772
content = f.read()
787773

@@ -844,10 +830,7 @@ def chunk_by_characters(text, max_chunk_size):
844830
)
845831

846832
# For array-type JSONs, split at the top level
847-
if (
848-
isinstance(json_content, list)
849-
and len(json_content) > 1
850-
):
833+
if isinstance(json_content, list) and len(json_content) > 1:
851834
logger.info(
852835
f"Using array-level chunking for JSON array with {len(json_content)} items"
853836
)
@@ -876,10 +859,7 @@ def chunk_by_characters(text, max_chunk_size):
876859
)
877860
sub_chunks.extend(item_chunks)
878861
# If adding this would exceed limit, create a new chunk
879-
elif (
880-
current_tokens + item_tokens
881-
> hard_token_limit
882-
):
862+
elif current_tokens + item_tokens > hard_token_limit:
883863
array_str = json.dumps(current_array)
884864
sub_chunks.append(array_str)
885865
current_array = [item]
@@ -901,9 +881,7 @@ def chunk_by_characters(text, max_chunk_size):
901881
content,
902882
int(hard_token_limit / 1.2),
903883
)
904-
chunks = [
905-
(chunk, metadata) for chunk in text_chunks
906-
]
884+
chunks = [(chunk, metadata) for chunk in text_chunks]
907885
except json.JSONDecodeError:
908886
# If JSON parsing fails, use character-level chunking
909887
text_chunks = chunk_by_characters(
@@ -928,14 +906,10 @@ def chunk_by_characters(text, max_chunk_size):
928906
for i, (chunk_text, chunk_metadata) in enumerate(chunks):
929907
try:
930908
# Generate embedding with backoff
931-
embedding = await generate_embedding_with_backoff(
932-
chunk_text
933-
)
909+
embedding = await generate_embedding_with_backoff(chunk_text)
934910

935911
# Create a unique ID for this chunk
936-
file_path_replaced = file_path.replace("/", "_").replace(
937-
"\\", "_"
938-
)
912+
file_path_replaced = file_path.replace("/", "_").replace("\\", "_")
939913
chunk_id = f"{file_path_replaced}_chunk_{i}"
940914

941915
# Add to collection
@@ -948,9 +922,7 @@ def chunk_by_characters(text, max_chunk_size):
948922

949923
chunk_count += 1
950924
except Exception as e:
951-
logger.error(
952-
f"Error processing chunk {i} of {file_path}: {e!s}"
953-
)
925+
logger.error(f"Error processing chunk {i} of {file_path}: {e!s}")
954926

955927
doc_count += 1
956928
logger.info(f"Indexed {file_path} into {len(chunks)} chunks")

cursor_agent.py

Lines changed: 8 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,9 @@ def get_cursor_context(
146146
context_str += f"--- Context {i}: {source} ---\n"
147147
context_str += f"{content}\n\n"
148148

149-
context_str += "Use the above context to help answer the query or generate appropriate code.\n"
149+
context_str += (
150+
"Use the above context to help answer the query or generate appropriate code.\n"
151+
)
150152
return context_str
151153

152154
# No context available
@@ -182,9 +184,7 @@ def get_ignition_tag_info(tag_name: str) -> dict:
182184
}
183185

184186
# Get tag information from the RAG system
185-
rag_results = query_rag(
186-
query=f"Tag configuration for {tag_name}", top_k=1, filter_type="tag"
187-
)
187+
rag_results = query_rag(query=f"Tag configuration for {tag_name}", top_k=1, filter_type="tag")
188188

189189
# Extract tag info from the context
190190
context_chunks = rag_results.get("context_chunks", [])
@@ -208,9 +208,7 @@ def get_ignition_tag_info(tag_name: str) -> dict:
208208
return {"error": f"Could not parse tag information for {tag_name}"}
209209

210210

211-
def get_ignition_view_component(
212-
view_name: str, component_name: Optional[str] = None
213-
) -> dict:
211+
def get_ignition_view_component(view_name: str, component_name: Optional[str] = None) -> dict:
214212
"""
215213
Get information about a specific view or component in an Ignition project.
216214
@@ -223,9 +221,7 @@ def get_ignition_view_component(
223221
"""
224222
# Check if mock mode is enabled
225223
if USE_MOCK_EMBEDDINGS:
226-
logger.info(
227-
f"Using mock data for view: {view_name}, component: {component_name}"
228-
)
224+
logger.info(f"Using mock data for view: {view_name}, component: {component_name}")
229225
# Create a mock response
230226
if component_name:
231227
return {
@@ -238,9 +234,7 @@ def get_ignition_view_component(
238234
"width": 200,
239235
"height": 150,
240236
"text": (
241-
f"Mock {component_name}"
242-
if "label" in component_name.lower()
243-
else None
237+
f"Mock {component_name}" if "label" in component_name.lower() else None
244238
),
245239
},
246240
"mock_used": True,
@@ -281,11 +275,7 @@ def get_ignition_view_component(
281275
content_obj = json.loads(content.strip())
282276

283277
# For component search
284-
if (
285-
component_name
286-
and "name" in content_obj
287-
and content_obj["name"] == component_name
288-
):
278+
if component_name and "name" in content_obj and content_obj["name"] == component_name:
289279
return content_obj
290280

291281
# For view search

cursor_client.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ def get_rag_context(query, current_file=None, top_k=3, filter_type=None):
6868
metadata = chunk.get("metadata", {})
6969
file_path = metadata.get("filepath", "Unknown file")
7070

71-
context_text += f"## Context {i+1}: {source} ({file_path})\n"
71+
context_text += f"## Context {i + 1}: {source} ({file_path})\n"
7272
context_text += "```json\n"
7373
context_text += content + "\n"
7474
context_text += "```\n\n"
@@ -101,13 +101,9 @@ def main():
101101
parser = argparse.ArgumentParser(description="Cursor Client for Ignition RAG")
102102
parser.add_argument("query", help="The query to search for")
103103
parser.add_argument("--file", "-f", help="Path to the current file")
104-
parser.add_argument(
105-
"--top-k", "-k", type=int, default=3, help="Number of results to return"
106-
)
104+
parser.add_argument("--top-k", "-k", type=int, default=3, help="Number of results to return")
107105
parser.add_argument("--filter", help="Filter by document type (perspective or tag)")
108-
parser.add_argument(
109-
"--output", "-o", help="Output format (text or json)", default="text"
110-
)
106+
parser.add_argument("--output", "-o", help="Output format (text or json)", default="text")
111107

112108
args = parser.parse_args()
113109

indexer.py

Lines changed: 7 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -167,9 +167,7 @@ def load_json_files(file_paths: List[str]) -> List[Dict[str, Any]]:
167167
return documents
168168

169169

170-
def chunk_perspective_view(
171-
view_json: Dict[str, Any], view_meta: Dict[str, str]
172-
) -> List[tuple]:
170+
def chunk_perspective_view(view_json: Dict[str, Any], view_meta: Dict[str, str]) -> List[tuple]:
173171
"""Split a Perspective view JSON into semantically meaningful chunks."""
174172
chunks = []
175173

@@ -230,9 +228,7 @@ def process_component(comp, view_meta, chunks, parent_path=""):
230228
comp_copy = {k: v for k, v in comp.items() if k != "children"}
231229
comp_without_children = json.dumps(comp_copy, ensure_ascii=False)
232230
if len(enc.encode(comp_without_children)) <= MAX_TOKENS:
233-
chunks.append(
234-
(comp_without_children, {**comp_meta, "section": "properties"})
235-
)
231+
chunks.append((comp_without_children, {**comp_meta, "section": "properties"}))
236232
else:
237233
# Split properties if no children but still too large
238234
props = list(comp.items())
@@ -341,13 +337,11 @@ def generate_embeddings(texts: List[str], batch_size: int = 20) -> List[List[flo
341337
batch = texts[i : i + batch_size]
342338
try:
343339
# Updated for OpenAI v1.0+
344-
response = client.embeddings.create(
345-
model="text-embedding-ada-002", input=batch
346-
)
340+
response = client.embeddings.create(model="text-embedding-ada-002", input=batch)
347341
batch_embeddings = [item.embedding for item in response.data]
348342
embeddings.extend(batch_embeddings)
349343
print(
350-
f"Generated embeddings for batch {i//batch_size + 1}/{(len(texts) + batch_size - 1) // batch_size}"
344+
f"Generated embeddings for batch {i // batch_size + 1}/{(len(texts) + batch_size - 1) // batch_size}"
351345
)
352346
except Exception as e:
353347
print(f"Error generating embeddings for batch starting at index {i}: {e}")
@@ -422,15 +416,11 @@ def load_last_index_time() -> float:
422416
@app.command()
423417
def main(
424418
path: str = typer.Argument(..., help="Path to the Ignition project directory"),
425-
rebuild: bool = typer.Option(
426-
False, "--rebuild", help="Rebuild the index from scratch"
427-
),
419+
rebuild: bool = typer.Option(False, "--rebuild", help="Rebuild the index from scratch"),
428420
changed_only: bool = typer.Option(
429421
False, "--changed-only", help="Only index files changed since last run"
430422
),
431-
file: Optional[str] = typer.Option(
432-
None, "--file", help="Index only a specific file"
433-
),
423+
file: Optional[str] = typer.Option(None, "--file", help="Index only a specific file"),
434424
mock: bool = typer.Option(
435425
False,
436426
"--mock",
@@ -468,9 +458,7 @@ def main(
468458
elif changed_only and not rebuild:
469459
# Index only files changed since last run
470460
last_index_time = load_last_index_time()
471-
json_files = [
472-
f for f in all_json_files if os.path.getmtime(f) > last_index_time
473-
]
461+
json_files = [f for f in all_json_files if os.path.getmtime(f) > last_index_time]
474462
print(
475463
f"Found {len(json_files)} changed files since {datetime.fromtimestamp(last_index_time)}"
476464
)

logger.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -26,14 +26,18 @@
2626

2727
# Add file handler for general logs
2828
file_handler = RotatingFileHandler(
29-
os.path.join(LOG_DIR, "app.log"), maxBytes=10485760, backupCount=10 # 10MB
29+
os.path.join(LOG_DIR, "app.log"),
30+
maxBytes=10485760,
31+
backupCount=10, # 10MB
3032
)
3133
file_handler.setFormatter(log_format)
3234
logger.addHandler(file_handler)
3335

3436
# Add file handler for errors only
3537
error_handler = RotatingFileHandler(
36-
os.path.join(LOG_DIR, "error.log"), maxBytes=10485760, backupCount=10 # 10MB
38+
os.path.join(LOG_DIR, "error.log"),
39+
maxBytes=10485760,
40+
backupCount=10, # 10MB
3741
)
3842
error_handler.setLevel(logging.ERROR)
3943
error_handler.setFormatter(log_format)
@@ -69,7 +73,5 @@ async def __call__(self, scope, receive, send):
6973
await self.app(scope, receive, send)
7074
self.logger.info(f"Response {request_id}: {method} {path} completed")
7175
except Exception as e:
72-
self.logger.error(
73-
f"Error {request_id}: {method} {path} - {e!s}", exc_info=True
74-
)
76+
self.logger.error(f"Error {request_id}: {method} {path} - {e!s}", exc_info=True)
7577
raise

0 commit comments

Comments (0)