Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file.
74 changes: 74 additions & 0 deletions examples/child_process_bridge/call_from_nodejs.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import asyncio
import inspect
import json
import sys

from reddit_user_generate import generate_user_data

async def handle_request(request):
    """Dispatch a single JSON request to a module-level function.

    Expected request shape: {"id": ..., "method": str, "params": ...}.
    Returns a JSON-serializable response dict with "id", "result", "error".

    NOTE(review): dispatch via globals() lets the caller invoke ANY
    module-level name; consider an explicit allow-list of exposed methods.
    """
    # BUG FIX: resolve the request id before anything that can raise.
    # Previously `req_id` was read inside the except handler but only
    # assigned inside the try body, so a request missing "method" raised
    # NameError in the handler and no response was ever sent back.
    req_id = request.get('id')
    try:
        method = request['method']

        # retrieve function call in python file
        method_func = globals()[method]

        # check params
        sig = inspect.signature(method_func)
        param_count = len(sig.parameters)

        # call function with/without params
        if param_count == 0:
            result = method_func()
        else:
            result = method_func(request["params"])

        # Support both async and plain functions (original required async).
        if inspect.isawaitable(result):
            result = await result

        return {
            "id": req_id,
            "result": result,
            "error": None
        }
    except Exception as e:
        return {
            "id": req_id,
            "result": None,
            "error": str(e)
        }

async def main():
    """Bridge loop: read newline-delimited JSON requests from stdin,
    dispatch each one concurrently via handle_request, and write the
    JSON response (one per line) to stdout.

    Exits when stdin reaches EOF (parent process closed the pipe).
    """
    # FIX: get_event_loop() inside a coroutine is deprecated since 3.10;
    # get_running_loop() is the correct call here.
    loop = asyncio.get_running_loop()

    # Wrap sys.stdin in an asyncio StreamReader so reads don't block the loop.
    reader = asyncio.StreamReader()
    protocol = asyncio.StreamReaderProtocol(reader)
    await loop.connect_read_pipe(lambda: protocol, sys.stdin)

    # listening stdin: function call with params
    while True:
        line = await reader.readline()
        if not line:  # EOF — parent closed the pipe
            break

        try:
            request = json.loads(line.decode())
        except json.JSONDecodeError:
            sys.stdout.write(json.dumps({"error": "Invalid JSON format"}) + "\n")
            sys.stdout.flush()
            continue

        # Run the handler as a task so slow requests don't block reading.
        task = asyncio.create_task(handle_request(request))

        # callback: write the response line once the handler finishes
        def send_response(fut):
            response = fut.result()
            sys.stdout.write(json.dumps(response) + "\n")
            sys.stdout.flush()

        task.add_done_callback(send_response)


if __name__ == "__main__":
    asyncio.run(main())
139 changes: 139 additions & 0 deletions examples/child_process_bridge/reddit_user_generate.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,139 @@
import asyncio
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
import json
import random

from openai import OpenAI

# Set your OpenAI API key
# SECURITY NOTE(review): do not commit real keys — read api_key/base_url
# from environment variables (e.g. os.environ["OPENAI_API_KEY"]) instead
# of hard-coding them here.
client = OpenAI(api_key='sk-**', base_url='')
model_type = ''

def create_user_profile(config_params, config_prompts):
while True:
try:
agent_profile = {}
for key in config_params:
match key:
case "age":
agent_profile[key] = customize_random_age(config_params[key]["groups"], config_params[key]["ratios"])
case "country":
agent_profile[key] = customize_random_country(config_params[key]["groups"], config_params[key]["ratios"])
case "interested topics":
pass
case _: # Default case for unmatched keys, including default gender, mbti, profession
agent_profile[key] = customize_random_traits(config_params[key]["groups"], config_params[key]["ratios"])

topics = customize_randow_topics(config_params[key]["names"], config_params[key]["descs"], agent_profile) # create user interest topics
agent_profile['interested topics'] = topics

profile = generate_user_profile(agent_profile, config_prompts) # create user profile

return { **profile, **agent_profile }
except Exception as e:
print(f"Profile generation failed: {e}. Retrying...")


async def generate_user_data(config):
    """Generate `config["count"]` user profiles concurrently.

    Profile creation is blocking (synchronous OpenAI calls), so it is
    fanned out over a thread pool and awaited via run_in_executor.
    Returns a list of profile dicts.
    """
    config_count = config["count"]
    config_params = config["params"]
    config_prompts = config["prompts"]

    # FIX: get_event_loop() inside a coroutine is deprecated since 3.10.
    loop = asyncio.get_running_loop()

    user_data = []
    start_time = datetime.now()
    max_workers = 100  # Adjust according to your system capability
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            loop.run_in_executor(executor, create_user_profile, config_params, config_prompts)
            for _ in range(config_count)
        ]
        results = await asyncio.gather(*futures)

    # NOTE(review): this reports progress only after gather() completes,
    # so the per-profile lines all print at the end — cosmetic only.
    for i, profile in enumerate(results):
        user_data.append(profile)
        elapsed_time = datetime.now() - start_time
        print(f"Generated {i + 1}/{config_count} user profiles. Time elapsed: "
              f"{elapsed_time}")

    return user_data

def customize_random_country(groups, ratios):
    """Draw one country from *groups* weighted by *ratios*.

    The sentinel group "Other" delegates to the LLM to pick a concrete
    country name; every other draw is returned as-is.
    """
    choice = random.choices(groups, ratios)[0]
    if choice != "Other":
        return choice
    response = client.chat.completions.create(
        model=model_type,
        messages=[{
            "role": "system",
            "content": "Select a real country name randomly, only country name is needed"  # GPT might be right, Qwen need this complement
        }])
    return response.choices[0].message.content.strip()

def customize_random_traits(groups, ratios):
    """Return a single trait drawn from *groups*, weighted by *ratios*."""
    (trait,) = random.choices(groups, ratios, k=1)
    return trait

def customize_random_age(groups, ratios):
    """Pick an age group weighted by *ratios*, then return a uniform
    random age inside that group's bounds.

    Any group name not in the table falls through to the 65-100 bucket
    (matching the original else branch).
    """
    bounds = {
        'underage': (10, 17),
        '18-29': (18, 29),
        '30-49': (30, 49),
        '50-64': (50, 64),
    }
    group = random.choices(groups, ratios)[0]
    low, high = bounds.get(group, (65, 100))
    return random.randint(low, high)

def customize_randow_topics(names, descs, traits):
    """Select interest topics for a user and return them as topic names.

    Delegates the selection to the LLM (customize_interested_topics) and
    then maps the returned indices back onto *names*.
    (Function name keeps the historical "randow" typo — it is part of the
    public interface used by callers.)
    """
    selected_indices = customize_interested_topics(names, descs, traits)
    return index_to_topics(selected_indices, names)

def index_to_topics(index_lst, names):
    """Translate 0-based topic indices into the corresponding topic names.

    Raises KeyError (as the original did) for any index outside
    0..len(names)-1. Indices are coerced with int() so numeric strings
    and JSON floats like 1.0 are accepted.
    """
    # FIX: the original rebuilt a {str(i): name} dict on every call just to
    # do integer indexing; index the list directly instead.
    result = []
    for index in index_lst:
        i = int(index)
        if not 0 <= i < len(names):
            raise KeyError(str(index))
        result.append(names[i])
    return result

def customize_interested_topics(names, descs, traits):
    """Ask the LLM to pick 2-3 interest topics for a user.

    Builds a prompt from the user's *traits* and the available topic
    *names*/*descs*, and returns the model's answer parsed as a list of
    topic indices. Raises json.JSONDecodeError if the model output is not
    a parseable list.
    """
    prompt = f"""Based on the provided personality traits, age, gender and profession, please select 2-3 topics of interest from the given list.
    Input:\n"""
    for key in traits:
        prompt += f" {'Personality Traits' if key == 'mbti' else key}: {traits[key]}\n"

    prompt += "Available Topics:\n"
    # BUG FIX: topics were listed 1-based ({index + 1}) while index_to_topics
    # looks the returned numbers up 0-based — every selection was off by one
    # and the last topic raised KeyError. Number the list 0-based so the
    # model's answer matches the lookup.
    for index, name in enumerate(names):
        prompt += f" {index}. {name}: {descs[index]}\n"

    prompt += f"""Output:
    [list of topic numbers]
    Ensure your output could be parsed to **list**, don't output anything else."""

    response = client.chat.completions.create(model=model_type,
        messages=[{
            "role": "system",
            "content": prompt
        }])

    topics = response.choices[0].message.content.strip()
    return json.loads(topics)

def generate_user_profile(traits, prompts):
    """Ask the LLM for a full profile (name, username, bio, persona)
    built from *traits*, with *prompts* appended as extra instructions.

    Returns the model output parsed as a dict; raises
    json.JSONDecodeError if the output is not valid JSON.
    """
    trait_lines = "".join(f" {key}: {traits[key]}\n" for key in traits)
    prompt = (
        """Please generate a social media user profile based on the provided personal information, including a real name, username, user bio, and a new user persona. The focus should be on creating a fictional background story and detailed interests based on their hobbies and profession.
    Input:\n"""
        + trait_lines
        + prompts
        + "Ensure the output can be directly parsed to **JSON**, do not output anything else."  # noqa: E501
    )

    response = client.chat.completions.create(
        model=model_type,
        messages=[{
            "role": "system",
            "content": prompt
        }])

    profile = response.choices[0].message.content.strip()
    return json.loads(profile)
9 changes: 9 additions & 0 deletions ui/web/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
.DS_Store
/node_modules/
*.tsbuildinfo

# React Router
/.react-router/
/build/

/log/
8 changes: 8 additions & 0 deletions ui/web/.vite/deps/_metadata.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
{
"hash": "3c89e79a",
"configHash": "b38cd1b6",
"lockfileHash": "b8144884",
"browserHash": "4e6ee8ad",
"optimized": {},
"chunks": {}
}
3 changes: 3 additions & 0 deletions ui/web/.vite/deps/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"type": "module"
}
55 changes: 55 additions & 0 deletions ui/web/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Stage 1: Build the application
FROM node:22-alpine AS builder

# Enable pnpm and set up cache
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable

WORKDIR /app

# Copy package.json and lockfile first to leverage Docker cache
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY apps/server/package.json ./apps/server/

# Install dependencies using pnpm with store cache
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm install --no-frozen-lockfile

# Copy the rest of the code
COPY . .

# Build the application
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm --filter @cbnsndwch/react-router-nest-server run build

# Stage 2: Production image
FROM node:22-alpine

# Add metadata labels
LABEL org.opencontainers.image.title="React Router Nest Server"
LABEL org.opencontainers.image.description="A demo server application built with React Router and NestJS"
LABEL org.opencontainers.image.source="https://github.com/cbnsndwch/react-router-nest"
LABEL org.opencontainers.image.licenses="MIT"

# Set environment
ENV NODE_ENV=production
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack enable

WORKDIR /app

# Copy package.json and lockfile
# NOTE(review): only package.json is copied here despite the comment — the
# lockfile stays in the builder stage, hence --no-frozen-lockfile below.
COPY --from=builder /app/apps/server/package.json /app/package.json

# Install production dependencies using the same cache
RUN --mount=type=cache,id=pnpm-store,target=/pnpm/store pnpm install --prod --no-frozen-lockfile

# Copy all built application artifacts
# dist holds the NestJS server bundle; build holds the React Router client/SSR output.
COPY --from=builder /app/apps/server/dist /app/dist
COPY --from=builder /app/apps/server/build /app/build

# Expose the application port
EXPOSE 3000

# Run the application
CMD ["node", "dist/main.js"]
82 changes: 82 additions & 0 deletions ui/web/README-TESTING.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
# Testing Setup for React Router 7 in NestJS

## Overview

This project includes test setup for the React Router 7 integration with NestJS. The tests are designed to validate:

1. The custom server integration between React Router 7 and NestJS
2. Route function behavior (loaders, actions, meta functions)
3. Server-side rendering capabilities

## Testing Structure

The testing is organized as follows:

- `src/react-router.spec.ts` - Tests for the NestJS-React Router integration
- `test/router-test-utils.tsx` - Utilities for testing React Router components
- `test/setup.ts` - Global test setup for Vitest
- `docs/TESTING.md` - Detailed documentation on testing strategies

## Working Tests

- ✅ `src/react-router.spec.ts` - Successfully tests the NestJS integration with React Router
- ✅ Unit tests for NestJS services and controllers

## Current Limitations

There are some limitations when testing React Router 7 components:

1. **Client/Server Boundaries**: React Router 7 enforces strict separation between server and client code. This makes it difficult to directly test certain files like `entry.server.tsx` or route components.

2. **Vite Plugin Restrictions**: The React Router 7 Vite plugin enforces these boundaries during testing, resulting in errors like "React Router Vite plugin can't detect preamble" when attempting to import route components.

3. **Testing Route Components**: Due to the limitations above, testing route components directly can be challenging. Instead, we recommend:
- Testing individual exports (meta, loader, action) in isolation
- Using mock data to test the rendering outside the context of React Router

## Recommended Testing Approach

1. **Unit Test Functions**: Test loader, action, and meta functions directly with mocked context.

2. **Test NestJS Integration**: Test how NestJS mounts the React Router handler.

3. **Component Testing**: Test UI components with mocked props rather than within the React Router context.

## Example Test Structure

```typescript
// For testing loader functions
import { loader } from './route';
import { createMockLoaderArgs } from '../../test/router-test-utils';

describe('Route Loader', () => {
it('returns expected data', async () => {
const args = createMockLoaderArgs();
const result = await loader(args);
expect(result).toEqual(/*...*/);
});
});
```

## Future Improvements

We can improve the testing setup by:

1. Configuring a special test environment that bypasses the Vite plugin restrictions
2. Creating more comprehensive mocks for the React Router context
3. Investigating ways to unit test server-side rendering behavior

## Running Tests

```bash
# Run all tests
pnpm test

# Run tests in watch mode
pnpm test:watch

# Run tests with coverage
pnpm test:cov
```

For more detailed information about testing approaches, see the [TESTING.md](./docs/TESTING.md) documentation.
Loading