forked from actions/ai-inference
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path action.yml
More file actions
71 lines (67 loc) · 1.98 KB
/
action.yml
File metadata and controls
71 lines (67 loc) · 1.98 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
name: 'AI Inference'
description: Generate an AI response based on a provided prompt
author: 'GitHub'

# Add your action's branding here. This will appear on the GitHub Marketplace.
branding:
  icon: 'message-square'
  color: red

# Define your inputs here.
inputs:
  prompt:
    description: The prompt for the model
    required: false
    default: ''
  prompt-file:
    # Folded scalar: renders as a single line at runtime.
    description: >-
      Path to a file containing the prompt (supports .txt and .prompt.yml
      formats)
    required: false
    default: ''
  input:
    description: Template variables in YAML format for .prompt.yml files
    required: false
    default: ''
  file_input:
    description: >-
      Template variables in YAML format mapping variable names to file paths.
      The file contents will be used for templating.
    required: false
    default: ''
  model:
    description: The model to use
    required: false
    default: 'openai/gpt-4o'
  endpoint:
    description: The endpoint to use
    required: false
    default: 'https://models.github.ai/inference'
  system-prompt:
    description: The system prompt for the model
    required: false
    default: 'You are a helpful assistant'
  system-prompt-file:
    description: Path to a file containing the system prompt
    required: false
    default: ''
  max-tokens:
    # Quoted so the value stays a string; Actions inputs are strings.
    description: The maximum number of tokens to generate
    required: false
    default: '200'
  token:
    description: The token to use
    required: false
    default: ${{ github.token }}
  enable-github-mcp:
    description: Enable Model Context Protocol integration with GitHub tools
    required: false
    default: 'false'
  github-mcp-token:
    description: >-
      The token to use for GitHub MCP server (defaults to the main token if not
      specified). This must be a PAT for MCP to work.
    required: false
    default: ''

# Define your outputs here.
outputs:
  response:
    description: The response from the model
  response-file:
    description: The file path where the response is saved

runs:
  using: node24
  main: dist/index.js