Skip to content

Commit c8d030c

Browse files
committed
Extend chat-models and prompts samples
1 parent 375f172 commit c8d030c

File tree

87 files changed

+1108
-287
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

87 files changed

+1108
-287
lines changed

01-chat-models/chat-models-mistral-ai/README.md

+5-5
Original file line numberDiff line numberDiff line change
@@ -71,29 +71,29 @@ You can now call the application that will use Mistral AI to generate text based
7171
This example uses [httpie](https://httpie.io) to send HTTP requests.
7272

7373
```shell
74-
http :8080/chat
74+
http :8080/chat -b
7575
```
7676

7777
Try passing your custom prompt and check the result.
7878

7979
```shell
80-
http :8080/chat question=="What is the capital of Italy?"
80+
http :8080/chat question=="What is the capital of Italy?" -b
8181
```
8282

8383
The next request is configured with a custom temperature value to obtain a more creative, yet less precise answer.
8484

8585
```shell
86-
http :8080/chat/generic-options question=="Why is a raven like a writing desk? Give a short answer."
86+
http :8080/chat/generic-options question=="Why is a raven like a writing desk? Give a short answer." -b
8787
```
8888

8989
The next request is configured with Mistral AI-specific customizations.
9090

9191
```shell
92-
http :8080/chat/provider-options question=="What can you see beyond what you can see? Give a short answer."
92+
http :8080/chat/provider-options question=="What can you see beyond what you can see? Give a short answer." -b
9393
```
9494

9595
The final request returns the model's answer as a stream.
9696

9797
```shell
98-
http --stream :8080/chat/stream question=="Why is a raven like a writing desk? Answer in 3 paragraphs."
98+
http --stream :8080/chat/stream question=="Why is a raven like a writing desk? Answer in 3 paragraphs." -b
9999
```

01-chat-models/chat-models-mistral-ai/build.gradle

+1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ plugins {
22
id 'java'
33
id 'org.springframework.boot'
44
id 'io.spring.dependency-management'
5+
id 'org.graalvm.buildtools.native'
56
}
67

78
group = 'com.thomasvitale'

01-chat-models/chat-models-mistral-ai/src/main/java/com/thomasvitale/ai/spring/ChatClientController.java 01-chat-models/chat-models-mistral-ai/src/main/java/com/thomasvitale/ai/spring/ChatController.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@
1212
* Chat examples using the high-level ChatClient API.
1313
*/
1414
@RestController
15-
class ChatClientController {
15+
class ChatController {
1616

1717
private final ChatClient chatClient;
1818

19-
ChatClientController(ChatClient.Builder chatClientBuilder) {
19+
ChatController(ChatClient.Builder chatClientBuilder) {
2020
this.chatClient = chatClientBuilder.build();
2121
}
2222

01-chat-models/chat-models-mistral-ai/src/main/java/com/thomasvitale/ai/spring/ChatModelController.java 01-chat-models/chat-models-mistral-ai/src/main/java/com/thomasvitale/ai/spring/model/ChatModelController.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package com.thomasvitale.ai.spring;
1+
package com.thomasvitale.ai.spring.model;
22

33
import org.springframework.ai.chat.model.ChatModel;
44
import org.springframework.ai.chat.prompt.ChatOptionsBuilder;

01-chat-models/chat-models-multiple-providers/README.md

+4-4
Original file line numberDiff line numberDiff line change
@@ -40,23 +40,23 @@ This example uses [httpie](https://httpie.io) to send HTTP requests.
4040
Using OpenAI:
4141

4242
```shell
43-
http :8080/chat/openai question=="What is the capital of Italy?"
43+
http :8080/chat/openai question=="What is the capital of Italy?" -b
4444
```
4545

4646
Using Mistral AI:
4747

4848
```shell
49-
http :8080/chat/mistral-ai question=="What is the capital of Italy?"
49+
http :8080/chat/mistral-ai question=="What is the capital of Italy?" -b
5050
```
5151

5252
The next request is configured with OpenAI-specific customizations.
5353

5454
```shell
55-
http :8080/chat/openai-options question=="Why is a raven like a writing desk? Give a short answer."
55+
http :8080/chat/openai-options question=="Why is a raven like a writing desk? Give a short answer." -b
5656
```
5757

5858
The next request is configured with Mistral AI-specific customizations.
5959

6060
```shell
61-
http :8080/chat/mistral-ai-options question=="Why is a raven like a writing desk? Give a short answer."
61+
http :8080/chat/mistral-ai-options question=="Why is a raven like a writing desk? Give a short answer." -b
6262
```

01-chat-models/chat-models-multiple-providers/build.gradle

+1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ plugins {
22
id 'java'
33
id 'org.springframework.boot'
44
id 'io.spring.dependency-management'
5+
id 'org.graalvm.buildtools.native'
56
}
67

78
group = 'com.thomasvitale'

01-chat-models/chat-models-multiple-providers/src/main/java/com/thomasvitale/ai/spring/ChatClientController.java 01-chat-models/chat-models-multiple-providers/src/main/java/com/thomasvitale/ai/spring/ChatController.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -13,12 +13,12 @@
1313
* Chat examples using the high-level ChatClient API.
1414
*/
1515
@RestController
16-
class ChatClientController {
16+
class ChatController {
1717

1818
private final ChatClient mistralAichatClient;
1919
private final ChatClient openAichatClient;
2020

21-
ChatClientController(MistralAiChatModel mistralAiChatModel, OpenAiChatModel openAiChatModel) {
21+
ChatController(MistralAiChatModel mistralAiChatModel, OpenAiChatModel openAiChatModel) {
2222
this.mistralAichatClient = ChatClient.builder(mistralAiChatModel).build();
2323
this.openAichatClient = ChatClient.builder(openAiChatModel).build();
2424
}

01-chat-models/chat-models-multiple-providers/src/main/java/com/thomasvitale/ai/spring/ChatModelController.java 01-chat-models/chat-models-multiple-providers/src/main/java/com/thomasvitale/ai/spring/model/ChatModelController.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package com.thomasvitale.ai.spring;
1+
package com.thomasvitale.ai.spring.model;
22

33
import org.springframework.ai.chat.prompt.Prompt;
44
import org.springframework.ai.mistralai.MistralAiChatModel;

01-chat-models/chat-models-ollama/README.md

+7-7
Original file line numberDiff line numberDiff line change
@@ -53,10 +53,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
5353
### Ollama as a native application
5454

5555
First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
56-
Then, use Ollama to run the _mistral_ large language model. That's what we'll use in this example.
56+
Then, use Ollama to pull the _mistral_ large language model.
5757

5858
```shell
59-
ollama run mistral
59+
ollama pull mistral
6060
```
6161

6262
Finally, run the Spring Boot application.
@@ -79,29 +79,29 @@ You can now call the application that will use Ollama to generate text based on
7979
This example uses [httpie](https://httpie.io) to send HTTP requests.
8080

8181
```shell
82-
http :8080/chat
82+
http :8080/chat -b
8383
```
8484

8585
Try passing your custom prompt and check the result.
8686

8787
```shell
88-
http :8080/chat question=="What is the capital of Italy?"
88+
http :8080/chat question=="What is the capital of Italy?" -b
8989
```
9090

9191
The next request is configured with a custom temperature value to obtain a more creative, yet less precise answer.
9292

9393
```shell
94-
http :8080/chat/generic-options question=="Why is a raven like a writing desk? Give a short answer."
94+
http :8080/chat/generic-options question=="Why is a raven like a writing desk? Give a short answer." -b
9595
```
9696

9797
The next request is configured with Ollama-specific customizations.
9898

9999
```shell
100-
http :8080/chat/provider-options question=="What can you see beyond what you can see? Give a short answer."
100+
http :8080/chat/provider-options question=="What can you see beyond what you can see? Give a short answer." -b
101101
```
102102

103103
The final request returns the model's answer as a stream.
104104

105105
```shell
106-
http --stream :8080/chat/stream question=="Why is a raven like a writing desk? Answer in 3 paragraphs."
106+
http --stream :8080/chat/stream question=="Why is a raven like a writing desk? Answer in 3 paragraphs." -b
107107
```

01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatClientController.java 01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatController.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@
1212
* Chat examples using the high-level ChatClient API.
1313
*/
1414
@RestController
15-
class ChatClientController {
15+
class ChatController {
1616

1717
private final ChatClient chatClient;
1818

19-
ChatClientController(ChatClient.Builder chatClientBuilder) {
19+
ChatController(ChatClient.Builder chatClientBuilder) {
2020
this.chatClient = chatClientBuilder.build();
2121
}
2222

01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatModelController.java 01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/model/ChatModelController.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package com.thomasvitale.ai.spring;
1+
package com.thomasvitale.ai.spring.model;
22

33
import org.springframework.ai.chat.model.ChatModel;
44
import org.springframework.ai.chat.prompt.ChatOptionsBuilder;

01-chat-models/chat-models-openai/README.md

+5-5
Original file line numberDiff line numberDiff line change
@@ -71,29 +71,29 @@ You can now call the application that will use OpenAI to generate text based on
7171
This example uses [httpie](https://httpie.io) to send HTTP requests.
7272

7373
```shell
74-
http :8080/chat
74+
http :8080/chat -b
7575
```
7676

7777
Try passing your custom prompt and check the result.
7878

7979
```shell
80-
http :8080/chat question=="What is the capital of Italy?"
80+
http :8080/chat question=="What is the capital of Italy?" -b
8181
```
8282

8383
The next request is configured with a custom temperature value to obtain a more creative, yet less precise answer.
8484

8585
```shell
86-
http :8080/chat/generic-options question=="Why is a raven like a writing desk? Give a short answer."
86+
http :8080/chat/generic-options question=="Why is a raven like a writing desk? Give a short answer." -b
8787
```
8888

8989
The next request is configured with OpenAI-specific customizations.
9090

9191
```shell
92-
http :8080/chat/provider-options question=="What can you see beyond what you can see? Give a short answer."
92+
http :8080/chat/provider-options question=="What can you see beyond what you can see? Give a short answer." -b
9393
```
9494

9595
The final request returns the model's answer as a stream.
9696

9797
```shell
98-
http --stream :8080/chat/stream question=="Why is a raven like a writing desk? Answer in 3 paragraphs."
98+
http --stream :8080/chat/stream question=="Why is a raven like a writing desk? Answer in 3 paragraphs." -b
9999
```

01-chat-models/chat-models-openai/build.gradle

+1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ plugins {
22
id 'java'
33
id 'org.springframework.boot'
44
id 'io.spring.dependency-management'
5+
id 'org.graalvm.buildtools.native'
56
}
67

78
group = 'com.thomasvitale'

01-chat-models/chat-models-openai/src/main/java/com/thomasvitale/ai/spring/ChatClientController.java 01-chat-models/chat-models-openai/src/main/java/com/thomasvitale/ai/spring/ChatController.java

+2-2
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@
1212
* Chat examples using the high-level ChatClient API.
1313
*/
1414
@RestController
15-
class ChatClientController {
15+
class ChatController {
1616

1717
private final ChatClient chatClient;
1818

19-
ChatClientController(ChatClient.Builder chatClientBuilder) {
19+
ChatController(ChatClient.Builder chatClientBuilder) {
2020
this.chatClient = chatClientBuilder.build();
2121
}
2222

01-chat-models/chat-models-openai/src/main/java/com/thomasvitale/ai/spring/ChatModelController.java 01-chat-models/chat-models-openai/src/main/java/com/thomasvitale/ai/spring/model/ChatModelController.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
package com.thomasvitale.ai.spring;
1+
package com.thomasvitale.ai.spring.model;
22

33
import org.springframework.ai.chat.model.ChatModel;
44
import org.springframework.ai.chat.prompt.ChatOptionsBuilder;

02-prompts/prompts-basics-ollama/README.md

+7-7
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
99
### Ollama as a native application
1010

1111
First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
12-
Then, use Ollama to run the _mistral_ large language model.
12+
Then, use Ollama to pull the _mistral_ large language model.
1313

1414
```shell
15-
ollama run mistral
15+
ollama pull mistral
1616
```
1717

1818
Finally, run the Spring Boot application.
@@ -23,25 +23,25 @@ Finally, run the Spring Boot application.
2323

2424
### Ollama as a dev service with Testcontainers
2525

26-
The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
26+
The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service at startup time.
2727

2828
```shell
2929
./gradlew bootTestRun
3030
```
3131

3232
## Calling the application
3333

34-
You can now call the application that will use Ollama and _mistral_ to generate an answer to your questions.
34+
You can now call the application that will use Ollama to generate an answer to your questions.
3535
This example uses [httpie](https://httpie.io) to send HTTP requests.
3636

3737
```shell
38-
http --raw "What is the capital of Italy?" :8080/chat/simple
38+
http --raw "What is the capital of Italy?" :8080/chat/simple -b --pretty none
3939
```
4040

4141
```shell
42-
http --raw "What is the capital of Italy?" :8080/chat/prompt
42+
http --raw "What is the capital of Italy?" :8080/chat/prompt -b --pretty none
4343
```
4444

4545
```shell
46-
http --raw "What is the capital of Italy?" :8080/chat/full
46+
http --raw "What is the capital of Italy?" :8080/chat/full -b
4747
```

02-prompts/prompts-basics-ollama/build.gradle

-3
Original file line numberDiff line numberDiff line change
@@ -29,10 +29,7 @@ dependencies {
2929
testAndDevelopmentOnly 'org.springframework.boot:spring-boot-devtools'
3030

3131
testImplementation 'org.springframework.boot:spring-boot-starter-test'
32-
testImplementation 'org.springframework.boot:spring-boot-starter-webflux'
33-
testImplementation 'org.springframework.boot:spring-boot-testcontainers'
3432
testImplementation 'org.springframework.ai:spring-ai-spring-boot-testcontainers'
35-
testImplementation 'org.testcontainers:junit-jupiter'
3633
testImplementation 'org.testcontainers:ollama'
3734
}
3835

02-prompts/prompts-basics-ollama/src/main/java/com/thomasvitale/ai/spring/ChatController.java

+9-6
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,9 @@
55
import org.springframework.web.bind.annotation.RequestBody;
66
import org.springframework.web.bind.annotation.RestController;
77

8+
/**
9+
* Chat examples using the high-level ChatClient API.
10+
*/
811
@RestController
912
class ChatController {
1013

@@ -15,18 +18,18 @@ class ChatController {
1518
}
1619

1720
@PostMapping("/chat/simple")
18-
String chatWithText(@RequestBody String input) {
19-
return chatService.chatWithText(input);
21+
String chatWithText(@RequestBody String question) {
22+
return chatService.chatWithText(question);
2023
}
2124

2225
@PostMapping("/chat/prompt")
23-
String chatWithPrompt(@RequestBody String input) {
24-
return chatService.chatWithPrompt(input).getResult().getOutput().getContent();
26+
String chatWithPrompt(@RequestBody String question) {
27+
return chatService.chatWithPrompt(question).getResult().getOutput().getContent();
2528
}
2629

2730
@PostMapping("/chat/full")
28-
ChatResponse chatWithPromptAndFullResponse(@RequestBody String message) {
29-
return chatService.chatWithPrompt(message);
31+
ChatResponse chatWithPromptAndFullResponse(@RequestBody String question) {
32+
return chatService.chatWithPrompt(question);
3033
}
3134

3235
}

02-prompts/prompts-basics-ollama/src/main/java/com/thomasvitale/ai/spring/ChatService.java

+7-4
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,9 @@
55
import org.springframework.ai.chat.prompt.Prompt;
66
import org.springframework.stereotype.Service;
77

8+
/**
9+
* Chat examples using the high-level ChatClient API.
10+
*/
811
@Service
912
class ChatService {
1013

@@ -14,12 +17,12 @@ class ChatService {
1417
this.chatClient = chatClientBuilder.build();
1518
}
1619

17-
String chatWithText(String message) {
18-
return chatClient.prompt().user(message).call().content();
20+
String chatWithText(String question) {
21+
return chatClient.prompt().user(question).call().content();
1922
}
2023

21-
ChatResponse chatWithPrompt(String message) {
22-
return chatClient.prompt(new Prompt(message)).call().chatResponse();
24+
ChatResponse chatWithPrompt(String question) {
25+
return chatClient.prompt(new Prompt(question)).call().chatResponse();
2326
}
2427

2528
}

0 commit comments

Comments
 (0)