Skip to content

Commit 62547ea

Browse files
authored
Merge pull request #7 from cloud-py-api/appapi-2.5
AMD 7900XTX support, smaller Docker Image size - [AppAPI 2.5]
2 parents ff36ebb + 6108432 commit 62547ea

11 files changed

+189
-25
lines changed

.dockerignore

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
.installed_flag

.gitignore

+1
Original file line numberDiff line numberDiff line change
@@ -93,3 +93,4 @@ MANIFEST
9393
converted/
9494

9595
geckodriver.log
96+
.installed_flag

.run/NC (last).run.xml renamed to .run/NC (28).run.xml

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
<component name="ProjectRunConfigurationManager">
2-
<configuration default="false" name="NC (last)" type="PythonConfigurationType" factoryName="Python">
2+
<configuration default="false" name="NC (28)" type="PythonConfigurationType" factoryName="Python">
33
<module name="ai_image_generator_bot" />
44
<option name="ENV_FILES" value="" />
55
<option name="INTERPRETER_OPTIONS" value="" />
@@ -10,7 +10,7 @@
1010
<env name="APP_PORT" value="9080" />
1111
<env name="APP_SECRET" value="12345" />
1212
<env name="APP_VERSION" value="1.0.0" />
13-
<env name="NEXTCLOUD_URL" value="http://nextcloud.local" />
13+
<env name="NEXTCLOUD_URL" value="http://stable28.local" />
1414
<env name="PYTHONUNBUFFERED" value="1" />
1515
</envs>
1616
<option name="SDK_HOME" value="" />

.run/NC (29).run.xml

+32
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
<component name="ProjectRunConfigurationManager">
2+
<configuration default="false" name="NC (29)" type="PythonConfigurationType" factoryName="Python">
3+
<module name="ai_image_generator_bot" />
4+
<option name="ENV_FILES" value="" />
5+
<option name="INTERPRETER_OPTIONS" value="" />
6+
<option name="PARENT_ENVS" value="true" />
7+
<envs>
8+
<env name="APP_HOST" value="0.0.0.0" />
9+
<env name="APP_ID" value="ai_image_generator_bot" />
10+
<env name="APP_PORT" value="9080" />
11+
<env name="APP_SECRET" value="12345" />
12+
<env name="APP_VERSION" value="1.0.0" />
13+
<env name="NEXTCLOUD_URL" value="http://stable29.local" />
14+
<env name="PYTHONUNBUFFERED" value="1" />
15+
</envs>
16+
<option name="SDK_HOME" value="" />
17+
<option name="SDK_NAME" value="Python 3.10 (ai_image_generator_bot)" />
18+
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/lib" />
19+
<option name="IS_MODULE_SDK" value="false" />
20+
<option name="ADD_CONTENT_ROOTS" value="true" />
21+
<option name="ADD_SOURCE_ROOTS" value="true" />
22+
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
23+
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/lib/main.py" />
24+
<option name="PARAMETERS" value="" />
25+
<option name="SHOW_COMMAND_LINE" value="false" />
26+
<option name="EMULATE_TERMINAL" value="false" />
27+
<option name="MODULE_MODE" value="false" />
28+
<option name="REDIRECT_INPUT" value="false" />
29+
<option name="INPUT_FILE" value="" />
30+
<method v="2" />
31+
</configuration>
32+
</component>

Dockerfile

+4-1
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,11 @@ ADD j[s] /app/js
88
ADD l10[n] /app/l10n
99
ADD li[b] /app/lib
1010

11+
COPY --chmod=775 healthcheck.sh /
12+
1113
RUN \
1214
python3 -m pip install -r requirements.txt && rm -rf ~/.cache && rm requirements.txt
1315

1416
WORKDIR /app/lib
15-
ENTRYPOINT ["python3", "main.py"]
17+
ENTRYPOINT ["python3", "hw_install.py", "main.py"]
18+
HEALTHCHECK --interval=2s --timeout=2s --retries=300 CMD /healthcheck.sh

Makefile

+34-14
Original file line numberDiff line numberDiff line change
@@ -9,42 +9,62 @@ help:
99
@echo " "
1010
@echo " build-push build image and upload to ghcr.io"
1111
@echo " "
12-
@echo " run install AIImageGeneratorBot for Nextcloud Last"
1312
@echo " run27 install AIImageGeneratorBot for Nextcloud 27"
13+
@echo " run28 install AIImageGeneratorBot for Nextcloud 28"
14+
@echo " run29 install AIImageGeneratorBot for Nextcloud 29"
1415
@echo " "
1516
@echo " For development of this example use PyCharm run configurations. Development is always set for last Nextcloud."
1617
@echo " First run 'AIImageGeneratorBot' and then 'make registerXX', after that you can use/debug/develop it and easy test."
1718
@echo " "
18-
@echo " register perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
1919
@echo " register27 perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
20+
@echo " register28 perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
21+
@echo " register29 perform registration of running 'AIImageGeneratorBot' into the 'manual_install' deploy daemon."
2022

2123
.PHONY: build-push
2224
build-push:
2325
docker login ghcr.io
24-
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:2.0.0 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:latest .
26+
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:2.1.0 .
2527

26-
.PHONY: run
27-
run:
28-
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
29-
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
30-
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml
28+
.PHONY: build-push-latest
29+
build-push-latest:
30+
docker login ghcr.io
31+
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --tag ghcr.io/cloud-py-api/ai_image_generator_bot:latest .
3132

3233
.PHONY: run27
3334
run27:
3435
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
3536
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
3637
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml
3738

38-
.PHONY: register
39-
register:
40-
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
41-
docker exec master-nextcloud-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
42-
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
43-
--force-scopes --wait-finish
39+
.PHONY: run28
40+
run28:
41+
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
42+
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
43+
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml
44+
45+
.PHONY: run29
46+
run29:
47+
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
48+
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot --force-scopes \
49+
--info-xml https://raw.githubusercontent.com/cloud-py-api/ai_image_generator_bot/main/appinfo/info.xml
4450

4551
.PHONY: register27
4652
register27:
4753
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
4854
docker exec master-stable27-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
4955
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
5056
--force-scopes --wait-finish
57+
58+
.PHONY: register28
59+
register28:
60+
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
61+
docker exec master-stable28-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
62+
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
63+
--force-scopes --wait-finish
64+
65+
.PHONY: register29
66+
register29:
67+
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:unregister ai_image_generator_bot --silent --force || true
68+
docker exec master-stable29-1 sudo -u www-data php occ app_api:app:register ai_image_generator_bot manual_install --json-info \
69+
"{\"id\":\"ai_image_generator_bot\",\"name\":\"AIImageGeneratorBot\",\"daemon_config_name\":\"manual_install\",\"version\":\"1.0.0\",\"secret\":\"12345\",\"port\":9080,\"scopes\":[\"TALK\", \"TALK_BOT\", \"FILES\", \"FILES_SHARING\"],\"system\":1}" \
70+
--force-scopes --wait-finish

README.md

+6
Original file line numberDiff line numberDiff line change
@@ -29,3 +29,9 @@ Uses [SDXL-Turbo](https://huggingface.co/stabilityai/sdxl-turbo) for fast image
2929
## State of support
3030

3131
The project is being developed in personal and free time, any ideas or pull requests are welcome.
32+
33+
*Note: We understand that the model used here is quite old and cannot compare with the new SDXL-Lightning*
34+
35+
*We are currently working on a new much more advanced image generation project which you can find here: [Visionatrix](https://github.com/Visionatrix/Visionatrix)*
36+
37+
*If this is of interest to the Nextcloud community, we can try to adapt Visionatrix either as an Image Provider for Nextcloud 30 or simply as an AppAPI Nextcloud application.*

appinfo/info.xml

+11-5
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@
55
<summary>Stable Diffusion Talk Bot</summary>
66
<description>
77
<![CDATA[
8-
**Requires [`AppAPI`](https://github.com/cloud-py-api/app_api) to work.**
8+
**Requires [`AppAPI`](https://github.com/cloud-py-api/app_api) version 2.5.0+ to work.**
99
1010
The AI model used by this application requires **~9 gigabytes** of video memory.
1111
@@ -15,11 +15,17 @@ If the application is running on a CPU, **14 to 18 gigabytes** of system memory
1515
1616
[`AI Model`](https://huggingface.co/stabilityai/sdxl-turbo) is loaded into memory on the first request and remains in it to quickly process further requests.
1717
18-
This is not an example, this is a ready-to-use application, just enable the bot in the conversation, and type:
18+
After installing, just enable the bot in the conversation, and type:
1919
2020
`@image cinematic portrait of fluffy cat with black eyes`
21+
22+
*Note: We understand that the model used here is quite old and cannot compare with the new SDXL-Lightning*
23+
24+
*We are currently working on a new `much more advanced` image generation project which you can find here: [`Visionatrix`](https://github.com/Visionatrix/Visionatrix)*
25+
26+
*If this is of interest to the Nextcloud community, we can try to adapt Visionatrix either as an Image Provider for Nextcloud 30 or simply as an AppAPI Nextcloud application.*
2127
]]></description>
22-
<version>2.0.1</version>
28+
<version>2.1.0</version>
2329
<licence>MIT</licence>
2430
<author mail="[email protected]" homepage="https://github.com/andrey18106">Andrey Borysenko</author>
2531
<author mail="[email protected]" homepage="https://github.com/bigcat88">Alexander Piskun</author>
@@ -35,13 +41,13 @@ This is not an example, this is a ready-to-use application, just enable the bot
3541
<bugs>https://github.com/cloud-py-api/ai_image_generator_bot/issues</bugs>
3642
<repository type="git">https://github.com/cloud-py-api/ai_image_generator_bot</repository>
3743
<dependencies>
38-
<nextcloud min-version="27" max-version="29"/>
44+
<nextcloud min-version="27" max-version="30"/>
3945
</dependencies>
4046
<external-app>
4147
<docker-install>
4248
<registry>ghcr.io</registry>
4349
<image>cloud-py-api/ai_image_generator_bot</image>
44-
<image-tag>2.0.0</image-tag>
50+
<image-tag>2.1.0</image-tag>
4551
</docker-install>
4652
<scopes>
4753
<value>TALK</value>

healthcheck.sh

+7
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
#!/bin/bash
2+
3+
if [ -f "/.installed_flag" ]; then
4+
exit 0
5+
else
6+
exit 1
7+
fi

lib/hw_install.py

+87
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,87 @@
1+
"""Script to install PyTorch based on "COMPUTE_DEVICE" environment variable.
2+
3+
Possible values: "cuda", "rocm", "cpu"
4+
5+
Advice: "pciutils" package should be installed inside container,
6+
it can be used in very rare cases to perform autodetect of hardware.
7+
8+
If an additional argument is specified, the script considers this to be the file name of the ExApp entry point.
9+
10+
Remember to adjust this script with anything your ExApp needs, either here or separately.
11+
12+
Copyright (c) 2024 Alexander Piskun, Nextcloud
13+
"""
14+
15+
# pylint: disable=consider-using-with
16+
17+
import os
18+
import subprocess
19+
import sys
20+
import typing
21+
from pathlib import Path
22+
23+
24+
def hw_autodetect() -> typing.Literal["cuda", "rocm", "cpu"]:
25+
process = subprocess.Popen(
26+
"lspci", # noqa: S607
27+
stdout=subprocess.PIPE,
28+
stderr=subprocess.PIPE,
29+
text=True,
30+
shell=True, # noqa: S602
31+
)
32+
output, errors = process.communicate()
33+
if process.returncode != 0:
34+
print("hw_install: Error running lspci:", flush=True)
35+
print(errors, flush=True)
36+
return "cpu"
37+
for line in output.split("\n"):
38+
if line.find("VGA") != -1:
39+
if line.find("NVIDIA") != -1:
40+
return "cuda"
41+
if line.find("AMD") != -1:
42+
return "rocm"
43+
return "cpu"
44+
45+
46+
def hw_install():
47+
defined_accelerator = os.environ.get("COMPUTE_DEVICE", "")
48+
if not defined_accelerator:
49+
defined_accelerator = hw_autodetect()
50+
51+
if defined_accelerator == "cpu":
52+
requirements = "torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu"
53+
elif defined_accelerator == "rocm":
54+
requirements = "torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.0"
55+
else:
56+
requirements = "torch torchvision torchaudio"
57+
58+
process_args = [sys.executable, "-m", "pip", "install", "--force-reinstall", *requirements.split()]
59+
subprocess.run(
60+
process_args, # noqa: S603
61+
check=False,
62+
stdin=sys.stdin,
63+
stdout=sys.stdout,
64+
stderr=sys.stderr,
65+
)
66+
67+
68+
if __name__ == "__main__":
69+
# we do not want to reinstall "PyTorch" each time the container starts
70+
flag_file = Path("/.installed_flag")
71+
if not flag_file.exists():
72+
print("hw_install: perform installation", flush=True)
73+
hw_install()
74+
flag_file.touch()
75+
if len(sys.argv) <= 1:
76+
print("hw_install: exit", flush=True)
77+
sys.exit(0)
78+
# execute another script if needed
79+
print(f"hw_install: executing additional script: {sys.argv[1]}", flush=True)
80+
r = subprocess.run(
81+
[sys.executable, sys.argv[1]], # noqa: S603
82+
stdin=sys.stdin,
83+
stdout=sys.stdout,
84+
stderr=sys.stderr,
85+
check=False,
86+
)
87+
sys.exit(r.returncode)

requirements.txt

+4-3
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
1-
nc_py_api[app]>=0.10.0
1+
--extra-index-url https://download.pytorch.org/whl/cpu
2+
torch
3+
torchvision
4+
nc_py_api[app]>=0.12.0
25
diffusers>=0.23.1
36
transformers>=4.36.1
47
accelerate
58
huggingface_hub
6-
torch
7-
torchvision

0 commit comments

Comments
 (0)