Commit 9c96316 (parent 4fa3758)

try ncnn on raspberry pi3
File tree: 4 files changed, +64 -39 lines


ncnn/.segment.cpp.swp

16 KB
Binary file not shown.

ncnn/CMakeLists.txt

+2 -1
@@ -7,9 +7,10 @@ set(CMAKE_CXX_FLAGS "-std=c++14 -O2")
 
 set (ncnn_DIR ${NCNN_ROOT}/lib/cmake/ncnn)
 find_package(OpenCV REQUIRED)
+find_package(OpenMP REQUIRED)
 find_package(ncnn REQUIRED)
 
 
 add_executable(segment segment.cpp)
 target_include_directories(segment PUBLIC ${OpenCV_INCLUDE_DIRS})
-target_link_libraries(segment ${OpenCV_LIBRARIES} ncnn)
+target_link_libraries(segment ${OpenCV_LIBRARIES} ncnn OpenMP::OpenMP_CXX)
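The new `find_package(OpenMP REQUIRED)` plus the `OpenMP::OpenMP_CXX` link target is what lets the `#pragma omp parallel for` added to `segment.cpp` further down actually run multi-threaded. A minimal standalone sketch, not part of this commit, of what that link target enables:

```cpp
// Illustration only: when built against OpenMP::OpenMP_CXX the pragma below
// spreads the loop over the Pi's four cores; without OpenMP flags the pragma
// is simply ignored and the loop runs on one thread.
#include <omp.h>
#include <cstdio>

int main() {
    #pragma omp parallel for
    for (int i = 0; i < 4; ++i) {
        std::printf("iteration %d ran on thread %d\n", i, omp_get_thread_num());
    }
    return 0;
}
```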

ncnn/README.md

+29 -22
@@ -1,14 +1,10 @@
 
 ### My platform
 
-* ubuntu 18.04
-* Intel(R) Xeon(R) Gold 6240 CPU @ 2.60GHz
-* cmake 3.17.1
-* opencv built from source
+* raspberry pi 3b
+* armv8 4-core cpu, 1GB memory
+* 2022-04-04-raspios-bullseye-armhf-lite.img
 
-### NOTE
-
-Though this demo runs on x86 platform, you can also use it on mobile platforms. NCNN is better optimized on mobile platforms.
 
 
 ### Install ncnn
@@ -18,49 +14,60 @@ Though this demo runs on x86 platform, you can also use it on mobile platforms.
 $ python -m pip install onnx-simplifier
 ```
 
+TODO:
+check vulkan
+
 #### 2. build ncnn
-Just follow the ncnn official tutoral of [build-for-linux](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux) to install ncnn:
+Just follow the official ncnn tutorial [build-for-linux](https://github.com/Tencent/ncnn/wiki/how-to-build#build-for-linux) to install ncnn. The following steps were all carried out on my raspberry pi:
 
 **step 1:** install dependencies
 ```
-# apt install build-essential git libprotobuf-dev protobuf-compiler
+$ sudo apt install build-essential git cmake libprotobuf-dev protobuf-compiler libopencv-dev
 ```
 
 **step 2:** (optional) install vulkan
 
-**step 3:** install opencv from source
-
-**step 4:** build
-I am using commit `9391fae741a1fb8d58cdfdc92878a5e9800f8567`, and I have not tested over newer commits.
+**step 3:** build
+I am using commit `5725c028c0980efd`, and I have not tested other commits.
 ```
 $ git clone https://github.com/Tencent/ncnn.git
 $ cd ncnn
+$ git reset --hard 5725c028c0980efd
 $ git submodule update --init
 $ mkdir -p build
-$ cmake -DCMAKE_TOOLCHAIN_FILE=../toolchains/host.gcc.toolchain.cmake ..
-$ make -j
+$ cmake -DCMAKE_BUILD_TYPE=Release -DNCNN_VULKAN=OFF -DNCNN_BUILD_TOOLS=ON -DCMAKE_TOOLCHAIN_FILE=../toolchains/pi3.toolchain.cmake ..
+$ make -j2
 $ make install
 ```
 
 ### Convert model, build and run the demo
 
 #### 1. convert pytorch model to ncnn model via onnx
+On your training platform:
 ```
 $ cd BiSeNet/
 $ python tools/export_onnx.py --aux-mode eval --config configs/bisenetv2_city.py --weight-path /path/to/your/model.pth --outpath ./model_v2.onnx
 $ python -m onnxsim model_v2.onnx model_v2_sim.onnx
+```
+
+Then copy `model_v2_sim.onnx` from the training platform to the raspberry device.
+
+On the raspberry device:
+```
 $ /path/to/ncnn/build/tools/onnx/onnx2ncnn model_v2_sim.onnx model_v2_sim.param model_v2_sim.bin
-$ mkdir -p ncnn/moidels
-$ mv model_v2_sim.param ncnn/models
-$ mv model_v2_sim.bin ncnn/models
+$ cd BiSeNet/ncnn/
+$ mkdir -p models
+$ mv model_v2_sim.param models/
+$ mv model_v2_sim.bin models/
 ```
 
 #### 2. compile demo code
+On the raspberry device:
 ```
-mkdir -p ncnn/build
-cd ncnn/build
-cmake .. -DNCNN_ROOT=/path/to/ncnn/build/install
-make
+$ mkdir -p BiSeNet/ncnn/build
+$ cd BiSeNet/ncnn/build
+$ cmake .. -DNCNN_ROOT=/path/to/ncnn/build/install
+$ make
 ```
 
 #### 3. run demo
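Before moving on to the C++ demo in `segment.cpp` below, it can be worth confirming that the `model_v2_sim.param` / `model_v2_sim.bin` pair produced by the conversion steps above actually parses with ncnn. A minimal sketch, not part of this commit; the include path and file locations are assumptions, adjust them to your install and directory layout:

```cpp
// Hypothetical sanity check: verify the converted ncnn model files load
// before wiring them into the segmentation demo.
#include "net.h"   // ncnn header; may be <ncnn/net.h> depending on include dirs
#include <cstdio>

int main() {
    ncnn::Net net;
    // Paths assume the models/ directory created in the README steps above.
    if (net.load_param("models/model_v2_sim.param") != 0) {
        std::fprintf(stderr, "failed to parse param file\n");
        return 1;
    }
    if (net.load_model("models/model_v2_sim.bin") != 0) {
        std::fprintf(stderr, "failed to load weights\n");
        return 1;
    }
    std::printf("converted model loaded fine\n");
    return 0;
}
```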

ncnn/segment.cpp

+33 -16
@@ -5,11 +5,13 @@
 #include <opencv2/core/core.hpp>
 #include <opencv2/highgui/highgui.hpp>
 #include <opencv2/imgproc/imgproc.hpp>
+#include <omp.h>
 
 #include <iostream>
 #include <random>
 #include <algorithm>
 #include <stdio.h>
+#include <string>
 #include <vector>
 
 
@@ -29,7 +31,15 @@ int main(int argc, char** argv) {
 
 
 void inference() {
-    bool use_fp16 = false;
+    int nthreads = 4;
+    string mod_param = "../models/model_v2_sim.param";
+    string mod_model = "../models/model_v2_sim.bin";
+    int oH{512}, oW{1024}, n_classes{19};
+    float mean[3] = {0.3257f, 0.3690f, 0.3223f};
+    float var[3] = {0.2112f, 0.2148f, 0.2115f};
+    string impth = "../../example.png";
+    string savepth = "out.png";
+
     // load model
     ncnn::Net mod;
 #if NCNN_VULKAN
@@ -41,30 +51,32 @@ void inference() {
     mod.opt.use_vulkan_compute = 1;
     mod.set_vulkan_device(1);
 #endif
-    mod.load_param("../models/model_v2_sim.param");
-    mod.load_model("../models/model_v2_sim.bin");
-    mod.opt.use_fp16_packed = use_fp16;
-    mod.opt.use_fp16_storage = use_fp16;
-    mod.opt.use_fp16_arithmetic = use_fp16;
+    mod.load_param(mod_param.c_str());
+    mod.load_model(mod_model.c_str());
+    // ncnn enables fp16 by default, so we do not need these options
+    // int8 depends on the model itself, so we do not set it here
+    // bool use_fp16 = false;
+    // mod.opt.use_fp16_packed = use_fp16;
+    // mod.opt.use_fp16_storage = use_fp16;
+    // mod.opt.use_fp16_arithmetic = use_fp16;
 
     // load image, and copy to ncnn mat
-    int oH{1024}, oW{2048}, n_classes{19};
-    float mean[3] = {0.3257f, 0.3690f, 0.3223f};
-    float var[3] = {0.2112f, 0.2148f, 0.2115f};
-    cv::Mat im = cv::imread("../../example.png");
+    cv::Mat im = cv::imread(impth);
     if (im.empty()) {
         fprintf(stderr, "cv::imread failed\n");
         return;
     }
+
     ncnn::Mat inp = ncnn::Mat::from_pixels_resize(
         im.data, ncnn::Mat::PIXEL_BGR, im.cols, im.rows, oW, oH);
     for (float &el : mean) el *= 255.;
-    for (float &el : var) el = 1. / (255. * el);
+    for (float &el : var) el = 1. / (255. * el);
     inp.substract_mean_normalize(mean, var);
 
     // set input, run, get output
     ncnn::Extractor ex = mod.create_extractor();
-    // ex.set_num_threads(1);
+    ex.set_light_mode(true); // not sure what this means
+    ex.set_num_threads(nthreads);
 #if NCNN_VULKAN
     ex.set_vulkan_compute(true);
 #endif
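A note on the preprocessing above: `substract_mean_normalize(mean, norm)` applies `(pixel - mean_c) * norm_c` per channel, so to get the usual `(x / 255 - mean) / std` the code pre-scales the means by 255 and turns `var` (really the per-channel std) into `1 / (255 * std)`. A tiny standalone check of that arithmetic, purely for illustration:

```cpp
// Illustration only: (x/255 - m) / s equals (x - 255*m) * (1 / (255*s)),
// which is the folded form fed to ncnn's substract_mean_normalize.
#include <cmath>
#include <cstdio>

int main() {
    float m = 0.3257f, s = 0.2112f;  // channel-0 mean/std used by the demo
    float x = 137.f;                 // an arbitrary 8-bit pixel value
    float direct = (x / 255.f - m) / s;
    float folded = (x - 255.f * m) * (1.f / (255.f * s));
    std::printf("direct=%f folded=%f diff=%g\n", direct, folded, std::fabs(direct - folded));
    return 0;
}
```

As for `ex.set_light_mode(true)`: to my understanding (the commit itself is unsure), light mode lets ncnn release intermediate blobs as soon as they are no longer needed, which keeps peak memory lower on the Pi's 1GB of RAM.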
@@ -76,14 +88,16 @@ void inference() {
     // generate colorful output, and dump
     vector<vector<uint8_t>> color_map = get_color_map();
     Mat pred(cv::Size(oW, oH), CV_8UC3);
-    for (int i{0}; i < oH; ++i) {
+    int offset = oH * oW;
+    omp_set_num_threads(omp_get_max_threads());
+    #pragma omp parallel for
+    for (int i=0; i < oH; ++i) {
         uint8_t *ptr = pred.ptr<uint8_t>(i);
         for (int j{0}; j < oW; ++j) {
             // compute argmax
-            int idx, offset, argmax{0};
+            int idx, argmax{0};
             float max;
             idx = i * oW + j;
-            offset = oH * oW;
             max = out[idx];
             for (int k{1}; k < n_classes; ++k) {
                 idx += offset;
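The hoisted `offset = oH * oW` reflects the memory layout of the output blob: it is stored channel-first, as `n_classes` planes of `oH * oW` floats, so the score of class `k` for pixel `(i, j)` sits at `k * oH * oW + i * oW + j`, and each `idx += offset` steps to the next class plane. Every row of `pred` is written by exactly one iteration of the outer loop, which is why the `#pragma omp parallel for` over `i` is race-free. A self-contained sketch of the same argmax over a CHW buffer (the shape and values here are made up for illustration):

```cpp
// Per-pixel argmax over a CHW float buffer, mirroring the loop in segment.cpp.
#include <cstdio>
#include <vector>

int main() {
    const int n_classes = 3, H = 2, W = 4;      // toy shape for illustration
    std::vector<float> out(n_classes * H * W, 0.f);
    out[1 * H * W + 0 * W + 2] = 5.f;           // class 1 wins at pixel (0, 2)

    std::vector<int> label(H * W);
    const int offset = H * W;                   // stride between class planes
    #pragma omp parallel for
    for (int i = 0; i < H; ++i) {
        for (int j = 0; j < W; ++j) {
            int idx = i * W + j, argmax = 0;    // start in the class-0 plane
            float max = out[idx];
            for (int k = 1; k < n_classes; ++k) {
                idx += offset;                  // jump to class k's plane
                if (out[idx] > max) { max = out[idx]; argmax = k; }
            }
            label[i * W + j] = argmax;
        }
    }
    std::printf("label at (0,2) = %d\n", label[0 * W + 2]);
    return 0;
}
```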
@@ -99,7 +113,10 @@ void inference() {
             ptr += 3;
         }
     }
-    cv::imwrite("out.png", pred);
+    cv::imwrite(savepth, pred);
+
+    ex.clear(); // must have this, or we get an error
+    mod.clear();
 
 }
 