Skip to content

Commit 25e4987

Browse files
TechnototesLaptopkevinfrei
authored and committed
yay
1 parent 48944e7 commit 25e4987

File tree

2 files changed

+391
-0
lines changed

2 files changed

+391
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,329 @@
1+
package org.firstinspires.ftc.learnbot.subsystems;
2+
3+
import android.graphics.Bitmap;
4+
5+
import com.acmerobotics.dashboard.FtcDashboard;
6+
import com.acmerobotics.dashboard.config.Config;
7+
import com.qualcomm.robotcore.util.ElapsedTime;
8+
import com.qualcomm.robotcore.util.Range;
9+
import com.technototes.library.logger.Log;
10+
import com.technototes.library.logger.LogConfig;
11+
import com.technototes.library.logger.Loggable;
12+
import com.technototes.library.util.Alliance;
13+
14+
import org.firstinspires.ftc.learnbot.helpers.StartingPosition;
15+
import org.opencv.android.Utils;
16+
import org.opencv.core.Core;
17+
import org.opencv.core.Mat;
18+
import org.opencv.core.Rect;
19+
import org.opencv.core.Scalar;
20+
import org.opencv.imgproc.Imgproc;
21+
import org.openftc.easyopencv.OpenCvPipeline;
22+
23+
import java.util.function.Supplier;
24+
25+
@Config
26+
public class VisionPipeline extends OpenCvPipeline implements Supplier<Integer>, Loggable {
27+
28+
public Alliance alliance;
29+
//public StartingPosition side;
30+
31+
public VisionPipeline(Alliance teamAlliance, StartingPosition startSide) {
32+
super();
33+
alliance = teamAlliance;
34+
//side = startSide;
35+
boolean StartingPosition = middleDetected;
36+
}
37+
38+
@Config
39+
public static class VisionConstants {
40+
41+
@Config
42+
public static class Left {
43+
44+
public static int X = 10;
45+
public static int Y = 140;
46+
public static int WIDTH = 90;
47+
public static int HEIGHT = 90;
48+
}
49+
50+
@Config
51+
public static class Middle {
52+
53+
public static int X = 130;
54+
public static int Y = 130;
55+
public static int WIDTH = 90;
56+
public static int HEIGHT = 90;
57+
}
58+
59+
public enum Position {
60+
LEFT,
61+
CENTER,
62+
RIGHT,
63+
}
64+
65+
public static double RED1 = 0;
66+
67+
public static double RED2 = 179;
68+
public static double BLUE = 105;
69+
70+
// The low saturation point for color identification
71+
public static double lowS = 70;
72+
// The high saturation point for color identification
73+
public static double highS = 255;
74+
// The low value for color ID
75+
public static double lowV = 20;
76+
// The high value for color ID
77+
public static double highV = 255;
78+
// The 'range' around the hue that we're looking for
79+
public static double RANGE = 10;
80+
81+
// In the 160x120 bitmap, where are we looking?
82+
83+
public static Scalar RGB_HIGHLIGHT = new Scalar(255, 128, 255);
84+
85+
// the mininum amount of pixels needed in order to find a pixel
86+
public static int MINPIXELCOUNT = 1000;
87+
}
88+
89+
@LogConfig.Run(duringRun = false, duringInit = true)
90+
@Log.Boolean(name = "left")
91+
public volatile boolean leftDetected = false;
92+
93+
@LogConfig.Run(duringRun = false, duringInit = true)
94+
@Log.Boolean(name = "middle")
95+
public volatile boolean middleDetected = true;
96+
97+
@LogConfig.Run(duringRun = false, duringInit = true)
98+
@Log.Boolean(name = "right")
99+
public volatile boolean rightDetected = false;
100+
101+
@LogConfig.Run(duringRun = false, duringInit = true)
102+
@Log(name = "fps")
103+
public volatile double fps = 0.0;
104+
105+
private ElapsedTime time = new ElapsedTime();
106+
107+
public Mat Cr = new Mat();
108+
public Mat img = null;
109+
110+
private int countColor(double hue, Mat rect, int xoff, int yoff) {
111+
Scalar edge1 = new Scalar(
112+
hue - VisionConstants.RANGE,
113+
VisionConstants.lowS,
114+
VisionConstants.lowV
115+
);
116+
Scalar edge2 = new Scalar(
117+
hue + VisionConstants.RANGE,
118+
VisionConstants.highS,
119+
VisionConstants.highV
120+
);
121+
// Check to see which pixels are between edge1 & edge2, output into a boolean matrix Cr
122+
Core.inRange(rect, edge1, edge2, Cr);
123+
int count = 0;
124+
for (int i = 0; i < Cr.width(); i++) {
125+
for (int j = 0; j < Cr.height(); j++) {
126+
if (Cr.get(j, i)[0] > 0) {
127+
count++;
128+
// Draw a dot on the image at this point - input was put into img
129+
// The color choice makes things stripey, which makes it easier to identify
130+
if (VisionSubsystem.VisionSubsystemConstants.DEBUG_VIEW) {
131+
double[] colorToDraw = ((j + i) & 3) != 0 ? edge1.val : edge2.val;
132+
img.put(j + yoff, i + xoff, colorToDraw);
133+
}
134+
}
135+
}
136+
}
137+
return count;
138+
// count = 0;
139+
// for (int i = 0; i < Cr.width(); i++) {
140+
// for (int j = 0; j < Cr.height(); j++) {
141+
// if (Cr.get(j, i)[0] > 0) {
142+
// count++;
143+
// // Draw a dot on the image at this point - input was put into img
144+
// // The color choice makes things stripey, which makes it easier to identify
145+
// if (VisionSubsystem.VisionSubsystemConstants.DEBUG_VIEW) {
146+
// double[] colorToDraw = ((j + i) & 3) != 0 ? edge1.val : edge2.val;
147+
// img.put(
148+
// j + VisionConstants.Left.Y,
149+
// i + VisionConstants.Left.X,
150+
// colorToDraw
151+
// );
152+
// }
153+
// }
154+
// }
155+
// return count;
156+
}
157+
158+
private void countPixels(Mat input) {
159+
// First, slice the smaller rectangle out of the overall bitmap:
160+
Mat mRectToLookAtM = input.submat(
161+
// Row start to Row end
162+
VisionConstants.Middle.Y,
163+
VisionConstants.Middle.Y + VisionConstants.Middle.HEIGHT,
164+
// Col start to Col end
165+
VisionConstants.Middle.X,
166+
VisionConstants.Middle.X + VisionConstants.Middle.WIDTH
167+
);
168+
169+
Mat mRectToLookAtL = input.submat(
170+
// Row start to Row end
171+
VisionConstants.Left.Y,
172+
VisionConstants.Left.Y + VisionConstants.Left.HEIGHT,
173+
// Col start to Col end
174+
VisionConstants.Left.X,
175+
VisionConstants.Left.X + VisionConstants.Left.WIDTH
176+
);
177+
178+
// Next, convert the RGB image to HSV, because HUE is much easier to identify colors in
179+
// The output is in 'customColorSpace'
180+
Mat rectM = new Mat();
181+
Mat rectL = new Mat();
182+
Imgproc.cvtColor(mRectToLookAtM, rectM, Imgproc.COLOR_RGB2HSV);
183+
Imgproc.cvtColor(mRectToLookAtL, rectL, Imgproc.COLOR_RGB2HSV);
184+
// Check to see which colors occur:
185+
int colorCountL = 0;
186+
int colorCountM = 0;
187+
if (this.alliance == Alliance.BLUE) {
188+
colorCountM =
189+
countColor(
190+
VisionConstants.BLUE,
191+
rectM,
192+
VisionConstants.Middle.X,
193+
VisionConstants.Middle.Y
194+
);
195+
} else {
196+
colorCountM =
197+
countColor(
198+
VisionConstants.RED1,
199+
rectM,
200+
VisionConstants.Middle.X,
201+
VisionConstants.Middle.Y
202+
);
203+
colorCountM +=
204+
countColor(
205+
VisionConstants.RED2,
206+
rectM,
207+
VisionConstants.Middle.X,
208+
VisionConstants.Middle.Y
209+
);
210+
}
211+
if (this.alliance == Alliance.BLUE) {
212+
colorCountL =
213+
countColor(
214+
VisionConstants.BLUE,
215+
rectL,
216+
VisionConstants.Left.X,
217+
VisionConstants.Left.Y
218+
);
219+
} else {
220+
colorCountL =
221+
countColor(
222+
VisionConstants.RED1,
223+
rectL,
224+
VisionConstants.Left.X,
225+
VisionConstants.Left.Y
226+
);
227+
colorCountL +=
228+
countColor(VisionConstants.RED2, rectL, VisionConstants.Left.X, VisionConstants.Left.Y);
229+
}
230+
pickLocation(colorCountL, colorCountM);
231+
}
232+
233+
public void detectSignal(Mat input) {
234+
// Put the input matrix in a member variable, so that other functions can draw on it
235+
img = input;
236+
237+
countPixels(input);
238+
// Check which spot we should park in
239+
// middleDetected = countA >= countY && countA >= countP;
240+
// leftDetected = countP >= countA && countP >= countY;
241+
rightDetected = !leftDetected && !middleDetected;
242+
243+
// Draw a rectangle around the area we're looking at, for debugging
244+
int x = Range.clip(VisionConstants.Middle.X - 1, 0, input.width() - 1);
245+
int y = Range.clip(VisionConstants.Middle.Y - 1, 0, input.height() - 1);
246+
int w = Range.clip(VisionConstants.Middle.WIDTH + 2, 1, input.width() - x);
247+
int h = Range.clip(VisionConstants.Middle.HEIGHT + 2, 1, input.height() - y);
248+
249+
int xl = Range.clip(VisionConstants.Left.X - 1, 0, input.width() - 1);
250+
int yl = Range.clip(VisionConstants.Left.Y - 1, 0, input.height() - 1);
251+
int wl = Range.clip(VisionConstants.Left.WIDTH + 2, 1, input.width() - x);
252+
int hl = Range.clip(VisionConstants.Left.HEIGHT + 2, 1, input.height() - y);
253+
Imgproc.rectangle(input, new Rect(x, y, w, h), VisionConstants.RGB_HIGHLIGHT);
254+
Imgproc.rectangle(input, new Rect(xl, yl, wl, hl), VisionConstants.RGB_HIGHLIGHT);
255+
}
256+
257+
public void init(Mat firstFrame) {
258+
detectSignal(firstFrame);
259+
}
260+
261+
@Override
262+
public Mat processFrame(Mat input) {
263+
// Update the FPS counter to see how slow the vision code is
264+
// As of October 2022, it runs between 10 and 14 FPS.
265+
fps = 1000 / time.milliseconds();
266+
time.reset();
267+
countPixels(input);
268+
if (VisionSubsystem.VisionSubsystemConstants.DEBUG_VIEW) {
269+
sendBitmap();
270+
}
271+
return input;
272+
}
273+
274+
private void pickLocation(int countL, int countM) {
275+
/*
276+
First we have to create a rectangle from teh view that the camera sees.
277+
Then we have to convert RGB to HSV.
278+
Then we check for specific colors (in this case, red and blue)
279+
create another rectangle and do the same stuff
280+
281+
If there is more of that color than a specific value in either of the rectangles(left and middle) then the pixel is there.
282+
If neither of the rectangles have more of that color, then it is in the 3rd position (right)
283+
If there is more blue than red or vice versa, then that color is your alliance yay :)
284+
*/
285+
286+
if (countL > VisionConstants.MINPIXELCOUNT && countL > countM) {
287+
leftDetected = true;
288+
middleDetected = false;
289+
rightDetected = false;
290+
} else if (
291+
countM <= VisionConstants.MINPIXELCOUNT && countL <= VisionConstants.MINPIXELCOUNT
292+
) {
293+
rightDetected = true;
294+
middleDetected = false;
295+
leftDetected = false;
296+
} else {
297+
leftDetected = false;
298+
middleDetected = true;
299+
rightDetected = false;
300+
}
301+
}
302+
303+
@Override
304+
public Integer get() {
305+
return null;
306+
}
307+
308+
public boolean left() {
309+
return leftDetected;
310+
}
311+
312+
public boolean middle() {
313+
return middleDetected;
314+
}
315+
316+
public boolean right() {
317+
return rightDetected;
318+
}
319+
320+
// Helper to send the bitmap to the FTC Dashboard
321+
private void sendBitmap() {
322+
FtcDashboard db = FtcDashboard.getInstance();
323+
if (db != null) {
324+
Bitmap bitmap = Bitmap.createBitmap(img.cols(), img.rows(), Bitmap.Config.RGB_565);
325+
Utils.matToBitmap(img, bitmap);
326+
db.sendImage(bitmap);
327+
}
328+
}
329+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
package org.firstinspires.ftc.learnbot.subsystems;
2+
3+
import com.acmerobotics.dashboard.config.Config;
4+
import com.technototes.library.logger.Loggable;
5+
import com.technototes.library.subsystem.Subsystem;
6+
import com.technototes.library.util.Alliance;
7+
import com.technototes.vision.hardware.Webcam;
8+
9+
10+
import org.firstinspires.ftc.learnbot.helpers.StartingPosition;
11+
import org.openftc.easyopencv.OpenCvCameraRotation;
12+
13+
public class VisionSubsystem implements Subsystem, Loggable {
14+
15+
@Config
16+
public static class VisionSubsystemConstants {
17+
18+
// This is a super-low res image. I don't think we need higher resolution...
19+
// Note: This may be too small for the older camera.
20+
// I think it only goes down to 320 x 240
21+
public static int WIDTH = 320;
22+
public static int HEIGHT = 240;
23+
// Change this if the camera is oriented differently
24+
public static OpenCvCameraRotation ROTATION = OpenCvCameraRotation.UPSIDE_DOWN;
25+
// Turn this on if we want to see the debug image
26+
public static boolean DEBUG_VIEW = true;
27+
}
28+
29+
public Webcam camera;
30+
public VisionPipeline pipeline;
31+
32+
public VisionSubsystem(Webcam c, Alliance alliance, StartingPosition side) {
33+
camera = c;
34+
pipeline = new VisionPipeline(alliance, side);
35+
}
36+
37+
public VisionSubsystem() {
38+
camera = null;
39+
pipeline = null;
40+
}
41+
42+
public void startStreaming() {
43+
camera.startStreaming(
44+
VisionSubsystemConstants.WIDTH,
45+
VisionSubsystemConstants.HEIGHT,
46+
VisionSubsystemConstants.ROTATION
47+
);
48+
}
49+
50+
public void startVisionPipeline() {
51+
camera.setPipeline(pipeline);
52+
// The i -> lambda appears to be for *error reporting* so this line is silly:
53+
camera.openCameraDeviceAsync(this::startStreaming, i -> startVisionPipeline());
54+
}
55+
56+
public void stopVisionPipeline() {
57+
camera.setPipeline(null);
58+
camera.closeCameraDeviceAsync(() -> {
59+
/* Do we need to do anything to stop the vision pipeline? */
60+
});
61+
}
62+
}

0 commit comments

Comments
 (0)