diff --git a/PPOCRLabel.py b/PPOCRLabel.py index 436369a..1630f43 100644 --- a/PPOCRLabel.py +++ b/PPOCRLabel.py @@ -2137,7 +2137,9 @@ def loadFile(self, filePath=None, isAdjustScale=True): if unicodeFilePath and os.path.exists(unicodeFilePath): self.canvas.verified = False - cvimg = cv2.imdecode(np.fromfile(unicodeFilePath, dtype=np.uint8), 1) + cvimg = cv2.imdecode( + np.fromfile(unicodeFilePath, dtype=np.uint8), cv2.IMREAD_COLOR + ) height, width, depth = cvimg.shape cvimg = cv2.cvtColor(cvimg, cv2.COLOR_BGR2RGB) image = QImage( @@ -2939,7 +2941,7 @@ def autoRecognition(self): self.init_key_list(self.Cachelabel) def reRecognition(self): - img = cv2.imdecode(np.fromfile(self.filePath, dtype=np.uint8), 1) + img = cv2.imdecode(np.fromfile(self.filePath, dtype=np.uint8), cv2.IMREAD_COLOR) if self.canvas.shapes: self.result_dic = [] self.result_dic_locked = ( @@ -3023,7 +3025,7 @@ def reRecognition(self): QMessageBox.information(self, "Information", "Draw a box!") def singleRerecognition(self): - img = cv2.imdecode(np.fromfile(self.filePath, dtype=np.uint8), 1) + img = cv2.imdecode(np.fromfile(self.filePath, dtype=np.uint8), cv2.IMREAD_COLOR) for shape in self.canvas.selectedShapes: box = [[int(p.x()), int(p.y())] for p in shape.points] if len(box) > 4: @@ -3495,7 +3497,9 @@ def saveRecResult(self): idx = self.getImglabelidx(key) try: img_path = os.path.dirname(base_dir) + "/" + key - img = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), -1) + img = cv2.imdecode( + np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED + ) for i, label in enumerate(self.PPlabel[idx]): if label["difficult"]: continue @@ -3645,7 +3649,7 @@ def format_shape(s): self.actions.save.setEnabled(True) def expandSelectedShape(self): - img = cv2.imdecode(np.fromfile(self.filePath, dtype=np.uint8), 1) + img = cv2.imdecode(np.fromfile(self.filePath, dtype=np.uint8), cv2.IMREAD_COLOR) for shape in self.canvas.selectedShapes: box = [[int(p.x()), int(p.y())] for p in shape.points] if len(box) > 
4: diff --git a/libs/autoDialog.py b/libs/autoDialog.py index e65c4b4..9a7a04e 100644 --- a/libs/autoDialog.py +++ b/libs/autoDialog.py @@ -42,7 +42,7 @@ def run(self): self.listValue.emit(img_path) if self.model == "paddle": h, w, _ = cv2.imdecode( - np.fromfile(img_path, dtype=np.uint8), 1 + np.fromfile(img_path, dtype=np.uint8), cv2.IMREAD_COLOR ).shape if h > 32 and w > 32: result = self.ocr.predict(img_path)[0]