|
| 1 | +from unittest.mock import patch |
| 2 | + |
1 | 3 | import requests
|
2 | 4 | import responses
|
3 | 5 | from responses.matchers import json_params_matcher
|
4 | 6 |
|
5 | 7 | from roboflow import API_URL
|
6 | 8 | from roboflow.adapters.rfapi import AnnotationSaveError, ImageUploadError
|
7 | 9 | from roboflow.config import DEFAULT_BATCH_NAME
|
8 |
| -from tests import PROJECT_NAME, ROBOFLOW_API_KEY, WORKSPACE_NAME, RoboflowTest |
| 10 | +from tests import PROJECT_NAME, ROBOFLOW_API_KEY, WORKSPACE_NAME, RoboflowTest, ordered |
9 | 11 |
|
10 | 12 |
|
11 | 13 | class TestProject(RoboflowTest):
|
| 14 | + def _create_test_dataset(self, images=None): |
| 15 | + """ |
| 16 | + Create a test dataset with specified images or a default image |
| 17 | +
|
| 18 | + Args: |
| 19 | + images: List of image dictionaries. If None, a default image will be used. |
| 20 | +
|
| 21 | + Returns: |
| 22 | + Dictionary representing a parsed dataset |
| 23 | + """ |
| 24 | + if images is None: |
| 25 | + images = [{"file": "image1.jpg", "split": "train", "annotationfile": {"file": "image1.xml"}}] |
| 26 | + |
| 27 | + return {"location": "/test/location/", "images": images} |
| 28 | + |
| 29 | + def _setup_upload_dataset_mocks( |
| 30 | + self, |
| 31 | + test_dataset=None, |
| 32 | + image_return=None, |
| 33 | + annotation_return=None, |
| 34 | + project_created=False, |
| 35 | + save_annotation_side_effect=None, |
| 36 | + upload_image_side_effect=None, |
| 37 | + ): |
| 38 | + """ |
| 39 | + Set up common mocks for upload_dataset tests |
| 40 | +
|
| 41 | + Args: |
| 42 | + test_dataset: The dataset to return from parsefolder. If None, creates a default dataset |
| 43 | + image_return: Return value for upload_image. Default is successful upload |
| 44 | + annotation_return: Return value for save_annotation. Default is successful annotation |
| 45 | + project_created: Whether to simulate a newly created project |
| 46 | + save_annotation_side_effect: Side effect function for save_annotation |
| 47 | + upload_image_side_effect: Side effect function for upload_image |
| 48 | +
|
| 49 | + Returns: |
| 50 | + Dictionary of mock objects with start and stop methods |
| 51 | + """ |
| 52 | + if test_dataset is None: |
| 53 | + test_dataset = self._create_test_dataset() |
| 54 | + |
| 55 | + if image_return is None: |
| 56 | + image_return = ({"id": "test-id", "success": True}, 0.1, 0) |
| 57 | + |
| 58 | + if annotation_return is None: |
| 59 | + annotation_return = ({"success": True}, 0.1, 0) |
| 60 | + |
| 61 | + # Create the mock objects |
| 62 | + mocks = { |
| 63 | + "parser": patch("roboflow.core.workspace.folderparser.parsefolder", return_value=test_dataset), |
| 64 | + "upload": patch("roboflow.core.workspace.Project.upload_image", side_effect=upload_image_side_effect) |
| 65 | + if upload_image_side_effect |
| 66 | + else patch("roboflow.core.workspace.Project.upload_image", return_value=image_return), |
| 67 | + "save_annotation": patch( |
| 68 | + "roboflow.core.workspace.Project.save_annotation", side_effect=save_annotation_side_effect |
| 69 | + ) |
| 70 | + if save_annotation_side_effect |
| 71 | + else patch("roboflow.core.workspace.Project.save_annotation", return_value=annotation_return), |
| 72 | + "get_project": patch( |
| 73 | + "roboflow.core.workspace.Workspace._get_or_create_project", return_value=(self.project, project_created) |
| 74 | + ), |
| 75 | + } |
| 76 | + |
| 77 | + return mocks |
| 78 | + |
12 | 79 | def test_check_valid_image_with_accepted_formats(self):
|
13 | 80 | images_to_test = [
|
14 | 81 | "rabbit.JPG",
|
@@ -224,3 +291,166 @@ def test_create_annotation_job_error(self):
|
224 | 291 | )
|
225 | 292 |
|
226 | 293 | self.assertEqual(str(context.exception), "Batch not found")
|
| 294 | + |
    @ordered
    @responses.activate
    def test_project_upload_dataset(self):
        """Test upload_dataset functionality with various scenarios.

        Each scenario dict supports the keys:
            name:            label for the scenario (not used programmatically)
            dataset:         image list passed to _create_test_dataset, or None
                             for the default dataset
            params:          extra kwargs forwarded to workspace.upload_dataset
            assertions:      per-mock checks; a list means expected positional
                             call args, a dict supports "count" and "kwargs"
            image_return:    optional custom upload_image return value
            project_created: simulate a newly created project
            extra_mocks:     additional (name, target, patch-kwargs) patches
        """
        test_scenarios = [
            {
                "name": "string_annotationdesc",
                "dataset": [{"file": "test_image.jpg", "split": "train", "annotationfile": "string_annotation.txt"}],
                "params": {"num_workers": 1},
                "assertions": {},
            },
            {
                "name": "success_basic",
                "dataset": [
                    {"file": "image1.jpg", "split": "train", "annotationfile": {"file": "image1.xml"}},
                    {"file": "image2.jpg", "split": "valid", "annotationfile": {"file": "image2.xml"}},
                ],
                "params": {},
                "assertions": {"parser": [("/test/dataset",)], "upload": {"count": 2}, "save_annotation": {"count": 2}},
                "image_return": ({"id": "test-id-1", "success": True}, 0.1, 0),
            },
            {
                "name": "custom_parameters",
                "dataset": None,
                "params": {
                    "num_workers": 2,
                    "project_license": "CC BY 4.0",
                    "project_type": "classification",
                    "batch_name": "test-batch",
                    "num_retries": 3,
                },
                "assertions": {"upload": {"count": 1, "kwargs": {"batch_name": "test-batch", "num_retry_uploads": 3}}},
            },
            {
                "name": "project_creation",
                "dataset": None,
                "params": {"project_name": "new-project"},
                "assertions": {},
                "project_created": True,
            },
            {
                "name": "with_labelmap",
                "dataset": [
                    {
                        "file": "image1.jpg",
                        "split": "train",
                        "annotationfile": {"file": "image1.xml", "labelmap": "path/to/labelmap.json"},
                    }
                ],
                "params": {},
                "assertions": {"save_annotation": {"count": 1}, "load_labelmap": {"count": 1}},
                "extra_mocks": [
                    (
                        "load_labelmap",
                        "roboflow.core.workspace.load_labelmap",
                        {"return_value": {"old_label": "new_label"}},
                    )
                ],
            },
            {
                "name": "concurrent_uploads",
                "dataset": [{"file": f"image{i}.jpg", "split": "train"} for i in range(10)],
                "params": {"num_workers": 5},
                "assertions": {"thread_pool": {"count": 1, "kwargs": {"max_workers": 5}}},
                "extra_mocks": [("thread_pool", "concurrent.futures.ThreadPoolExecutor", {})],
            },
            {"name": "empty_dataset", "dataset": [], "params": {}, "assertions": {"upload": {"count": 0}}},
            {
                "name": "raw_text_annotation",
                "dataset": [
                    {
                        "file": "image1.jpg",
                        "split": "train",
                        "annotationfile": {"rawText": "annotation content here", "format": "json"},
                    }
                ],
                "params": {},
                "assertions": {"save_annotation": {"count": 1}},
            },
        ]

        # Error cases inject exceptions via side effects; the generator-throw
        # lambda is an expression-level way to raise from inside a mock.
        error_cases = [
            {
                "name": "image_upload_error",
                "side_effect": {
                    "upload_image_side_effect": lambda *args, **kwargs: (_ for _ in ()).throw(
                        ImageUploadError("Failed to upload image")
                    )
                },
                "params": {"num_workers": 1},
            },
            {
                "name": "annotation_upload_error",
                "side_effect": {
                    "save_annotation_side_effect": lambda *args, **kwargs: (_ for _ in ()).throw(
                        AnnotationSaveError("Failed to save annotation")
                    )
                },
                "params": {"num_workers": 1},
            },
        ]

        for scenario in test_scenarios:
            # None dataset means "let the mock helper build its default".
            test_dataset = (
                self._create_test_dataset(scenario.get("dataset")) if scenario.get("dataset") is not None else None
            )

            extra_mocks = {}
            if "extra_mocks" in scenario:
                for mock_name, target, config in scenario.get("extra_mocks", []):
                    extra_mocks[mock_name] = patch(target, **config)

            mocks = self._setup_upload_dataset_mocks(
                test_dataset=test_dataset,
                image_return=scenario.get("image_return"),
                project_created=scenario.get("project_created", False),
            )

            # Start every patcher and keep the resulting mock objects so the
            # assertion phase below can inspect call counts and arguments.
            mock_objects = {}
            for name, mock in mocks.items():
                mock_objects[name] = mock.start()

            for name, mock in extra_mocks.items():
                mock_objects[name] = mock.start()

            try:
                params = {"dataset_path": "/test/dataset", "project_name": PROJECT_NAME}
                params.update(scenario.get("params", {}))

                self.workspace.upload_dataset(**params)

                for mock_name, assertion in scenario.get("assertions", {}).items():
                    if isinstance(assertion, list):
                        # List assertion: each tuple must appear among the
                        # positional args of some call to the mock.
                        mock_obj = mock_objects.get(mock_name)
                        call_args_list = [args for args, _ in mock_obj.call_args_list]
                        for expected_args in assertion:
                            self.assertIn(expected_args, call_args_list)
                    elif isinstance(assertion, dict):
                        # Dict assertion: optional exact call count, plus
                        # expected keyword args on the most recent call.
                        mock_obj = mock_objects.get(mock_name)
                        if "count" in assertion:
                            self.assertEqual(mock_obj.call_count, assertion["count"])
                        if "kwargs" in assertion and mock_obj.call_count > 0:
                            _, kwargs = mock_obj.call_args
                            for key, value in assertion["kwargs"].items():
                                self.assertEqual(kwargs.get(key), value)
            finally:
                # Always unpatch, even when an assertion fails mid-scenario.
                for mock in list(mocks.values()) + list(extra_mocks.values()):
                    mock.stop()

        # NOTE(review): these error cases only verify that upload_dataset does
        # not propagate the injected exceptions — presumably errors are caught
        # and reported internally; confirm and consider asserting on that.
        for case in error_cases:
            mocks = self._setup_upload_dataset_mocks(**case.get("side_effect", {}))

            for mock in mocks.values():
                mock.start()

            try:
                params = {"dataset_path": "/test/dataset", "project_name": PROJECT_NAME}
                params.update(case.get("params", {}))
                self.workspace.upload_dataset(**params)
            finally:
                for mock in mocks.values():
                    mock.stop()
0 commit comments