111 changes: 111 additions & 0 deletions api/parsers.py
@@ -0,0 +1,111 @@
import json
from django.http import QueryDict
from rest_framework import parsers

# NOTE: This class is needed to work with auto-generated OpenAPI SDKs.
# DRF's MultiPartParser expects nested data in dotted notation,
# e.g.: location.point.latitude, location.point.longitude
# See: https://b0uh.github.io/drf-how-to-handle-arrays-and-nested-objects-in-a-multipartform-data-request.html
# But most OpenAPI SDKs (like openapi-generator) do not support that; they only
# support nested JSON objects encoded as strings, e.g.:
# location: '{"point": {"latitude": .., "longitude": ..} }'
# This class converts those JSON strings into dotted notation keys.
# If you ever need bracket notation instead, see: https://github.com/remigermain/nested-multipart-parser/
class MultiPartJsonNestedParser(parsers.MultiPartParser):
"""
A custom multipart parser that extends MultiPartParser.

It parses nested JSON strings found in the value of form data fields
and converts them into dotted notation keys in the QueryDict.
"""
def parse(self, stream, media_type=None, parser_context=None):
"""
Parses the multi-part request data and converts nested JSON to dotted notation.

Returns a tuple of (QueryDict, MultiValueDict).
"""
# Call the base parser to get the initial QueryDict (data) and MultiValueDict (files)
result = super().parse(stream, media_type, parser_context)
data = result.data
files = result.files

# Create a mutable copy of the data QueryDict for modification
mutable_data = data.copy()
new_data = {}

# Iterate over all keys in the QueryDict
for key, value_list in mutable_data.lists():
# A value_list from QueryDict is always a list of strings

# 1. Attempt to parse the value as JSON if it looks like an object.
# Scalar fields (like 'created_at') arrive as single-element lists.
# If the list has multiple elements, the field is a plain multi-value
# field (e.g., 'tags': ['tag1', 'tag2']) and is left untouched.
if len(value_list) == 1 and isinstance(value_list[0], str) and value_list[0].strip().startswith('{'):
try:
json_data = json.loads(value_list[0])
# 2. Flatten the JSON dictionary into dotted notation
flattened = self._flatten_dict(json_data, parent_key=key)
# 3. Add the flattened data to our new_data dictionary
new_data.update(flattened)

# The original JSON-string key is simply not copied into new_data,
# so only the flattened keys survive.

except json.JSONDecodeError:
# Not valid JSON, treat it as a regular string field
new_data[key] = value_list

else:
# Field is not a single JSON string, e.g., 'note': [''] or 'tags': ['tag1', 'tag2']
# Keep the original data intact
new_data[key] = value_list

# Convert the resulting dictionary back into a QueryDict
# We need to construct it carefully as QueryDict expects lists of values
final_data = QueryDict('', mutable=True)
for k, v in new_data.items():
# v will be either a list (from original data) or a single value (from flattened json)
if isinstance(v, list):
final_data.setlist(k, v)
else:
final_data[k] = v

return parsers.DataAndFiles(final_data, files)

def _flatten_dict(self, d, parent_key='', sep='.'):
"""
Recursively flattens a nested dictionary into a single-level dictionary
with dotted keys.
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, dict):
# Recurse into nested dictionaries
items.extend(self._flatten_dict(v, new_key, sep=sep).items())
elif isinstance(v, list):
# Handle lists by keeping the key and setting the value as the list
# This is a simplification; a more complex parser might flatten lists too.
items.append((new_key, v))
else:
# Add simple key-value pair
items.append((new_key, v))

# Lists coming from the JSON are kept as lists so that parse() can pass them
# to QueryDict.setlist(); scalar values (str, int, float, bool, None) are
# converted to strings, matching how regular form data arrives.
final_flat_dict = {}
for k, v in items:
if isinstance(v, list):
final_flat_dict[k] = v
else:
final_flat_dict[k] = str(v)  # QueryDict wraps single values in a list on assignment

return final_flat_dict
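To make the conversion concrete, here is a minimal, self-contained sketch (not part of the diff) of the flattening step the parser applies to a JSON-encoded form value; the field name and coordinates are illustrative and mirror the integration tests.

import json

# Simplified stand-in for MultiPartJsonNestedParser._flatten_dict: nested dicts
# are flattened into dotted keys, scalar values are kept as-is.
def flatten(d, parent_key="", sep="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{sep}{k}" if parent_key else k
        if isinstance(v, dict):
            items.update(flatten(v, key, sep=sep))
        else:
            items[key] = v
    return items

form_value = '{"point": {"latitude": 41.67419, "longitude": 2.79036}}'
print(flatten(json.loads(form_value), parent_key="location"))
# {'location.point.latitude': 41.67419, 'location.point.longitude': 2.79036}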
57 changes: 50 additions & 7 deletions api/serializers.py
@@ -4,6 +4,7 @@
from uuid import UUID

from django.contrib.auth import get_user_model
from django.contrib.gis.geos import Point
from django.db import transaction

from drf_spectacular.utils import extend_schema_field
@@ -533,11 +534,10 @@ class SimplePhotoSerializer(serializers.ModelSerializer):
source="photo", use_url=True, read_only=True,
help_text="URL of the photo associated with the item. Note: This URL may change over time. Do not rely on it for permanent storage."
)
file = serializers.ImageField(required=True, source="photo", write_only=True)

class Meta:
model = Photo
fields = ("uuid", "url", "file")
fields = ("uuid", "url")
read_only_fields = (
"uuid",
)
@@ -552,7 +552,23 @@ class AdmBoundarySerializer(serializers.Serializer):
source = serializers.CharField(required=True, allow_null=False)
level = serializers.IntegerField(required=True, min_value=0)

point = PointField(required=True)
class PointSerializer(serializers.Serializer):
latitude = WritableSerializerMethodField(
field_class=serializers.FloatField,
required=True,
)
longitude = WritableSerializerMethodField(
field_class=serializers.FloatField,
required=True,
)

def get_latitude(self, obj: Point) -> float:
return obj.y

def get_longitude(self, obj: Point) -> float:
return obj.x

point = PointSerializer(required=True)
timezone = TimeZoneSerializerChoiceField(read_only=True, allow_null=True)
country = CountrySerializer(read_only=True, allow_null=True)
adm_boundaries = AdmBoundarySerializer(many=True, read_only=True)
@@ -580,8 +596,8 @@ def to_internal_value(self, data):
preffix = "selected"

point = ret.pop("point")
ret[f"{preffix}_location_lat"] = point.y
ret[f"{preffix}_location_lon"] = point.x
ret[f"{preffix}_location_lat"] = point['latitude']
ret[f"{preffix}_location_lon"] = point['longitude']

return ret

@@ -736,19 +752,46 @@ class Meta:
read_only_fields = fields

class BaseReportWithPhotosSerializer(BaseReportSerializer):
photos = SimplePhotoSerializer(required=True, many=True)

def get_fields(self):
fields = super().get_fields()
request = self.context.get("request")

# Use different field behavior depending on request method
if request and request.method in ("POST", "PUT", "PATCH"):
# Write mode — accept uploaded image files
fields["photos"] = serializers.ListField(
child=serializers.ImageField(required=True),
write_only=True,
min_length=1,
Member Author
max 3?
)
else:
# Read mode — return nested photo serializer
fields["photos"] = SimplePhotoSerializer(many=True, read_only=True)

return fields

@transaction.atomic
def create(self, validated_data):
photos = validated_data.pop("photos", [])

instance = super().create(validated_data)

# NOTE: do not use bulk here.
for photo in photos:
_ = Photo.objects.create(report=instance, **photo)
_ = Photo.objects.create(report=instance, photo=photo)

return instance

def to_representation(self, instance):
"""
Always serialize output using the read-only `photos` definition,
even if this serializer was initialized in write mode.
"""
# Rebind `photos` temporarily for output
self.fields["photos"] = SimplePhotoSerializer(many=True, read_only=True)
return super().to_representation(instance)

class Meta(BaseReportSerializer.Meta):
fields = BaseReportSerializer.Meta.fields + ("photos",)

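A small sketch, assuming GeoDjango's GEOS bindings are installed, of the coordinate mapping the new PointSerializer performs; the coordinates are the illustrative values used in the tests.

from django.contrib.gis.geos import Point

p = Point(2.79036, 41.67419)  # GEOS Point stores x=longitude, y=latitude

# PointSerializer reads latitude from Point.y and longitude from Point.x:
representation = {"latitude": p.y, "longitude": p.x}
print(representation)  # {'latitude': 41.67419, 'longitude': 2.79036}

On write, to_internal_value now reads point['latitude'] and point['longitude'] from the nested dict produced by PointSerializer, instead of Point.y / Point.x as before.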
3 changes: 2 additions & 1 deletion api/tests/integration/bites/create.tavern.yml
@@ -41,7 +41,8 @@ stages:
data: &request_bite_data
created_at: '2024-01-01T00:00:00Z'
sent_at: '2024-01-01T00:30:00Z'
location.point: !raw '{"latitude": 41.67419, "longitude": 2.79036}'
location.point.latitude: 41.67419
location.point.longitude: 2.79036
location.source: 'auto'
note: "Test"
tags:
9 changes: 5 additions & 4 deletions api/tests/integration/breeding_sites/create.tavern.yml
@@ -42,12 +42,13 @@ stages:
Authorization: "Bearer {app_user_token:s}"
method: "POST"
files:
photos[0]file: "{test_jpg_image_path}"
photos[1]file: "{test_png_image_path}"
photos[0]: "{test_jpg_image_path}"
photos[1]: "{test_png_image_path}"
data: &request_site_data
created_at: '2024-01-01T00:00:00Z'
sent_at: '2024-01-01T00:30:00Z'
location.point: !raw '{"latitude": 41.67419, "longitude": 2.79036}'
location.point.latitude: 41.67419
location.point.longitude: 2.79036
location.source: 'auto'
note: "Test"
tags:
@@ -112,6 +113,6 @@
request:
<<: *request_breeding_site
files:
photos[0]file: "{test_non_image_path}"
photos[0]: "{test_non_image_path}"
response:
status_code: 400
9 changes: 5 additions & 4 deletions api/tests/integration/observations/create.tavern.yml
@@ -42,12 +42,13 @@ stages:
Authorization: "Bearer {app_user_token:s}"
method: "POST"
files:
photos[0]file: "{test_jpg_image_path}"
photos[1]file: "{test_png_image_path}"
photos[0]: "{test_jpg_image_path}"
photos[1]: "{test_png_image_path}"
data: &request_site_data
created_at: '2024-01-01T00:00:00Z'
sent_at: '2024-01-01T00:30:00Z'
location.point: !raw '{"latitude": 41.67419, "longitude": 2.79036}'
location.point.latitude: 41.67419
location.point.longitude: 2.79036
location.source: 'auto'
note: "Test"
tags:
@@ -114,6 +115,6 @@
request:
<<: *request_observation
files:
photos[0]file: "{test_non_image_path}"
photos[0]: "{test_non_image_path}"
response:
status_code: 400
5 changes: 3 additions & 2 deletions api/views.py
@@ -25,7 +25,7 @@
UpdateModelMixin,
DestroyModelMixin,
)
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework.parsers import FormParser
from rest_framework.permissions import AllowAny, IsAuthenticated, SAFE_METHODS
from rest_framework.response import Response
from rest_framework.settings import api_settings
@@ -57,6 +57,7 @@
TaxonFilter
)
from .mixins import IdentificationTaskNestedAttribute
from .parsers import MultiPartJsonNestedParser
from .serializers import (
PartnerSerializer,
CampaignSerializer,
@@ -333,7 +334,7 @@ def get_parsers(self):
# Since photos are required on POST, only allow
# parsers that allow files.
if self.request and self.request.method == 'POST':
return [MultiPartParser(), FormParser()]
return [MultiPartJsonNestedParser(), FormParser()]
return super().get_parsers()

class BiteViewSet(BaseReportViewSet):
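For context, a hedged sketch of the kind of multipart POST an OpenAPI-generated client sends, which is what MultiPartJsonNestedParser is swapped in here to handle; the endpoint URL, token, and file name are placeholders, not taken from this PR.

import requests  # any HTTP client works; used here for illustration

data = {
    "created_at": "2024-01-01T00:00:00Z",
    "sent_at": "2024-01-01T00:30:00Z",
    # SDKs send nested objects as a JSON-encoded string; the parser expands this
    # into location.source, location.point.latitude, location.point.longitude.
    "location": '{"source": "auto", "point": {"latitude": 41.67419, "longitude": 2.79036}}',
    "note": "Test",
}
files = [("photos[0]", open("photo.jpg", "rb"))]

response = requests.post(
    "https://example.org/api/observations/",      # placeholder endpoint
    headers={"Authorization": "Bearer <token>"},  # placeholder token
    data=data,
    files=files,
)
print(response.status_code)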
5 changes: 3 additions & 2 deletions api/viewsets.py
@@ -1,11 +1,12 @@
from rest_framework.authentication import TokenAuthentication
from rest_framework.viewsets import GenericViewSet as DRFGenericViewSet
from rest_framework.pagination import PageNumberPagination
from rest_framework.parsers import JSONParser, FormParser, MultiPartParser
from rest_framework.parsers import JSONParser, FormParser
from rest_framework.renderers import JSONRenderer
from rest_framework_nested.viewsets import NestedViewSetMixin as OriginalNestedViewSetMixin, _force_mutable

from .auth.authentication import AppUserJWTAuthentication, NonAppUserSessionAuthentication
from .parsers import MultiPartJsonNestedParser
from .permissions import UserObjectPermissions, IsMobileUser, DjangoRegularUserModelPermissions


@@ -31,7 +32,7 @@ def pagination_class(self):
return self._pagination_class

permission_classes = (UserObjectPermissions,)
parser_classes = (JSONParser, FormParser, MultiPartParser)
parser_classes = (JSONParser, FormParser, MultiPartJsonNestedParser)
renderer_classes = (JSONRenderer,)


24 changes: 24 additions & 0 deletions tigaserver_app/migrations/0086_auto_20251016_1353.py
@@ -0,0 +1,24 @@
# Generated by Django 3.2.25 on 2025-10-16 13:53

from django.db import migrations, models
import tigaserver_app.models


class Migration(migrations.Migration):

dependencies = [
('tigaserver_app', '0085_auto_20250822_1214'),
]

operations = [
migrations.AlterField(
model_name='historicalreport',
name='report_id',
field=models.CharField(db_index=True, default=tigaserver_app.models.generate_report_id, help_text='4-digit alpha-numeric code generated on user phone to identify each unique report from that user. Digits should be randomly drawn from the set of all lowercase and uppercase alphabetic characters and 0-9, but excluding 0, o, and O to avoid confusion if we ever need user to be able to refer to a report ID in correspondence with MoveLab (as was previously the case when we had them sending samples).', max_length=4),
),
migrations.AlterField(
model_name='report',
name='report_id',
field=models.CharField(db_index=True, default=tigaserver_app.models.generate_report_id, help_text='4-digit alpha-numeric code generated on user phone to identify each unique report from that user. Digits should be randomly drawn from the set of all lowercase and uppercase alphabetic characters and 0-9, but excluding 0, o, and O to avoid confusion if we ever need user to be able to refer to a report ID in correspondence with MoveLab (as was previously the case when we had them sending samples).', max_length=4),
),
]
6 changes: 6 additions & 0 deletions tigaserver_app/models.py
@@ -11,7 +11,9 @@
from PIL import Image
import pydenticon
import os
import random
from slugify import slugify
import string
from typing import List, Optional, Union
import uuid

@@ -781,6 +783,9 @@ class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase):
class Meta(GenericUUIDTaggedItemBase.Meta, TaggedItemBase.Meta):
abstract = False

def generate_report_id():
return ''.join(random.choices(string.ascii_letters + string.digits, k=4))

class Report(TimeZoneModelMixin, models.Model):
TYPE_BITE = "bite"
TYPE_ADULT = "adult"
@@ -849,6 +854,7 @@ class Report(TimeZoneModelMixin, models.Model):
report_id = models.CharField(
max_length=4,
db_index=True,
default=generate_report_id,
help_text="4-digit alpha-numeric code generated on user phone to identify each unique report from that user. Digits should be randomly drawn from the set of all lowercase and uppercase alphabetic characters and 0-9, but excluding 0, o, and O to avoid confusion if we ever need user to be able to refer to a report ID in correspondence with MoveLab (as was previously the case when we had them sending samples).",
)

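A quick, standalone illustration of the new default; note that random.choices over ascii_letters + digits can still yield 0, o, or O, even though the field's help_text says those should be excluded.

import random
import string

# Mirrors generate_report_id() as added in this PR.
def generate_report_id():
    return ''.join(random.choices(string.ascii_letters + string.digits, k=4))

print(generate_report_id())  # e.g. 'aZ3q' (random each call)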