Commit 6c620be

Merge branch 'master' into add-talk-api
2 parents 31405ef + a35f199

5 files changed: +338 −158 lines


CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -1,5 +1,8 @@
 # Changelog
 
+## 1.7.5
+* Added support for backoff and retry for error 409 [#107](https://github.com/singer-io/tap-zendesk/pull/107)
+* Code Formatting [#107](https://github.com/singer-io/tap-zendesk/pull/107)
 ## 1.7.4
 * Request Timeout Implementation [#79](https://github.com/singer-io/tap-zendesk/pull/79)
 ## 1.7.3

setup.py

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@
 from setuptools import setup
 
 setup(name='tap-zendesk',
-      version='1.7.4',
+      version='1.7.5',
       description='Singer.io tap for extracting data from the Zendesk API',
       author='Stitch',
       url='https://singer.io',

tap_zendesk/http.py

Lines changed: 14 additions & 1 deletion
@@ -109,6 +109,16 @@ def is_fatal(exception):
 
     return 400 <=status_code < 500
 
+def should_retry_error(exception):
+    """
+    Return true if exception is required to retry otherwise return false
+    """
+    if isinstance(exception, ZendeskConflictError):
+        return True
+    if isinstance(exception,Exception) and isinstance(exception.args[0][1],ConnectionResetError):
+        return True
+    return False
+
 def raise_for_error(response):
     """ Error handling method which throws custom error. Class for each error defined above which extends `ZendeskError`.
     This method map the status code with `ERROR_CODE_EXCEPTION_MAPPING` dictionary and accordingly raise the error.
@@ -130,6 +140,10 @@ def raise_for_error(response):
         response.status_code, {}).get("raise_exception", ZendeskError)
     raise exc(message, response) from None
 
+@backoff.on_exception(backoff.expo,
+                      (ZendeskConflictError),
+                      max_tries=10,
+                      giveup=lambda e: not should_retry_error(e))
 @backoff.on_exception(backoff.expo,
                       (HTTPError, ZendeskError), # Added support of backoff for all unhandled status codes.
                       max_tries=10,
@@ -158,7 +172,6 @@ def get_cursor_based(url, access_token, request_timeout, cursor=None, **kwargs):
 
     if cursor:
         params['page[after]'] = cursor
-
     response = call_api(url, request_timeout, params=params, headers=headers)
     response_json = response.json()
 
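Taken together, the new `should_retry_error` helper and the extra `backoff.on_exception` decorator make a 409 response (raised as `ZendeskConflictError`) retry up to 10 times with exponential backoff, while the `giveup` callback stops retrying as soon as `should_retry_error` returns False; the existing decorator for `HTTPError`/`ZendeskError` stays in place beneath it. A minimal standalone sketch of the same pattern follows; the `ZendeskConflictError` class and `fetch` function here are illustrative stand-ins, not the tap's actual definitions.

# Illustrative sketch only: ZendeskConflictError and fetch() are stand-ins
# for the tap's real exception class and request helper.
import backoff


class ZendeskConflictError(Exception):
    """Stand-in for the error the tap raises on an HTTP 409 response."""


def should_retry_error(exception):
    """Return True if the exception is worth retrying."""
    return isinstance(exception, ZendeskConflictError)


@backoff.on_exception(backoff.expo,          # exponential wait between tries
                      ZendeskConflictError,  # only catch 409 conflicts
                      max_tries=10,
                      giveup=lambda e: not should_retry_error(e))
def fetch(url):
    # The real tap issues the HTTP request here; this stub always conflicts,
    # so fetch() is attempted up to 10 times before the error propagates.
    raise ZendeskConflictError("409 Conflict from " + url)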
test/test_automatic_fields.py

Lines changed: 16 additions & 11 deletions
@@ -2,12 +2,15 @@
 import tap_tester.runner as runner
 from base import ZendeskTest
 
+# BUG https://jira.talendforge.org/browse/TDL-19428
+# [tap-zendesk] Consistently replicating duplicate `organizations` record
+
 class ZendeskAutomaticFields(ZendeskTest):
     """
-    Ensure running the tap with all streams selected and all fields deselected results in the replication of just the 
+    Ensure running the tap with all streams selected and all fields deselected results in the replication of just the
     primary keys and replication keys (automatic fields).
     """
-    
+
     def name(self):
         return "zendesk_automatic_fields"
 
@@ -17,7 +20,7 @@ def test_run(self):
         Verify that only the automatic fields are sent to the target.
         Verify that all replicated records have unique primary key values.
         """
-        
+
         streams_to_test = self.expected_check_streams()
 
         conn_id = connections.ensure_connection(self)
@@ -34,14 +37,14 @@
 
         record_count_by_stream = self.run_and_verify_sync(conn_id)
         synced_records = runner.get_records_from_target_output()
-        
+
         for stream in streams_to_test:
             with self.subTest(stream=stream):
 
                 # expected values
                 expected_keys = self.expected_automatic_fields().get(stream)
                 expected_primary_keys = self.expected_primary_keys()[stream]
-                
+
                 # collect actual values
                 data = synced_records.get(stream, {})
                 record_messages_keys = [set(row['data'].keys())
@@ -50,7 +53,7 @@
                                         for message in data.get('messages', [])
                                         if message.get('action') == 'upsert']
                 unique_primary_keys_list = set(primary_keys_list)
-                
+
                 # Verify that you get some records for each stream
                 self.assertGreater(
                     record_count_by_stream.get(stream, -1), 0,
@@ -59,8 +62,10 @@
                 # Verify that only the automatic fields are sent to the target
                 for actual_keys in record_messages_keys:
                     self.assertSetEqual(expected_keys, actual_keys)
-
-                #Verify that all replicated records have unique primary key values.
-                self.assertEqual(len(primary_keys_list),
-                                 len(unique_primary_keys_list),
-                                 msg="Replicated record does not have unique primary key values.")
+
+                # Verify that all replicated records have unique primary key values.
+                if stream == 'organizations': # BUG_TDL-19428
+                    continue # skipping
+                self.assertEqual(len(primary_keys_list),
+                                 len(unique_primary_keys_list),
+                                 msg="Replicated record does not have unique primary key values.")
