diff --git a/.flake8 b/.flake8 index aebca80..a02ae42 100644 --- a/.flake8 +++ b/.flake8 @@ -1,5 +1,5 @@ [flake8] -ignore = W503 -max-line-length = 79 +ignore = W503, E203 +max-line-length = 120 # max-complexity = 18 select = B,C,E,F,W,T4,B9 \ No newline at end of file diff --git a/jira_agile_metrics/calculator.py b/jira_agile_metrics/calculator.py index ec1bb11..c58f57e 100644 --- a/jira_agile_metrics/calculator.py +++ b/jira_agile_metrics/calculator.py @@ -1,10 +1,11 @@ import logging + logger = logging.getLogger(__name__) -class Calculator(object): - """Base class for calculators. - """ + +class Calculator: + """Base class for calculators.""" def __init__(self, query_manager, settings, results): """Initialise with a `QueryManager`, a dict of `settings`, @@ -35,28 +36,32 @@ def write(self): target directory. """ + def run_calculators(calculators, query_manager, settings): """Run all calculators passed in, in the order listed. Returns the aggregated results. """ results = {} - calculators = [C(query_manager, settings, results) for C in calculators] + calculators = [calculator(query_manager, settings, results) for calculator in calculators] # Run all calculators first - for c in calculators: - logger.info("%s running...", c.__class__.__name__) - results[c.__class__] = c.run() - logger.info("%s completed\n", c.__class__.__name__) + for calculator in calculators: + logger.info("%s running...", calculator.__class__.__name__) + results[calculator.__class__] = calculator.run() + logger.info("%s completed\n", calculator.__class__.__name__) # Write all files as a second pass - for c in calculators: - logger.info("Writing file for %s...", c.__class__.__name__) + for calculator in calculators: + logger.info("Writing file for %s...", calculator.__class__.__name__) try: - c.write() - except Exception as e: - logger.exception("Writing file for %s failed with a fatal error. 
Attempting to run subsequent writers regardless.", c.__class__.__name__) + calculator.write() + except Exception: + logger.exception( + "Writing file for %s failed with a fatal error. Attempting to run subsequent writers regardless.", + calculator.__class__.__name__, + ) else: - logger.info("%s completed\n", c.__class__.__name__) + logger.info("%s completed\n", calculator.__class__.__name__) return results diff --git a/jira_agile_metrics/calculator_test.py b/jira_agile_metrics/calculator_test.py index 645dfa3..72fa241 100644 --- a/jira_agile_metrics/calculator_test.py +++ b/jira_agile_metrics/calculator_test.py @@ -1,46 +1,37 @@ -from .calculator import ( - Calculator, - run_calculators -) +from .calculator import Calculator, run_calculators + def test_run_calculator(): - + written = [] class Enabled(Calculator): - def run(self): return "Enabled" - + def write(self): written.append("Enabled") - - class Disabled(Calculator): + class Disabled(Calculator): def run(self): return "Disabled" - + def write(self): pass class GetPreviousResult(Calculator): - def run(self): - return self.get_result(Enabled) + " " + self.settings['foo'] - + return self.get_result(Enabled) + " " + self.settings["foo"] + def write(self): written.append(self.get_result()) - + calculators = [Enabled, Disabled, GetPreviousResult] query_manager = object() - settings = {'foo': 'bar'} + settings = {"foo": "bar"} results = run_calculators(calculators, query_manager, settings) - assert results == { - Enabled: "Enabled", - Disabled: "Disabled", - GetPreviousResult: "Enabled bar" - } + assert results == {Enabled: "Enabled", Disabled: "Disabled", GetPreviousResult: "Enabled bar"} assert written == ["Enabled", "Enabled bar"] diff --git a/jira_agile_metrics/calculators/ageingwip.py b/jira_agile_metrics/calculators/ageingwip.py index 35e4e01..571ad46 100644 --- a/jira_agile_metrics/calculators/ageingwip.py +++ b/jira_agile_metrics/calculators/ageingwip.py @@ -1,32 +1,33 @@ import logging + +import 
matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import seaborn as sns from ..calculator import Calculator from ..utils import set_chart_style - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class AgeingWIPChartCalculator(Calculator): - """Draw an ageing WIP chart - """ + """Draw an ageing WIP chart""" def run(self, today=None): # short circuit relatively expensive calculation if it won't be used - if not self.settings['ageing_wip_chart']: + if not self.settings["ageing_wip_chart"]: return None cycle_data = self.get_result(CycleTimeCalculator) - cycle_names = [s['name'] for s in self.settings['cycle']] + cycle_names = [s["name"] for s in self.settings["cycle"]] - committed_column = self.settings['committed_column'] - done_column = self.settings['done_column'] - last_active_column = cycle_names[cycle_names.index(done_column)-1] + committed_column = self.settings["committed_column"] + done_column = self.settings["done_column"] + last_active_column = cycle_names[cycle_names.index(done_column) - 1] today = pd.Timestamp.now().date() if today is None else today # to allow testing @@ -48,22 +49,25 @@ def extract_age(row): return np.NaN return (today - started.date()).days - ageing_wip_data['status'] = ageing_wip_data.apply(extract_status, axis=1) - ageing_wip_data['age'] = ageing_wip_data.apply(extract_age, axis=1) + ageing_wip_data["status"] = ageing_wip_data.apply(extract_status, axis=1) + ageing_wip_data["age"] = ageing_wip_data.apply(extract_age, axis=1) # remove blank rows - ageing_wip_data.dropna(how='any', inplace=True, subset=['status', 'age']) + ageing_wip_data.dropna(how="any", inplace=True, subset=["status", "age"]) # reorder columns so we get key, summary, status, age, and then all the cycle stages - ageing_wip_data = pd.concat(( - ageing_wip_data[['key', 'summary', 'status', 'age']], - ageing_wip_data.loc[:, committed_column:last_active_column] - ), axis=1) + ageing_wip_data 
= pd.concat( + ( + ageing_wip_data[["key", "summary", "status", "age"]], + ageing_wip_data.loc[:, committed_column:last_active_column], + ), + axis=1, + ) return ageing_wip_data def write(self): - output_file = self.settings['ageing_wip_chart'] + output_file = self.settings["ageing_wip_chart"] if not output_file: logger.debug("No output file specified for ageing WIP chart") return @@ -76,10 +80,10 @@ def write(self): fig, ax = plt.subplots() - if self.settings['ageing_wip_chart_title']: - ax.set_title(self.settings['ageing_wip_chart_title']) + if self.settings["ageing_wip_chart_title"]: + ax.set_title(self.settings["ageing_wip_chart_title"]) - sns.swarmplot(x='status', y='age', order=chart_data.columns[4:], data=chart_data, ax=ax) + sns.swarmplot(x="status", y="age", order=chart_data.columns[4:], data=chart_data, ax=ax) ax.set_xlabel("Status") ax.set_ylabel("Age (days)") @@ -93,5 +97,5 @@ def write(self): # Write file logger.info("Writing ageing WIP chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/ageingwip_test.py b/jira_agile_metrics/calculators/ageingwip_test.py index c4747d3..6766b2f 100644 --- a/jira_agile_metrics/calculators/ageingwip_test.py +++ b/jira_agile_metrics/calculators/ageingwip_test.py @@ -1,113 +1,105 @@ -import pytest import datetime -from pandas import DataFrame -from ..conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxChange as Change, - FauxFieldValue as Value -) +from pandas import DataFrame +import pytest +from ..conftest import FauxJIRA as JIRA, FauxIssue as Issue, FauxChange as Change, FauxFieldValue as Value from ..querymanager import QueryManager -from .cycletime import CycleTimeCalculator -from .ageingwip import AgeingWIPChartCalculator - from ..utils import extend_dict +from .ageingwip import AgeingWIPChartCalculator +from .cycletime import CycleTimeCalculator 
-@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'ageing_wip_chart': 'ageingwip.png' # without a file to write the calculator will stop - }) - -@pytest.fixture -def jira_with_skipped_columns(minimal_fields): - return JIRA(fields=minimal_fields, issues=[ - Issue("A-13", - summary="No Gaps", - issuetype=Value("Story", "story"), - status=Value("Build", "build"), - resolution=None, - resolutiondate=None, - created="2018-01-01 08:15:00", - changes=[ - Change("2018-01-02 08:15:00", [("status", "Backlog", "Next",)]), - Change("2018-01-03 08:15:00", [("status", "Next", "Build",)]), - ], - ), - Issue("A-14", - summary="Gaps", - issuetype=Value("Story", "story"), - status=Value("Build", "build"), - resolution=None, - resolutiondate=None, - created="2018-01-01 08:15:00", - changes=[ - Change("2018-01-02 08:15:00", [("status", "Backlog", "Build",)]), # skipping column Committed - ], - ), - Issue("A-15", - summary="Gaps and withdrawn", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-02 08:15:00", - created="2018-01-01 08:15:00", - changes=[ - Change("2018-01-02 08:15:00", [("status", "Backlog", "Done",), ("resolution", None, "Withdrawn")]), # skipping columns Committed, Build and Test - ], - ), - Issue("A-16", - summary="Gap in first committed step", - issuetype=Value("Story", "story"), - status=Value("Build", "Build"), - resolution=None, - resolutiondate=None, - created="2018-01-01 08:15:00", - changes=[ - Change("2018-01-03 08:15:00", [("status", "Backlog", "Build",)]), # skipping column Committed - ], - ), - ]) - -@pytest.fixture -def query_manager(minimal_query_manager): +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict( + minimal_settings, {"ageing_wip_chart": "ageingwip.png"} + ) # without a file to write the calculator will stop + + +@pytest.fixture(name="jira_with_skipped_columns") +def 
fixture_jira_with_skipped_columns(minimal_fields): + return JIRA( + fields=minimal_fields, + issues=[ + Issue( + "A-13", + summary="No Gaps", + issuetype=Value("Story", "story"), + status=Value("Build", "build"), + resolution=None, + resolutiondate=None, + created="2018-01-01 08:15:00", + changes=[ + Change("2018-01-02 08:15:00", [("status", "Backlog", "Next")]), + Change("2018-01-03 08:15:00", [("status", "Next", "Build")]), + ], + ), + Issue( + "A-14", + summary="Gaps", + issuetype=Value("Story", "story"), + status=Value("Build", "build"), + resolution=None, + resolutiondate=None, + created="2018-01-01 08:15:00", + changes=[Change("2018-01-02 08:15:00", [("status", "Backlog", "Build")])], # skipping column Committed + ), + Issue( + "A-15", + summary="Gaps and withdrawn", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-02 08:15:00", + created="2018-01-01 08:15:00", + changes=[ + Change( + "2018-01-02 08:15:00", [("status", "Backlog", "Done"), ("resolution", None, "Withdrawn")] + ), # skipping columns Committed, Build and Test + ], + ), + Issue( + "A-16", + summary="Gap in first committed step", + issuetype=Value("Story", "story"), + status=Value("Build", "Build"), + resolution=None, + resolutiondate=None, + created="2018-01-01 08:15:00", + changes=[Change("2018-01-03 08:15:00", [("status", "Backlog", "Build")])], # skipping column Committed + ), + ], + ) + + +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(large_cycle_time_results): - return extend_dict(large_cycle_time_results, {}) +@pytest.fixture(name="results") +def fixture_results(large_cycle_time_results): + return extend_dict(large_cycle_time_results, {}) -@pytest.fixture -def today(): +@pytest.fixture(name="today") +def fixture_today(): return datetime.date(2018, 1, 10) -@pytest.fixture -def now(today): + 
+@pytest.fixture(name="now") +def fixture_now(today): return datetime.datetime.combine(today, datetime.time(8, 30, 00)) def test_empty(query_manager, settings, minimal_cycle_time_columns, today): - results = { - CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[]) - } + results = {CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[])} calculator = AgeingWIPChartCalculator(query_manager, settings, results) data = calculator.run(today) - assert list(data.columns) == [ - 'key', - 'summary', - 'status', - 'age', - 'Committed', - 'Build', - 'Test' - ] + assert list(data.columns) == ["key", "summary", "status", "age", "Committed", "Build", "Test"] assert len(data.index) == 0 @@ -116,51 +108,44 @@ def test_columns(query_manager, settings, results, today): data = calculator.run(today) - assert list(data.columns) == [ - 'key', - 'summary', - 'status', - 'age', - 'Committed', - 'Build', - 'Test' - ] + assert list(data.columns) == ["key", "summary", "status", "age", "Committed", "Build", "Test"] + def test_calculate_ageing_wip(query_manager, settings, results, today): calculator = AgeingWIPChartCalculator(query_manager, settings, results) data = calculator.run(today) - assert data[['key', 'status', 'age']].to_dict('records') == [ - {'key': 'A-4', 'status': 'Committed', 'age': 8.0}, - {'key': 'A-5', 'status': 'Committed', 'age': 7.0}, - {'key': 'A-6', 'status': 'Committed', 'age': 7.0}, - {'key': 'A-7', 'status': 'Build', 'age': 8.0}, - {'key': 'A-8', 'status': 'Build', 'age': 8.0}, - {'key': 'A-9', 'status': 'Build', 'age': 8.0}, - {'key': 'A-10', 'status': 'Test', 'age': 8.0}, - {'key': 'A-11', 'status': 'Test', 'age': 8.0}, - {'key': 'A-12', 'status': 'Test', 'age': 8.0}, + assert data[["key", "status", "age"]].to_dict("records") == [ + {"key": "A-4", "status": "Committed", "age": 8.0}, + {"key": "A-5", "status": "Committed", "age": 7.0}, + {"key": "A-6", "status": "Committed", "age": 7.0}, + {"key": "A-7", "status": 
"Build", "age": 8.0}, + {"key": "A-8", "status": "Build", "age": 8.0}, + {"key": "A-9", "status": "Build", "age": 8.0}, + {"key": "A-10", "status": "Test", "age": 8.0}, + {"key": "A-11", "status": "Test", "age": 8.0}, + {"key": "A-12", "status": "Test", "age": 8.0}, ] + def test_calculate_ageing_wip_with_different_done_column(query_manager, settings, results, today): - settings.update({ - 'done_column': 'Test', - }) + settings.update({"done_column": "Test"}) calculator = AgeingWIPChartCalculator(query_manager, settings, results) data = calculator.run(today) - assert data[['key', 'status', 'age']].to_dict('records') == [ - {'key': 'A-4', 'status': 'Committed', 'age': 8.0}, - {'key': 'A-5', 'status': 'Committed', 'age': 7.0}, - {'key': 'A-6', 'status': 'Committed', 'age': 7.0}, - {'key': 'A-7', 'status': 'Build', 'age': 8.0}, - {'key': 'A-8', 'status': 'Build', 'age': 8.0}, - {'key': 'A-9', 'status': 'Build', 'age': 8.0} + assert data[["key", "status", "age"]].to_dict("records") == [ + {"key": "A-4", "status": "Committed", "age": 8.0}, + {"key": "A-5", "status": "Committed", "age": 7.0}, + {"key": "A-6", "status": "Committed", "age": 7.0}, + {"key": "A-7", "status": "Build", "age": 8.0}, + {"key": "A-8", "status": "Build", "age": 8.0}, + {"key": "A-9", "status": "Build", "age": 8.0}, ] + def test_calculate_ageing_wip_with_skipped_columns(jira_with_skipped_columns, settings, today, now): query_manager = QueryManager(jira_with_skipped_columns, settings) results = {} @@ -169,8 +154,8 @@ def test_calculate_ageing_wip_with_skipped_columns(jira_with_skipped_columns, se ageing_wip_calc = AgeingWIPChartCalculator(query_manager, settings, results) data = ageing_wip_calc.run(today=today) - assert data[['key', 'status', 'age']].to_dict('records') == [ - {'key': 'A-13', 'status': 'Build', 'age': 8.0}, - {'key': 'A-14', 'status': 'Build', 'age': 8.0}, - {'key': 'A-16', 'status': 'Build', 'age': 7.0}, + assert data[["key", "status", "age"]].to_dict("records") == [ + {"key": 
"A-13", "status": "Build", "age": 8.0}, + {"key": "A-14", "status": "Build", "age": 8.0}, + {"key": "A-16", "status": "Build", "age": 7.0}, ] diff --git a/jira_agile_metrics/calculators/burnup.py b/jira_agile_metrics/calculators/burnup.py index 6b40dc1..3d23b82 100644 --- a/jira_agile_metrics/calculators/burnup.py +++ b/jira_agile_metrics/calculators/burnup.py @@ -1,23 +1,24 @@ import logging -import pandas as pd + import matplotlib.pyplot as plt +import pandas as pd from ..calculator import Calculator from ..utils import set_chart_style - from .cfd import CFDCalculator + logger = logging.getLogger(__name__) + class BurnupCalculator(Calculator): - """Draw a simple burn-up chart. - """ + """Draw a simple burn-up chart.""" def run(self): cfd_data = self.get_result(CFDCalculator) - - backlog_column = self.settings['backlog_column'] - done_column = self.settings['done_column'] + + backlog_column = self.settings["backlog_column"] + done_column = self.settings["done_column"] if backlog_column not in cfd_data.columns: logger.error("Backlog column %s does not exist", backlog_column) @@ -27,9 +28,9 @@ def run(self): return None return cfd_data[[backlog_column, done_column]] - + def write(self): - output_file = self.settings['burnup_chart'] + output_file = self.settings["burnup_chart"] if not output_file: logger.debug("No output file specified for burnup chart") return @@ -39,10 +40,10 @@ def write(self): if len(chart_data.index) == 0: logger.warning("Unable to draw burnup chart with no data items") return - - window = self.settings['burnup_window'] + + window = self.settings["burnup_window"] if window: - start = chart_data.index.max() - pd.Timedelta(window, 'D') + start = chart_data.index.max() - pd.Timedelta(window, "D") chart_data = chart_data[start:] # Re-check after slicing for window @@ -51,9 +52,9 @@ def write(self): return fig, ax = plt.subplots() - - if self.settings['burnup_chart_title']: - ax.set_title(self.settings['burnup_chart_title']) + + if 
self.settings["burnup_chart_title"]: + ax.set_title(self.settings["burnup_chart_title"]) fig.autofmt_xdate() @@ -65,17 +66,17 @@ def write(self): bottom = chart_data[chart_data.columns[-1]].min() top = chart_data[chart_data.columns[0]].max() ax.set_ylim(bottom=bottom, top=top) - + # Place legend underneath graph box = ax.get_position() handles, labels = ax.get_legend_handles_labels() ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) - ax.legend(handles[:2], labels[:2], loc='upper center', bbox_to_anchor=(0.5, -0.2), ncol=2) + ax.legend(handles[:2], labels[:2], loc="upper center", bbox_to_anchor=(0.5, -0.2), ncol=2) set_chart_style() # Write file logger.info("Writing burnup chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/burnup_test.py b/jira_agile_metrics/calculators/burnup_test.py index 2971363..5b3e288 100644 --- a/jira_agile_metrics/calculators/burnup_test.py +++ b/jira_agile_metrics/calculators/burnup_test.py @@ -1,43 +1,42 @@ -import pytest from pandas import DataFrame, Timestamp +import pytest -from .cfd import CFDCalculator +from ..utils import extend_dict from .burnup import BurnupCalculator +from .cfd import CFDCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - }) +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict(minimal_settings, {}) + -@pytest.fixture -def query_manager(minimal_query_manager): +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(minimal_cfd_results): + +@pytest.fixture(name="results") +def fixture_results(minimal_cfd_results): return extend_dict(minimal_cfd_results, {}) + def test_empty(query_manager, settings, cfd_columns): - 
results = { - CFDCalculator: DataFrame([], columns=cfd_columns, index=[]) - } + results = {CFDCalculator: DataFrame([], columns=cfd_columns, index=[])} calculator = BurnupCalculator(query_manager, settings, results) data = calculator.run() assert len(data.index) == 0 + def test_columns(query_manager, settings, results): calculator = BurnupCalculator(query_manager, settings, results) data = calculator.run() - assert list(data.columns) == [ - 'Backlog', - 'Done' - ] + assert list(data.columns) == ["Backlog", "Done"] + def test_calculate_burnup(query_manager, settings, results): calculator = BurnupCalculator(query_manager, settings, results) @@ -45,47 +44,45 @@ def test_calculate_burnup(query_manager, settings, results): data = calculator.run() assert list(data.index) == [ - Timestamp('2018-01-01 00:00:00', freq='D'), - Timestamp('2018-01-02 00:00:00', freq='D'), - Timestamp('2018-01-03 00:00:00', freq='D'), - Timestamp('2018-01-04 00:00:00', freq='D'), - Timestamp('2018-01-05 00:00:00', freq='D'), - Timestamp('2018-01-06 00:00:00', freq='D') + Timestamp("2018-01-01 00:00:00", freq="D"), + Timestamp("2018-01-02 00:00:00", freq="D"), + Timestamp("2018-01-03 00:00:00", freq="D"), + Timestamp("2018-01-04 00:00:00", freq="D"), + Timestamp("2018-01-05 00:00:00", freq="D"), + Timestamp("2018-01-06 00:00:00", freq="D"), ] - assert data.to_dict('records') == [ - {'Backlog': 1.0, 'Done': 0.0}, - {'Backlog': 2.0, 'Done': 0.0}, - {'Backlog': 3.0, 'Done': 0.0}, - {'Backlog': 4.0, 'Done': 0.0}, - {'Backlog': 4.0, 'Done': 0.0}, - {'Backlog': 4.0, 'Done': 1.0}, + assert data.to_dict("records") == [ + {"Backlog": 1.0, "Done": 0.0}, + {"Backlog": 2.0, "Done": 0.0}, + {"Backlog": 3.0, "Done": 0.0}, + {"Backlog": 4.0, "Done": 0.0}, + {"Backlog": 4.0, "Done": 0.0}, + {"Backlog": 4.0, "Done": 1.0}, ] + def test_calculate_burnup_with_different_columns(query_manager, settings, results): - settings.update({ - 'backlog_column': 'Committed', - 'done_column': 'Test' - }) + 
settings.update({"backlog_column": "Committed", "done_column": "Test"}) calculator = BurnupCalculator(query_manager, settings, results) data = calculator.run() assert list(data.index) == [ - Timestamp('2018-01-01 00:00:00', freq='D'), - Timestamp('2018-01-02 00:00:00', freq='D'), - Timestamp('2018-01-03 00:00:00', freq='D'), - Timestamp('2018-01-04 00:00:00', freq='D'), - Timestamp('2018-01-05 00:00:00', freq='D'), - Timestamp('2018-01-06 00:00:00', freq='D') + Timestamp("2018-01-01 00:00:00", freq="D"), + Timestamp("2018-01-02 00:00:00", freq="D"), + Timestamp("2018-01-03 00:00:00", freq="D"), + Timestamp("2018-01-04 00:00:00", freq="D"), + Timestamp("2018-01-05 00:00:00", freq="D"), + Timestamp("2018-01-06 00:00:00", freq="D"), ] - assert data.to_dict('records') == [ - {'Committed': 0.0, 'Test': 0.0}, - {'Committed': 0.0, 'Test': 0.0}, - {'Committed': 2.0, 'Test': 0.0}, - {'Committed': 3.0, 'Test': 0.0}, - {'Committed': 3.0, 'Test': 1.0}, - {'Committed': 3.0, 'Test': 1.0}, + assert data.to_dict("records") == [ + {"Committed": 0.0, "Test": 0.0}, + {"Committed": 0.0, "Test": 0.0}, + {"Committed": 2.0, "Test": 0.0}, + {"Committed": 3.0, "Test": 0.0}, + {"Committed": 3.0, "Test": 1.0}, + {"Committed": 3.0, "Test": 1.0}, ] diff --git a/jira_agile_metrics/calculators/cfd.py b/jira_agile_metrics/calculators/cfd.py index eb54a9d..fa6ff73 100644 --- a/jira_agile_metrics/calculators/cfd.py +++ b/jira_agile_metrics/calculators/cfd.py @@ -1,15 +1,17 @@ import logging -import pandas as pd -import numpy as np + import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from ..calculator import Calculator from ..utils import get_extension, set_chart_style - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class CFDCalculator(Calculator): """Create the data to build a cumulative flow diagram: a DataFrame, indexed by day, with columns containing cumulative counts for each @@ -24,70 +26,71 @@ class CFDCalculator(Calculator): def 
run(self): cycle_data = self.get_result(CycleTimeCalculator) - cycle_names = [s['name'] for s in self.settings['cycle']] + cycle_names = [s["name"] for s in self.settings["cycle"]] return calculate_cfd_data(cycle_data, cycle_names) - + def write(self): data = self.get_result() - if self.settings['cfd_data']: - self.write_file(data, self.settings['cfd_data']) + if self.settings["cfd_data"]: + self.write_file(data, self.settings["cfd_data"]) else: logger.debug("No output file specified for CFD file") - - if self.settings['cfd_chart']: - self.write_chart(data, self.settings['cfd_chart']) + + if self.settings["cfd_chart"]: + self.write_chart(data, self.settings["cfd_chart"]) else: logger.debug("No output file specified for CFD chart") - def write_file(self, data, output_files): + @staticmethod + def write_file(data, output_files): for output_file in output_files: output_extension = get_extension(output_file) logger.info("Writing CFD data to %s", output_file) - if output_extension == '.json': - data.to_json(output_file, date_format='iso') - elif output_extension == '.xlsx': - data.to_excel(output_file, 'CFD') + if output_extension == ".json": + data.to_json(output_file, date_format="iso") + elif output_extension == ".xlsx": + data.to_excel(output_file, "CFD") else: data.to_csv(output_file) - + def write_chart(self, data, output_file): if len(data.index) == 0: logger.warning("Cannot draw CFD with no data") return - - window = self.settings['cfd_window'] + + window = self.settings["cfd_window"] if window: - start = data.index.max() - pd.Timedelta(window, 'D') + start = data.index.max() - pd.Timedelta(window, "D") data = data[start:] - + # Re-check after slicing if len(data.index) == 0: logger.warning("Cannot draw CFD with no data") return fig, ax = plt.subplots() - - if self.settings['cfd_chart_title']: - ax.set_title(self.settings['cfd_chart_title']) + + if self.settings["cfd_chart_title"]: + ax.set_title(self.settings["cfd_chart_title"]) fig.autofmt_xdate() 
ax.set_xlabel("Date") ax.set_ylabel("Number of items") - backlog_column = self.settings['backlog_column'] + backlog_column = self.settings["backlog_column"] if backlog_column not in data.columns: logger.error("Backlog column %s does not exist", backlog_column) - return None + return data = data.drop([backlog_column], axis=1) data.plot.area(ax=ax, stacked=False, legend=False) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) bottom = data[data.columns[-1]].min() top = data[data.columns[0]].max() @@ -97,9 +100,10 @@ def write_chart(self, data, output_file): # Write file logger.info("Writing CFD chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) + def calculate_cfd_data(cycle_data, cycle_names): # Build a dataframe of just the "date" columns @@ -107,13 +111,13 @@ def calculate_cfd_data(cycle_data, cycle_names): # Strip out times from all dates cfd_data = pd.DataFrame( - np.array(cfd_data.values, dtype=' %s], wiping data for subsequent step %s", issue.key, snapshot_cycle_step_name, snapshot.from_string, snapshot.to_string, cycle_name) + logger.info( + "Issue %s moved backwards to %s [JIRA: %s -> %s], wiping data for subsequent step %s", + issue.key, + snapshot_cycle_step_name, + snapshot.from_string, + snapshot.to_string, + cycle_name, + ) item[cycle_name] = None - elif snapshot.change == 'Flagged': + elif snapshot.change == "Flagged": if snapshot.from_string == snapshot.to_string is None: # Initial state from None -> None continue @@ -193,17 +215,22 @@ def calculate_cycle_times( impediment_start_status = last_status elif snapshot.to_string is None or snapshot.to_string == "": if impediment_start is None: - logger.warning("Issue %s had impediment flag cleared before being set. This should not happen.", issue.key) + logger.warning( + "Issue %s had impediment flag cleared before being set. 
This should not happen.", + issue.key, + ) continue if impediment_start_status in active_columns: - item['blocked_days'] += (snapshot.date.date() - impediment_start).days - item['impediments'].append({ - 'start': impediment_start, - 'end': snapshot.date.date(), - 'status': impediment_start_status, - 'flag': impediment_flag, - }) + item["blocked_days"] += (snapshot.date.date() - impediment_start).days + item["impediments"].append( + { + "start": impediment_start, + "end": snapshot.date.date(), + "status": impediment_start_status, + "flag": impediment_flag, + } + ) # Reset for next time impediment_flag = None @@ -216,22 +243,26 @@ def calculate_cycle_times( if issue.fields.resolutiondate: resolution_date = dateutil.parser.parse(issue.fields.resolutiondate).date() if impediment_start_status in active_columns: - item['blocked_days'] += (resolution_date - impediment_start).days - item['impediments'].append({ - 'start': impediment_start, - 'end': resolution_date, - 'status': impediment_start_status, - 'flag': impediment_flag, - }) + item["blocked_days"] += (resolution_date - impediment_start).days + item["impediments"].append( + { + "start": impediment_start, + "end": resolution_date, + "status": impediment_start_status, + "flag": impediment_flag, + } + ) else: if impediment_start_status in active_columns: - item['blocked_days'] += (now.date() - impediment_start).days - item['impediments'].append({ - 'start': impediment_start, - 'end': None, - 'status': impediment_start_status, - 'flag': impediment_flag, - }) + item["blocked_days"] += (now.date() - impediment_start).days + item["impediments"].append( + { + "start": impediment_start, + "end": None, + "status": impediment_start_status, + "flag": impediment_flag, + } + ) impediment_flag = None impediment_start = None impediment_start_status = None @@ -254,24 +285,27 @@ def calculate_cycle_times( committed_timestamp = previous_timestamp if committed_timestamp is not None and done_timestamp is not None: - item['cycle_time'] = 
done_timestamp - committed_timestamp - item['completed_timestamp'] = done_timestamp - + item["cycle_time"] = done_timestamp - committed_timestamp + item["completed_timestamp"] = done_timestamp - for k, v in item.items(): - series[k]['data'].append(v) + for key, value in item.items(): + series[key]["data"].append(value) if len(unmapped_statuses) > 0: - logger.warn("The following JIRA statuses were found, but not mapped to a workflow state, and have been ignored: %s", ', '.join(sorted(unmapped_statuses))) + logger.warning( + "The following JIRA statuses were found, but not mapped to a workflow state, and have been ignored: %s", + ", ".join(sorted(unmapped_statuses)), + ) data = {} - for k, v in series.items(): - data[k] = pd.Series(v['data'], dtype=v['dtype']) - - return pd.DataFrame(data, - columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] + - sorted(attributes.keys()) + - ([query_attribute] if query_attribute else []) + - ['cycle_time', 'completed_timestamp', 'blocked_days', 'impediments'] + - cycle_names + for key, value in series.items(): + data[key] = pd.Series(value["data"], dtype=value["dtype"]) + + return pd.DataFrame( + data, + columns=["key", "url", "issue_type", "summary", "status", "resolution"] + + sorted(attributes.keys()) + + ([query_attribute] if query_attribute else []) + + ["cycle_time", "completed_timestamp", "blocked_days", "impediments"] + + cycle_names, ) diff --git a/jira_agile_metrics/calculators/cycletime_test.py b/jira_agile_metrics/calculators/cycletime_test.py index 9422856..6799c72 100644 --- a/jira_agile_metrics/calculators/cycletime_test.py +++ b/jira_agile_metrics/calculators/cycletime_test.py @@ -1,134 +1,153 @@ -import pytest import datetime -from pandas import NaT, Timestamp, Timedelta -from ..conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxChange as Change, - FauxFieldValue as Value -) +from pandas import NaT, Timestamp, Timedelta +import pytest +from ..conftest import FauxJIRA as JIRA, 
FauxIssue as Issue, FauxChange as Change, FauxFieldValue as Value from ..querymanager import QueryManager from .cycletime import CycleTimeCalculator -@pytest.fixture -def jira(custom_fields): - return JIRA(fields=custom_fields, issues=[ - Issue("A-1", - summary="Just created", - issuetype=Value("Story", "story"), - status=Value("Backlog", "backlog"), - resolution=None, - resolutiondate=None, - created="2018-01-01 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 10), - customfield_003=Value(None, ["R2", "R3", "R4"]), - customfield_100=None, - changes=[], - ), - Issue("A-2", - summary="Started", - issuetype=Value("Story", "story"), - status=Value("Next", "next"), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 20), - customfield_003=Value(None, []), - customfield_100=None, - changes=[ - Change("2018-01-02 10:01:01", [("Flagged", None, "Impediment")]), - Change("2018-01-03 01:00:00", [("Flagged", "Impediment", "")]), # blocked 1 day in the backlog (doesn't count towards blocked days) - Change("2018-01-03 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-04 10:01:01", [("Flagged", "", "Impediment")]), - Change("2018-01-05 08:01:01", [("Flagged", "Impediment", "")]), # was blocked 1 day - Change("2018-01-08 10:01:01", [("Flagged", "", "Impediment")]), # stays blocked until today - ], - ), - Issue("A-3", - summary="Completed", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-06 01:01:01", - created="2018-01-03 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 30), - customfield_003=Value(None, []), - customfield_100=None, - changes=[ - Change("2018-01-03 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-04 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-04 10:01:01", [("Flagged", None, "Impediment")]), # should clear two days 
later when issue resolved - Change("2018-01-05 01:01:01", [("status", "Build", "QA",)]), - Change("2018-01-06 01:01:01", [("status", "QA", "Done",)]), - ], - ), - Issue("A-4", - summary="Moved back", - issuetype=Value("Story", "story"), - status=Value("Next", "next"), - resolution=None, - resolutiondate=None, - created="2018-01-04 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 30), - customfield_003=Value(None, []), - customfield_100=None, - changes=[ - Change("2018-01-04 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-05 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-06 01:01:01", [("status", "Build", "Next",)]), - Change("2018-01-07 01:01:01", [("Flagged", None, "Awaiting input")]), - Change("2018-01-10 10:01:01", [("Flagged", "Awaiting input", "")]), # blocked 3 days - ], - ), - ]) -@pytest.fixture -def jira_with_skipped_columns(custom_fields): - return JIRA(fields=custom_fields, issues=[ - Issue("A-10", - summary="Gaps", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-04 01:01:01", - created="2018-01-01 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 10), - customfield_003=Value(None, []), - customfield_100=None, - changes=[ - Change("2018-01-02 01:05:01", [("status", "Backlog", "Next",)]), - Change("2018-01-04 01:01:01", [("status", "Next", "Done",), ("resolution", None, "done")]), # skipping columns Build and Test - ], - ), - Issue("A-11", - summary="More Gaps", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-04 01:01:01", - created="2018-01-01 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 10), - customfield_003=Value(None, []), - customfield_100=None, - changes=[ - Change("2018-01-02 01:05:01", [("status", "Backlog", "Build",)]), - Change("2018-01-04 01:01:01", [("status", "Build", "Done",), 
("resolution", None, "done")]), # skipping columns Build and Test - ], - ), - ]) +@pytest.fixture(name="jira") +def fixture_jira(custom_fields): + return JIRA( + fields=custom_fields, + issues=[ + Issue( + "A-1", + summary="Just created", + issuetype=Value("Story", "story"), + status=Value("Backlog", "backlog"), + resolution=None, + resolutiondate=None, + created="2018-01-01 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 10), + customfield_003=Value(None, ["R2", "R3", "R4"]), + customfield_100=None, + changes=[], + ), + Issue( + "A-2", + summary="Started", + issuetype=Value("Story", "story"), + status=Value("Next", "next"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 20), + customfield_003=Value(None, []), + customfield_100=None, + changes=[ + Change("2018-01-02 10:01:01", [("Flagged", None, "Impediment")]), + Change( + "2018-01-03 01:00:00", [("Flagged", "Impediment", "")] + ), # blocked 1 day in the backlog (doesn't count towards blocked days) + Change("2018-01-03 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-04 10:01:01", [("Flagged", "", "Impediment")]), + Change("2018-01-05 08:01:01", [("Flagged", "Impediment", "")]), # was blocked 1 day + Change("2018-01-08 10:01:01", [("Flagged", "", "Impediment")]), # stays blocked until today + ], + ), + Issue( + "A-3", + summary="Completed", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-06 01:01:01", + created="2018-01-03 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 30), + customfield_003=Value(None, []), + customfield_100=None, + changes=[ + Change("2018-01-03 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-04 01:01:01", [("status", "Next", "Build")]), + Change( + "2018-01-04 10:01:01", [("Flagged", None, "Impediment")] + ), # should clear two days later when issue resolved 
+ Change("2018-01-05 01:01:01", [("status", "Build", "QA")]), + Change("2018-01-06 01:01:01", [("status", "QA", "Done")]), + ], + ), + Issue( + "A-4", + summary="Moved back", + issuetype=Value("Story", "story"), + status=Value("Next", "next"), + resolution=None, + resolutiondate=None, + created="2018-01-04 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 30), + customfield_003=Value(None, []), + customfield_100=None, + changes=[ + Change("2018-01-04 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-05 01:01:01", [("status", "Next", "Build")]), + Change("2018-01-06 01:01:01", [("status", "Build", "Next")]), + Change("2018-01-07 01:01:01", [("Flagged", None, "Awaiting input")]), + Change("2018-01-10 10:01:01", [("Flagged", "Awaiting input", "")]), # blocked 3 days + ], + ), + ], + ) + + +@pytest.fixture(name="jira_with_skipped_columns") +def fixture_jira_with_skipped_columns(custom_fields): + return JIRA( + fields=custom_fields, + issues=[ + Issue( + "A-10", + summary="Gaps", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-04 01:01:01", + created="2018-01-01 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 10), + customfield_003=Value(None, []), + customfield_100=None, + changes=[ + Change("2018-01-02 01:05:01", [("status", "Backlog", "Next")]), + Change( + "2018-01-04 01:01:01", [("status", "Next", "Done"), ("resolution", None, "done")] + ), # skipping columns Build and Test + ], + ), + Issue( + "A-11", + summary="More Gaps", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-04 01:01:01", + created="2018-01-01 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 10), + customfield_003=Value(None, []), + customfield_100=None, + changes=[ + Change("2018-01-02 01:05:01", [("status", "Backlog", "Build")]), + Change( + "2018-01-04 01:01:01", 
[("status", "Build", "Done"), ("resolution", None, "done")] + ), # skipping columns Build and Test + ], + ), + ], + ) -@pytest.fixture -def settings(custom_settings): + +@pytest.fixture(name="settings") +def fixture_settings(custom_settings): return custom_settings + def test_columns(jira, settings): query_manager = QueryManager(jira, settings) results = {} @@ -137,29 +156,27 @@ def test_columns(jira, settings): data = calculator.run() assert list(data.columns) == [ - 'key', - 'url', - 'issue_type', - 'summary', - 'status', - 'resolution', - - 'Estimate', - 'Release', - 'Team', - - 'cycle_time', - 'completed_timestamp', - 'blocked_days', - 'impediments', - - 'Backlog', - 'Committed', - 'Build', - 'Test', - 'Done' + "key", + "url", + "issue_type", + "summary", + "status", + "resolution", + "Estimate", + "Release", + "Team", + "cycle_time", + "completed_timestamp", + "blocked_days", + "impediments", + "Backlog", + "Committed", + "Build", + "Test", + "Done", ] + def test_empty(custom_fields, settings): jira = JIRA(fields=custom_fields, issues=[]) query_manager = QueryManager(jira, settings) @@ -170,6 +187,7 @@ def test_empty(custom_fields, settings): assert len(data.index) == 0 + def test_movement(jira, settings): query_manager = QueryManager(jira, settings) results = {} @@ -177,99 +195,117 @@ def test_movement(jira, settings): data = calculator.run(now=datetime.datetime(2018, 1, 10, 15, 37, 0)) - assert data.to_dict('records') == [{ - 'key': 'A-1', - 'url': 'https://example.org/browse/A-1', - 'issue_type': 'Story', - 'summary': 'Just created', - 'status': 'Backlog', - 'resolution': None, - - 'Estimate': 10, - 'Release': 'R3', - 'Team': 'Team 1', - - 'completed_timestamp': NaT, - 'cycle_time': NaT, - 'blocked_days': 0, - 'impediments': [], - - 'Backlog': Timestamp('2018-01-01 00:00:00'), - 'Committed': NaT, - 'Build': NaT, - 'Test': NaT, - 'Done': NaT, - }, { - 'key': 'A-2', - 'url': 'https://example.org/browse/A-2', - 'issue_type': 'Story', - 'summary': 'Started', - 
'status': 'Next', - 'resolution': None, - - 'Estimate': 20, - 'Release': 'None', - 'Team': 'Team 1', - - 'completed_timestamp': NaT, - 'cycle_time': NaT, - 'blocked_days': 3, - 'impediments': [ - {'start': datetime.date(2018, 1, 2), 'end': datetime.date(2018, 1, 3), 'status': 'Backlog', 'flag': 'Impediment'}, # doesn't count towards blocked_days - {'start': datetime.date(2018, 1, 4), 'end': datetime.date(2018, 1, 5), 'status': 'Committed', 'flag': 'Impediment'}, - {'start': datetime.date(2018, 1, 8), 'end': None, 'status': 'Committed', 'flag': 'Impediment'}, - ], - - 'Backlog': Timestamp('2018-01-02 00:00:00'), - 'Committed': Timestamp('2018-01-03 00:00:00'), - 'Build': NaT, - 'Test': NaT, - 'Done': NaT, - }, { - 'key': 'A-3', - 'url': 'https://example.org/browse/A-3', - 'summary': 'Completed', - 'issue_type': 'Story', - 'status': 'Done', - 'resolution': 'Done', - - 'Estimate': 30, - 'Release': 'None', - 'Team': 'Team 1', - - 'completed_timestamp': Timestamp('2018-01-06 00:00:00'), - 'cycle_time': Timedelta('3 days 00:00:00'), - 'blocked_days': 2, - 'impediments': [{'start': datetime.date(2018, 1, 4), 'end': datetime.date(2018, 1, 6), 'status': 'Build', 'flag': 'Impediment'}], - - 'Backlog': Timestamp('2018-01-03 00:00:00'), - 'Committed': Timestamp('2018-01-03 00:00:00'), - 'Build': Timestamp('2018-01-04 00:00:00'), - 'Test': Timestamp('2018-01-05 00:00:00'), - 'Done': Timestamp('2018-01-06 00:00:00'), - }, { - 'key': 'A-4', - 'url': 'https://example.org/browse/A-4', - 'summary': 'Moved back', - 'issue_type': 'Story', - 'status': 'Next', - 'resolution': None, - - 'Estimate': 30, - 'Release': 'None', - 'Team': 'Team 1', - - 'completed_timestamp': NaT, - 'cycle_time': NaT, - 'blocked_days': 3, - 'impediments': [{'start': datetime.date(2018, 1, 7), 'end': datetime.date(2018, 1, 10), 'status': 'Committed', 'flag': 'Awaiting input'}], + assert data.to_dict("records") == [ + { + "key": "A-1", + "url": "https://example.org/browse/A-1", + "issue_type": "Story", + 
"summary": "Just created", + "status": "Backlog", + "resolution": None, + "Estimate": 10, + "Release": "R3", + "Team": "Team 1", + "completed_timestamp": NaT, + "cycle_time": NaT, + "blocked_days": 0, + "impediments": [], + "Backlog": Timestamp("2018-01-01 00:00:00"), + "Committed": NaT, + "Build": NaT, + "Test": NaT, + "Done": NaT, + }, + { + "key": "A-2", + "url": "https://example.org/browse/A-2", + "issue_type": "Story", + "summary": "Started", + "status": "Next", + "resolution": None, + "Estimate": 20, + "Release": "None", + "Team": "Team 1", + "completed_timestamp": NaT, + "cycle_time": NaT, + "blocked_days": 3, + "impediments": [ + { + "start": datetime.date(2018, 1, 2), + "end": datetime.date(2018, 1, 3), + "status": "Backlog", + "flag": "Impediment", + }, # doesn't count towards blocked_days + { + "start": datetime.date(2018, 1, 4), + "end": datetime.date(2018, 1, 5), + "status": "Committed", + "flag": "Impediment", + }, + {"start": datetime.date(2018, 1, 8), "end": None, "status": "Committed", "flag": "Impediment"}, + ], + "Backlog": Timestamp("2018-01-02 00:00:00"), + "Committed": Timestamp("2018-01-03 00:00:00"), + "Build": NaT, + "Test": NaT, + "Done": NaT, + }, + { + "key": "A-3", + "url": "https://example.org/browse/A-3", + "summary": "Completed", + "issue_type": "Story", + "status": "Done", + "resolution": "Done", + "Estimate": 30, + "Release": "None", + "Team": "Team 1", + "completed_timestamp": Timestamp("2018-01-06 00:00:00"), + "cycle_time": Timedelta("3 days 00:00:00"), + "blocked_days": 2, + "impediments": [ + { + "start": datetime.date(2018, 1, 4), + "end": datetime.date(2018, 1, 6), + "status": "Build", + "flag": "Impediment", + } + ], + "Backlog": Timestamp("2018-01-03 00:00:00"), + "Committed": Timestamp("2018-01-03 00:00:00"), + "Build": Timestamp("2018-01-04 00:00:00"), + "Test": Timestamp("2018-01-05 00:00:00"), + "Done": Timestamp("2018-01-06 00:00:00"), + }, + { + "key": "A-4", + "url": "https://example.org/browse/A-4", + "summary": 
"Moved back", + "issue_type": "Story", + "status": "Next", + "resolution": None, + "Estimate": 30, + "Release": "None", + "Team": "Team 1", + "completed_timestamp": NaT, + "cycle_time": NaT, + "blocked_days": 3, + "impediments": [ + { + "start": datetime.date(2018, 1, 7), + "end": datetime.date(2018, 1, 10), + "status": "Committed", + "flag": "Awaiting input", + } + ], + "Backlog": Timestamp("2018-01-04 00:00:00"), + "Committed": Timestamp("2018-01-04 00:00:00"), + "Build": NaT, + "Test": NaT, + "Done": NaT, + }, + ] - 'Backlog': Timestamp('2018-01-04 00:00:00'), - 'Committed': Timestamp('2018-01-04 00:00:00'), - 'Build': NaT, - 'Test': NaT, - 'Done': NaT, - }] def test_movement_skipped_columns(jira_with_skipped_columns, settings): query_manager = QueryManager(jira_with_skipped_columns, settings) @@ -278,49 +314,45 @@ def test_movement_skipped_columns(jira_with_skipped_columns, settings): data = calculator.run(now=datetime.datetime(2018, 1, 10, 15, 37, 0)) - assert data.to_dict('records') == [{ - 'key': 'A-10', - 'url': 'https://example.org/browse/A-10', - 'issue_type': 'Story', - 'summary': 'Gaps', - 'status': 'Done', - 'resolution': 'Done', - - 'Estimate': 10, - 'Release': 'None', - 'Team': 'Team 1', - - 'completed_timestamp': Timestamp('2018-01-04 00:00:00'), - 'cycle_time': Timedelta('2 days 00:00:00'), - 'blocked_days': 0, - 'impediments': [], - - 'Backlog': Timestamp('2018-01-01 00:00:00'), - 'Committed': Timestamp('2018-01-02 00:00:00'), - 'Build': Timestamp('2018-01-04 00:00:00'), - 'Test': Timestamp('2018-01-04 00:00:00'), - 'Done': Timestamp('2018-01-04 00:00:00'), - }, { - 'key': 'A-11', - 'url': 'https://example.org/browse/A-11', - 'issue_type': 'Story', - 'summary': 'More Gaps', - 'status': 'Done', - 'resolution': 'Done', - - 'Estimate': 10, - 'Release': 'None', - 'Team': 'Team 1', - - 'completed_timestamp': Timestamp('2018-01-04 00:00:00'), - 'cycle_time': Timedelta('2 days 00:00:00'), - 'blocked_days': 0, - 'impediments': [], - - 'Backlog': 
Timestamp('2018-01-01 00:00:00'), - 'Committed': Timestamp('2018-01-02 00:00:00'), - 'Build': Timestamp('2018-01-02 00:00:00'), - 'Test': Timestamp('2018-01-04 00:00:00'), - 'Done': Timestamp('2018-01-04 00:00:00'), - }] - + assert data.to_dict("records") == [ + { + "key": "A-10", + "url": "https://example.org/browse/A-10", + "issue_type": "Story", + "summary": "Gaps", + "status": "Done", + "resolution": "Done", + "Estimate": 10, + "Release": "None", + "Team": "Team 1", + "completed_timestamp": Timestamp("2018-01-04 00:00:00"), + "cycle_time": Timedelta("2 days 00:00:00"), + "blocked_days": 0, + "impediments": [], + "Backlog": Timestamp("2018-01-01 00:00:00"), + "Committed": Timestamp("2018-01-02 00:00:00"), + "Build": Timestamp("2018-01-04 00:00:00"), + "Test": Timestamp("2018-01-04 00:00:00"), + "Done": Timestamp("2018-01-04 00:00:00"), + }, + { + "key": "A-11", + "url": "https://example.org/browse/A-11", + "issue_type": "Story", + "summary": "More Gaps", + "status": "Done", + "resolution": "Done", + "Estimate": 10, + "Release": "None", + "Team": "Team 1", + "completed_timestamp": Timestamp("2018-01-04 00:00:00"), + "cycle_time": Timedelta("2 days 00:00:00"), + "blocked_days": 0, + "impediments": [], + "Backlog": Timestamp("2018-01-01 00:00:00"), + "Committed": Timestamp("2018-01-02 00:00:00"), + "Build": Timestamp("2018-01-02 00:00:00"), + "Test": Timestamp("2018-01-04 00:00:00"), + "Done": Timestamp("2018-01-04 00:00:00"), + }, + ] diff --git a/jira_agile_metrics/calculators/debt.py b/jira_agile_metrics/calculators/debt.py index d33df34..31249fe 100644 --- a/jira_agile_metrics/calculators/debt.py +++ b/jira_agile_metrics/calculators/debt.py @@ -1,15 +1,17 @@ -import logging import datetime -import dateutil.parser +import logging -import pandas as pd +import dateutil.parser import matplotlib.pyplot as plt +import pandas as pd from ..calculator import Calculator from ..utils import breakdown_by_month, set_chart_style, to_bin + logger = logging.getLogger(__name__) 
+
+
 class DebtCalculator(Calculator):
     """Calculate technical debt over time.

@@ -26,7 +28,7 @@ class DebtCalculator(Calculator):

     def run(self, now=None):
-        query = self.settings['debt_query']
+        query = self.settings["debt_query"]

         # Allows unit testing to use a fixed date
         if now is None:
@@ -36,34 +38,41 @@ def run(self, now=None):
         if not query:
             logger.debug("Not calculating debt chart data as no query specified")
             return None
-        
+
         # Resolve field name to field id for later lookup
-        priority_field = self.settings['debt_priority_field']
-        priority_field_id = priority_field_id = self.query_manager.field_name_to_id(priority_field) if priority_field else None
+        priority_field = self.settings["debt_priority_field"]
+        priority_field_id = (
+            self.query_manager.field_name_to_id(priority_field) if priority_field else None
+        )

         # Build data frame
-        columns = ['key', 'priority', 'created', 'resolved', 'age']
+        columns = ["key", "priority", "created", "resolved", "age"]

         series = {
-            'key': {'data': [], 'dtype': 'str'},
-            'priority': {'data': [], 'dtype': 'str'},
-            'created': {'data': [], 'dtype': 'datetime64[ns]'},
-            'resolved': {'data': [], 'dtype': 'datetime64[ns]'},
-            'age': {'data': [], 'dtype': 'timedelta64[ns]'},
+            "key": {"data": [], "dtype": "str"},
+            "priority": {"data": [], "dtype": "str"},
+            "created": {"data": [], "dtype": "datetime64[ns]"},
+            "resolved": {"data": [], "dtype": "datetime64[ns]"},
+            "age": {"data": [], "dtype": "timedelta64[ns]"},
         }

         for issue in self.query_manager.find_issues(query, expand=None):
             created_date = dateutil.parser.parse(issue.fields.created)
             resolved_date = dateutil.parser.parse(issue.fields.resolutiondate) if issue.fields.resolutiondate else None

-            series['key']['data'].append(issue.key)
-            series['priority']['data'].append(self.query_manager.resolve_field_value(issue, priority_field_id) if priority_field else None)
-            series['created']['data'].append(created_date)
-            series['resolved']['data'].append(resolved_date)
-            
series['age']['data'].append((resolved_date.replace(tzinfo=None) if resolved_date is not None else now) - created_date.replace(tzinfo=None)) + series["key"]["data"].append(issue.key) + series["priority"]["data"].append( + self.query_manager.resolve_field_value(issue, priority_field_id) if priority_field else None + ) + series["created"]["data"].append(created_date) + series["resolved"]["data"].append(resolved_date) + series["age"]["data"].append( + (resolved_date.replace(tzinfo=None) if resolved_date is not None else now) + - created_date.replace(tzinfo=None) + ) data = {} - for k, v in series.items(): - data[k] = pd.Series(v['data'], dtype=v['dtype']) + for key, value in series.items(): + data[key] = pd.Series(value["data"], dtype=value["dtype"]) return pd.DataFrame(data, columns=columns) @@ -75,74 +84,76 @@ def write(self): if len(chart_data.index) == 0: logger.warning("Cannot draw debt chart with zero items") return - - if self.settings['debt_chart']: - self.write_debt_chart(chart_data, self.settings['debt_chart']) - - if self.settings['debt_age_chart']: - self.write_debt_age_chart(chart_data, self.settings['debt_age_chart']) - + + if self.settings["debt_chart"]: + self.write_debt_chart(chart_data, self.settings["debt_chart"]) + + if self.settings["debt_age_chart"]: + self.write_debt_age_chart(chart_data, self.settings["debt_age_chart"]) + def write_debt_chart(self, chart_data, output_file): - window = self.settings['debt_window'] - priority_values = self.settings['debt_priority_values'] + window = self.settings["debt_window"] + priority_values = self.settings["debt_priority_values"] + + breakdown = breakdown_by_month(chart_data, "created", "resolved", "key", "priority", priority_values) - breakdown = breakdown_by_month(chart_data, 'created', 'resolved', 'key', 'priority', priority_values) - if window: breakdown = breakdown[-window:] fig, ax = plt.subplots() - + breakdown.plot.bar(ax=ax, stacked=True) - - if self.settings['debt_chart_title']: - 
ax.set_title(self.settings['debt_chart_title']) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + if self.settings["debt_chart_title"]: + ax.set_title(self.settings["debt_chart_title"]) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of items", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing debt chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) - + def write_debt_age_chart(self, chart_data, output_file): - priority_values = self.settings['debt_priority_values'] - bins = self.settings['debt_age_chart_bins'] - - def generate_bin_label(v): - low, high = to_bin(v, bins) - return "> %d days" % (low,) if high is None else "%d-%d days" % (low, high,) + priority_values = self.settings["debt_priority_values"] + bins = self.settings["debt_age_chart_bins"] + + def generate_bin_label(value): + low, high = to_bin(value, bins) + return "> %d days" % (low) if high is None else "%d-%d days" % (low, high) def day_grouper(value): if isinstance(value, pd.Timedelta): return generate_bin_label(value.days) + return None bin_labels = list(map(generate_bin_label, bins + [bins[-1] + 1])) - breakdown = chart_data.pivot_table( - index='age', - columns='priority', - values='key', - aggfunc='count' - ).groupby(day_grouper).sum().reindex(bin_labels).T - + breakdown = ( + chart_data.pivot_table(index="age", columns="priority", values="key", aggfunc="count") + .groupby(day_grouper) + .sum() + .reindex(bin_labels) + .T + ) + if priority_values: breakdown = breakdown.reindex(priority_values) fig, ax = plt.subplots() - + breakdown.plot.barh(ax=ax, stacked=True) - - if self.settings['debt_age_chart_title']: - 
ax.set_title(self.settings['debt_age_chart_title']) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + if self.settings["debt_age_chart_title"]: + ax.set_title(self.settings["debt_age_chart_title"]) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Number of items", labelpad=20) ax.set_ylabel("Priority", labelpad=10) @@ -150,5 +161,5 @@ def day_grouper(value): # Write file logger.info("Writing debt age chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/debt_test.py b/jira_agile_metrics/calculators/debt_test.py index 1cc136e..5f737ec 100644 --- a/jira_agile_metrics/calculators/debt_test.py +++ b/jira_agile_metrics/calculators/debt_test.py @@ -1,114 +1,124 @@ import datetime -import pytest -from pandas import Timedelta, Timestamp, NaT - -from ..conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxFieldValue as Value -) -from ..utils import extend_dict +from pandas import Timedelta, Timestamp, NaT +import pytest +from ..conftest import FauxJIRA as JIRA, FauxIssue as Issue, FauxFieldValue as Value from ..querymanager import QueryManager +from ..utils import extend_dict from .debt import DebtCalculator -@pytest.fixture -def fields(minimal_fields): + +@pytest.fixture(name="fields") +def fixture_fields(minimal_fields): return minimal_fields + [ - {'id': 'priority', 'name': 'Priority'}, + {"id": "priority", "name": "Priority"}, ] -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'debt_query': 'issueType = "Tech Debt"', - 'debt_priority_field': 'Priority', - 'debt_priority_values': ['Low', 'Medium', 'High'], - 'debt_chart': 'debt-chart.png', - 'debt_chart_title': 'Debt chart', - 'debt_window': 3, - 'debt_age_chart': 'debt-age-chart.png', - 'debt_age_chart_title': 'Debt age', - 'debt_age_chart_bins': [10, 20, 30] - }) - -@pytest.fixture -def 
jira(fields): - return JIRA(fields=fields, issues=[ - Issue("D-1", - summary="Debt 1", - issuetype=Value("Tech Debt", "Tech Debt"), - status=Value("Closed", "closed"), - created="2018-01-01 01:01:01", - resolution="Done", - resolutiondate="2018-03-20 02:02:02", - priority=Value("High", "High"), - changes=[], - ), - Issue("D-2", - summary="Debt 2", - issuetype=Value("Tech Debt", "Tech Debt"), - status=Value("Closed", "closed"), - created="2018-01-02 01:01:01", - resolution="Done", - resolutiondate="2018-01-20 02:02:02", - priority=Value("Medium", "Medium"), - changes=[], - ), - Issue("D-3", - summary="Debt 3", - issuetype=Value("Tech Debt", "Tech Debt"), - status=Value("Closed", "closed"), - created="2018-02-03 01:01:01", - resolution="Done", - resolutiondate="2018-03-20 02:02:02", - priority=Value("High", "High"), - changes=[], - ), - Issue("D-4", - summary="Debt 4", - issuetype=Value("Tech Debt", "Tech Debt"), - status=Value("Closed", "closed"), - created="2018-01-04 01:01:01", - resolution=None, - resolutiondate=None, - priority=Value("Medium", "Medium"), - changes=[], - ), - Issue("D-5", - summary="Debt 5", - issuetype=Value("Tech Debt", "Tech Debt"), - status=Value("Closed", "closed"), - created="2018-02-05 01:01:01", - resolution="Done", - resolutiondate="2018-02-20 02:02:02", - priority=Value("High", "High"), - changes=[], - ), - Issue("D-6", - summary="Debt 6", - issuetype=Value("Tech Debt", "Tech Debt"), - status=Value("Closed", "closed"), - created="2018-03-06 01:01:01", - resolution=None, - resolutiondate=None, - priority=Value("Medium", "Medium"), - changes=[], - ), - ]) + +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict( + minimal_settings, + { + "debt_query": 'issueType = "Tech Debt"', + "debt_priority_field": "Priority", + "debt_priority_values": ["Low", "Medium", "High"], + "debt_chart": "debt-chart.png", + "debt_chart_title": "Debt chart", + "debt_window": 3, + "debt_age_chart": "debt-age-chart.png", + 
"debt_age_chart_title": "Debt age", + "debt_age_chart_bins": [10, 20, 30], + }, + ) + + +@pytest.fixture(name="jira") +def fixture_jira(fields): + return JIRA( + fields=fields, + issues=[ + Issue( + "D-1", + summary="Debt 1", + issuetype=Value("Tech Debt", "Tech Debt"), + status=Value("Closed", "closed"), + created="2018-01-01 01:01:01", + resolution="Done", + resolutiondate="2018-03-20 02:02:02", + priority=Value("High", "High"), + changes=[], + ), + Issue( + "D-2", + summary="Debt 2", + issuetype=Value("Tech Debt", "Tech Debt"), + status=Value("Closed", "closed"), + created="2018-01-02 01:01:01", + resolution="Done", + resolutiondate="2018-01-20 02:02:02", + priority=Value("Medium", "Medium"), + changes=[], + ), + Issue( + "D-3", + summary="Debt 3", + issuetype=Value("Tech Debt", "Tech Debt"), + status=Value("Closed", "closed"), + created="2018-02-03 01:01:01", + resolution="Done", + resolutiondate="2018-03-20 02:02:02", + priority=Value("High", "High"), + changes=[], + ), + Issue( + "D-4", + summary="Debt 4", + issuetype=Value("Tech Debt", "Tech Debt"), + status=Value("Closed", "closed"), + created="2018-01-04 01:01:01", + resolution=None, + resolutiondate=None, + priority=Value("Medium", "Medium"), + changes=[], + ), + Issue( + "D-5", + summary="Debt 5", + issuetype=Value("Tech Debt", "Tech Debt"), + status=Value("Closed", "closed"), + created="2018-02-05 01:01:01", + resolution="Done", + resolutiondate="2018-02-20 02:02:02", + priority=Value("High", "High"), + changes=[], + ), + Issue( + "D-6", + summary="Debt 6", + issuetype=Value("Tech Debt", "Tech Debt"), + status=Value("Closed", "closed"), + created="2018-03-06 01:01:01", + resolution=None, + resolutiondate=None, + priority=Value("Medium", "Medium"), + changes=[], + ), + ], + ) + def test_no_query(jira, settings): query_manager = QueryManager(jira, settings) results = {} - settings = extend_dict(settings, { - 'debt_query': None - }) + settings = extend_dict(settings, {"debt_query": None}) calculator = 
DebtCalculator(query_manager, settings, results) data = calculator.run() assert data is None + def test_columns(jira, settings): query_manager = QueryManager(jira, settings) results = {} @@ -116,7 +126,8 @@ def test_columns(jira, settings): data = calculator.run() - assert list(data.columns) == ['key', 'priority', 'created', 'resolved', 'age'] + assert list(data.columns) == ["key", "priority", "created", "resolved", "age"] + def test_empty(fields, settings): jira = JIRA(fields=fields, issues=[]) @@ -136,20 +147,54 @@ def test_breakdown(jira, settings): data = calculator.run(now=datetime.datetime(2018, 3, 21, 2, 2, 2)) - assert data.to_dict('records') == [ - {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('78 days 01:01:01'), 'priority': 'High'}, - {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'age': Timedelta('18 days 01:01:01'), 'priority': 'Medium'}, - {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('45 days 01:01:01'), 'priority': 'High'}, - {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'age': Timedelta('76 days 01:01:01'), 'priority': 'Medium'}, - {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'age': Timedelta('15 days 01:01:01'), 'priority': 'High'}, - {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'age': Timedelta('15 days 01:01:01'), 'priority': 'Medium'}, + assert data.to_dict("records") == [ + { + "key": "D-1", + "created": Timestamp("2018-01-01 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "age": Timedelta("78 days 01:01:01"), + "priority": "High", + }, + { + "key": "D-2", + "created": Timestamp("2018-01-02 01:01:01"), + "resolved": Timestamp("2018-01-20 02:02:02"), + "age": Timedelta("18 days 01:01:01"), + 
"priority": "Medium", + }, + { + "key": "D-3", + "created": Timestamp("2018-02-03 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "age": Timedelta("45 days 01:01:01"), + "priority": "High", + }, + { + "key": "D-4", + "created": Timestamp("2018-01-04 01:01:01"), + "resolved": NaT, + "age": Timedelta("76 days 01:01:01"), + "priority": "Medium", + }, + { + "key": "D-5", + "created": Timestamp("2018-02-05 01:01:01"), + "resolved": Timestamp("2018-02-20 02:02:02"), + "age": Timedelta("15 days 01:01:01"), + "priority": "High", + }, + { + "key": "D-6", + "created": Timestamp("2018-03-06 01:01:01"), + "resolved": NaT, + "age": Timedelta("15 days 01:01:01"), + "priority": "Medium", + }, ] def test_no_priority_field(jira, settings): - settings = extend_dict(settings, { - 'debt_priority_field': None - }) + settings = extend_dict(settings, {"debt_priority_field": None}) query_manager = QueryManager(jira, settings) results = {} @@ -157,11 +202,47 @@ def test_no_priority_field(jira, settings): data = calculator.run(now=datetime.datetime(2018, 3, 21, 2, 2, 2)) - assert data.to_dict('records') == [ - {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('78 days 01:01:01'), 'priority': None}, - {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'age': Timedelta('18 days 01:01:01'), 'priority': None}, - {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'age': Timedelta('45 days 01:01:01'), 'priority': None}, - {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'age': Timedelta('76 days 01:01:01'), 'priority': None}, - {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'age': Timedelta('15 days 01:01:01'), 'priority': None}, - {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'age': 
Timedelta('15 days 01:01:01'), 'priority': None}, + assert data.to_dict("records") == [ + { + "key": "D-1", + "created": Timestamp("2018-01-01 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "age": Timedelta("78 days 01:01:01"), + "priority": None, + }, + { + "key": "D-2", + "created": Timestamp("2018-01-02 01:01:01"), + "resolved": Timestamp("2018-01-20 02:02:02"), + "age": Timedelta("18 days 01:01:01"), + "priority": None, + }, + { + "key": "D-3", + "created": Timestamp("2018-02-03 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "age": Timedelta("45 days 01:01:01"), + "priority": None, + }, + { + "key": "D-4", + "created": Timestamp("2018-01-04 01:01:01"), + "resolved": NaT, + "age": Timedelta("76 days 01:01:01"), + "priority": None, + }, + { + "key": "D-5", + "created": Timestamp("2018-02-05 01:01:01"), + "resolved": Timestamp("2018-02-20 02:02:02"), + "age": Timedelta("15 days 01:01:01"), + "priority": None, + }, + { + "key": "D-6", + "created": Timestamp("2018-03-06 01:01:01"), + "resolved": NaT, + "age": Timedelta("15 days 01:01:01"), + "priority": None, + }, ] diff --git a/jira_agile_metrics/calculators/defects.py b/jira_agile_metrics/calculators/defects.py index 8d30790..367611b 100644 --- a/jira_agile_metrics/calculators/defects.py +++ b/jira_agile_metrics/calculators/defects.py @@ -1,14 +1,16 @@ import logging -import dateutil.parser -import pandas as pd +import dateutil.parser import matplotlib.pyplot as plt +import pandas as pd from ..calculator import Calculator from ..utils import breakdown_by_month, set_chart_style + logger = logging.getLogger(__name__) + class DefectsCalculator(Calculator): """Calculate defect concentration @@ -33,45 +35,53 @@ class DefectsCalculator(Calculator): def run(self): - query = self.settings['defects_query'] + query = self.settings["defects_query"] # This calculation is expensive. Only run it if we have a query. 
if not query: logger.debug("Not calculating defects chart data as no query specified") return None - + # Get the fields - priority_field = self.settings['defects_priority_field'] + priority_field = self.settings["defects_priority_field"] priority_field_id = self.query_manager.field_name_to_id(priority_field) if priority_field else None - type_field = self.settings['defects_type_field'] + type_field = self.settings["defects_type_field"] type_field_id = self.query_manager.field_name_to_id(type_field) if type_field else None - environment_field = self.settings['defects_environment_field'] + environment_field = self.settings["defects_environment_field"] environment_field_id = self.query_manager.field_name_to_id(environment_field) if environment_field else None - + # Build data frame - columns = ['key', 'priority', 'type', 'environment', 'created', 'resolved'] + columns = ["key", "priority", "type", "environment", "created", "resolved"] series = { - 'key': {'data': [], 'dtype': 'str'}, - 'priority': {'data': [], 'dtype': 'str'}, - 'type': {'data': [], 'dtype': 'str'}, - 'environment': {'data': [], 'dtype': 'str'}, - 'created': {'data': [], 'dtype': 'datetime64[ns]'}, - 'resolved': {'data': [], 'dtype': 'datetime64[ns]'}, + "key": {"data": [], "dtype": "str"}, + "priority": {"data": [], "dtype": "str"}, + "type": {"data": [], "dtype": "str"}, + "environment": {"data": [], "dtype": "str"}, + "created": {"data": [], "dtype": "datetime64[ns]"}, + "resolved": {"data": [], "dtype": "datetime64[ns]"}, } for issue in self.query_manager.find_issues(query, expand=None): - series['key']['data'].append(issue.key) - series['priority']['data'].append(self.query_manager.resolve_field_value(issue, priority_field_id) if priority_field else None) - series['type']['data'].append(self.query_manager.resolve_field_value(issue, type_field_id) if type_field else None) - series['environment']['data'].append(self.query_manager.resolve_field_value(issue, environment_field_id) if environment_field 
else None) - series['created']['data'].append(dateutil.parser.parse(issue.fields.created)) - series['resolved']['data'].append(dateutil.parser.parse(issue.fields.resolutiondate) if issue.fields.resolutiondate else None) + series["key"]["data"].append(issue.key) + series["priority"]["data"].append( + self.query_manager.resolve_field_value(issue, priority_field_id) if priority_field else None + ) + series["type"]["data"].append( + self.query_manager.resolve_field_value(issue, type_field_id) if type_field else None + ) + series["environment"]["data"].append( + self.query_manager.resolve_field_value(issue, environment_field_id) if environment_field else None + ) + series["created"]["data"].append(dateutil.parser.parse(issue.fields.created)) + series["resolved"]["data"].append( + dateutil.parser.parse(issue.fields.resolutiondate) if issue.fields.resolutiondate else None + ) data = {} - for k, v in series.items(): - data[k] = pd.Series(v['data'], dtype=v['dtype']) + for key, value in series.items(): + data[key] = pd.Series(value["data"], dtype=value["dtype"]) return pd.DataFrame(data, columns=columns) @@ -83,114 +93,114 @@ def write(self): if len(chart_data.index) == 0: logger.warning("Cannot draw defect charts with zero items") return - - if self.settings['defects_by_priority_chart']: - self.write_defects_by_priority_chart(chart_data, self.settings['defects_by_priority_chart']) - - if self.settings['defects_by_type_chart']: - self.write_defects_by_type_chart(chart_data, self.settings['defects_by_type_chart']) - - if self.settings['defects_by_environment_chart']: - self.write_defects_by_environment_chart(chart_data, self.settings['defects_by_environment_chart']) - + + if self.settings["defects_by_priority_chart"]: + self.write_defects_by_priority_chart(chart_data, self.settings["defects_by_priority_chart"]) + + if self.settings["defects_by_type_chart"]: + self.write_defects_by_type_chart(chart_data, self.settings["defects_by_type_chart"]) + + if 
self.settings["defects_by_environment_chart"]: + self.write_defects_by_environment_chart(chart_data, self.settings["defects_by_environment_chart"]) + def write_defects_by_priority_chart(self, chart_data, output_file): - window = self.settings['defects_window'] - priority_values = self.settings['defects_priority_values'] + window = self.settings["defects_window"] + priority_values = self.settings["defects_priority_values"] + + breakdown = breakdown_by_month(chart_data, "created", "resolved", "key", "priority", priority_values) - breakdown = breakdown_by_month(chart_data, 'created', 'resolved', 'key', 'priority', priority_values) - if window: breakdown = breakdown[-window:] - + if len(breakdown.index) == 0 or len(breakdown.columns) == 0: logger.warning("Cannot draw defects by priority chart with zero items") return fig, ax = plt.subplots() - + breakdown.plot.bar(ax=ax, stacked=True) - - if self.settings['defects_by_priority_chart_title']: - ax.set_title(self.settings['defects_by_priority_chart_title']) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + if self.settings["defects_by_priority_chart_title"]: + ax.set_title(self.settings["defects_by_priority_chart_title"]) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of items", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing defects by priority chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) - + def write_defects_by_type_chart(self, chart_data, output_file): - window = self.settings['defects_window'] - type_values = self.settings['defects_type_values'] + window = self.settings["defects_window"] + type_values = self.settings["defects_type_values"] + + 
breakdown = breakdown_by_month(chart_data, "created", "resolved", "key", "type", type_values) - breakdown = breakdown_by_month(chart_data, 'created', 'resolved', 'key', 'type', type_values) - if window: breakdown = breakdown[-window:] - + if len(breakdown.index) == 0 or len(breakdown.columns) == 0: logger.warning("Cannot draw defects by type chart with zero items") return fig, ax = plt.subplots() - + breakdown.plot.bar(ax=ax, stacked=True) - - if self.settings['defects_by_type_chart_title']: - ax.set_title(self.settings['defects_by_type_chart_title']) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + if self.settings["defects_by_type_chart_title"]: + ax.set_title(self.settings["defects_by_type_chart_title"]) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of items", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing defects by type chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) - + def write_defects_by_environment_chart(self, chart_data, output_file): - window = self.settings['defects_window'] - environment_values = self.settings['defects_environment_values'] + window = self.settings["defects_window"] + environment_values = self.settings["defects_environment_values"] + + breakdown = breakdown_by_month(chart_data, "created", "resolved", "key", "environment", environment_values) - breakdown = breakdown_by_month(chart_data, 'created', 'resolved', 'key', 'environment', environment_values) - if window: breakdown = breakdown[-window:] - + if len(breakdown.index) == 0 or len(breakdown.columns) == 0: logger.warning("Cannot draw defects by environment chart with zero items") return fig, ax = plt.subplots() - + 
breakdown.plot.bar(ax=ax, stacked=True) - - if self.settings['defects_by_environment_chart_title']: - ax.set_title(self.settings['defects_by_environment_chart_title']) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + if self.settings["defects_by_environment_chart_title"]: + ax.set_title(self.settings["defects_by_environment_chart_title"]) + + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of items", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing defects by environment chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/defects_test.py b/jira_agile_metrics/calculators/defects_test.py index 35d27aa..ce14a87 100644 --- a/jira_agile_metrics/calculators/defects_test.py +++ b/jira_agile_metrics/calculators/defects_test.py @@ -1,133 +1,141 @@ -import pytest from pandas import Timestamp, NaT +import pytest -from ..conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxFieldValue as Value -) - -from ..utils import extend_dict - +from ..conftest import FauxJIRA as JIRA, FauxIssue as Issue, FauxFieldValue as Value from ..querymanager import QueryManager +from ..utils import extend_dict from .defects import DefectsCalculator -@pytest.fixture -def fields(minimal_fields): + +@pytest.fixture(name="fields") +def fixture_fields(minimal_fields): return minimal_fields + [ - {'id': 'priority', 'name': 'Priority'}, - {'id': 'customfield_001', 'name': 'Environment'}, - {'id': 'customfield_002', 'name': 'Defect type'}, + {"id": "priority", "name": "Priority"}, + {"id": "customfield_001", "name": "Environment"}, + {"id": "customfield_002", "name": "Defect type"}, ] 
-@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'defects_query': 'issueType = Defect', - 'defects_window': 3, - 'defects_priority_field': 'Priority', - 'defects_priority_values': ['Low', 'Medium', 'High'], - 'defects_type_field': 'Defect type', - 'defects_type_values': ['Config', 'Data', 'Code'], - 'defects_environment_field': 'Environment', - 'defects_environment_values': ['SIT', 'UAT', 'PROD'], - - 'defects_by_priority_chart': 'defects-by-priority.png', - 'defects_by_priority_chart_title': 'Defects by priority', - 'defects_by_type_chart': 'defects-by-type.png', - 'defects_by_type_chart_title': 'Defects by type', - 'defects_by_environment_chart': 'defects-by-environment.png', - 'defects_by_environment_chart_title': 'Defects by environment', - }) - -@pytest.fixture -def jira(fields): - return JIRA(fields=fields, issues=[ - Issue("D-1", - summary="Debt 1", - issuetype=Value("Bug", "Bug"), - status=Value("Closed", "closed"), - created="2018-01-01 01:01:01", - resolution="Done", - resolutiondate="2018-03-20 02:02:02", - priority=Value("High", "High"), - customfield_001=Value(None, "PROD"), - customfield_002=Value(None, "Config"), - changes=[], - ), - Issue("D-2", - summary="Debt 2", - issuetype=Value("Bug", "Bug"), - status=Value("Closed", "closed"), - created="2018-01-02 01:01:01", - resolution="Done", - resolutiondate="2018-01-20 02:02:02", - priority=Value("Medium", "Medium"), - customfield_001=Value(None, "SIT"), - customfield_002=Value(None, "Config"), - changes=[], - ), - Issue("D-3", - summary="Debt 3", - issuetype=Value("Bug", "Bug"), - status=Value("Closed", "closed"), - created="2018-02-03 01:01:01", - resolution="Done", - resolutiondate="2018-03-20 02:02:02", - priority=Value("High", "High"), - customfield_001=Value(None, "UAT"), - customfield_002=Value(None, "Config"), - changes=[], - ), - Issue("D-4", - summary="Debt 4", - issuetype=Value("Bug", "Bug"), - status=Value("Closed", "closed"), - created="2018-01-04 
01:01:01", - resolution=None, - resolutiondate=None, - priority=Value("Medium", "Medium"), - customfield_001=Value(None, "PROD"), - customfield_002=Value(None, "Data"), - changes=[], - ), - Issue("D-5", - summary="Debt 5", - issuetype=Value("Bug", "Bug"), - status=Value("Closed", "closed"), - created="2018-02-05 01:01:01", - resolution="Done", - resolutiondate="2018-02-20 02:02:02", - priority=Value("High", "High"), - customfield_001=Value(None, "SIT"), - customfield_002=Value(None, "Data"), - changes=[], - ), - Issue("D-6", - summary="Debt 6", - issuetype=Value("Bug", "Bug"), - status=Value("Closed", "closed"), - created="2018-03-06 01:01:01", - resolution=None, - resolutiondate=None, - priority=Value("Medium", "Medium"), - customfield_001=Value(None, "UAT"), - customfield_002=Value(None, "Data"), - changes=[], - ), - ]) + +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict( + minimal_settings, + { + "defects_query": "issueType = Defect", + "defects_window": 3, + "defects_priority_field": "Priority", + "defects_priority_values": ["Low", "Medium", "High"], + "defects_type_field": "Defect type", + "defects_type_values": ["Config", "Data", "Code"], + "defects_environment_field": "Environment", + "defects_environment_values": ["SIT", "UAT", "PROD"], + "defects_by_priority_chart": "defects-by-priority.png", + "defects_by_priority_chart_title": "Defects by priority", + "defects_by_type_chart": "defects-by-type.png", + "defects_by_type_chart_title": "Defects by type", + "defects_by_environment_chart": "defects-by-environment.png", + "defects_by_environment_chart_title": "Defects by environment", + }, + ) + + +@pytest.fixture(name="jira") +def fixture_jira(fields): + return JIRA( + fields=fields, + issues=[ + Issue( + "D-1", + summary="Debt 1", + issuetype=Value("Bug", "Bug"), + status=Value("Closed", "closed"), + created="2018-01-01 01:01:01", + resolution="Done", + resolutiondate="2018-03-20 02:02:02", + priority=Value("High", 
"High"), + customfield_001=Value(None, "PROD"), + customfield_002=Value(None, "Config"), + changes=[], + ), + Issue( + "D-2", + summary="Debt 2", + issuetype=Value("Bug", "Bug"), + status=Value("Closed", "closed"), + created="2018-01-02 01:01:01", + resolution="Done", + resolutiondate="2018-01-20 02:02:02", + priority=Value("Medium", "Medium"), + customfield_001=Value(None, "SIT"), + customfield_002=Value(None, "Config"), + changes=[], + ), + Issue( + "D-3", + summary="Debt 3", + issuetype=Value("Bug", "Bug"), + status=Value("Closed", "closed"), + created="2018-02-03 01:01:01", + resolution="Done", + resolutiondate="2018-03-20 02:02:02", + priority=Value("High", "High"), + customfield_001=Value(None, "UAT"), + customfield_002=Value(None, "Config"), + changes=[], + ), + Issue( + "D-4", + summary="Debt 4", + issuetype=Value("Bug", "Bug"), + status=Value("Closed", "closed"), + created="2018-01-04 01:01:01", + resolution=None, + resolutiondate=None, + priority=Value("Medium", "Medium"), + customfield_001=Value(None, "PROD"), + customfield_002=Value(None, "Data"), + changes=[], + ), + Issue( + "D-5", + summary="Debt 5", + issuetype=Value("Bug", "Bug"), + status=Value("Closed", "closed"), + created="2018-02-05 01:01:01", + resolution="Done", + resolutiondate="2018-02-20 02:02:02", + priority=Value("High", "High"), + customfield_001=Value(None, "SIT"), + customfield_002=Value(None, "Data"), + changes=[], + ), + Issue( + "D-6", + summary="Debt 6", + issuetype=Value("Bug", "Bug"), + status=Value("Closed", "closed"), + created="2018-03-06 01:01:01", + resolution=None, + resolutiondate=None, + priority=Value("Medium", "Medium"), + customfield_001=Value(None, "UAT"), + customfield_002=Value(None, "Data"), + changes=[], + ), + ], + ) + def test_no_query(jira, settings): query_manager = QueryManager(jira, settings) results = {} - settings = extend_dict(settings, { - 'defects_query': None - }) + settings = extend_dict(settings, {"defects_query": None}) calculator = 
DefectsCalculator(query_manager, settings, results) data = calculator.run() assert data is None + def test_columns(jira, settings): query_manager = QueryManager(jira, settings) results = {} @@ -135,7 +143,8 @@ def test_columns(jira, settings): data = calculator.run() - assert list(data.columns) == ['key', 'priority', 'type', 'environment', 'created', 'resolved'] + assert list(data.columns) == ["key", "priority", "type", "environment", "created", "resolved"] + def test_empty(fields, settings): jira = JIRA(fields=fields, issues=[]) @@ -155,20 +164,60 @@ def test_breakdown(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': 'High', 'environment': 'PROD', 'type': 'Config'}, - {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'priority': 'Medium', 'environment': 'SIT', 'type': 'Config'}, - {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': 'High', 'environment': 'UAT', 'type': 'Config'}, - {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'priority': 'Medium', 'environment': 'PROD', 'type': 'Data'}, - {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'priority': 'High', 'environment': 'SIT', 'type': 'Data'}, - {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'priority': 'Medium', 'environment': 'UAT', 'type': 'Data'}, + assert data.to_dict("records") == [ + { + "key": "D-1", + "created": Timestamp("2018-01-01 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": "High", + "environment": "PROD", + "type": "Config", + }, + { + "key": "D-2", + "created": Timestamp("2018-01-02 01:01:01"), + "resolved": Timestamp("2018-01-20 02:02:02"), + "priority": "Medium", + "environment": 
"SIT", + "type": "Config", + }, + { + "key": "D-3", + "created": Timestamp("2018-02-03 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": "High", + "environment": "UAT", + "type": "Config", + }, + { + "key": "D-4", + "created": Timestamp("2018-01-04 01:01:01"), + "resolved": NaT, + "priority": "Medium", + "environment": "PROD", + "type": "Data", + }, + { + "key": "D-5", + "created": Timestamp("2018-02-05 01:01:01"), + "resolved": Timestamp("2018-02-20 02:02:02"), + "priority": "High", + "environment": "SIT", + "type": "Data", + }, + { + "key": "D-6", + "created": Timestamp("2018-03-06 01:01:01"), + "resolved": NaT, + "priority": "Medium", + "environment": "UAT", + "type": "Data", + }, ] def test_no_priority_field(jira, settings): - settings = extend_dict(settings, { - 'defects_priority_field': None - }) + settings = extend_dict(settings, {"defects_priority_field": None}) query_manager = QueryManager(jira, settings) results = {} @@ -176,19 +225,60 @@ def test_no_priority_field(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': None, 'environment': 'PROD', 'type': 'Config'}, - {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'priority': None, 'environment': 'SIT', 'type': 'Config'}, - {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': None, 'environment': 'UAT', 'type': 'Config'}, - {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'priority': None, 'environment': 'PROD', 'type': 'Data'}, - {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'priority': None, 'environment': 'SIT', 'type': 'Data'}, - {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'priority': None, 
'environment': 'UAT', 'type': 'Data'}, + assert data.to_dict("records") == [ + { + "key": "D-1", + "created": Timestamp("2018-01-01 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": None, + "environment": "PROD", + "type": "Config", + }, + { + "key": "D-2", + "created": Timestamp("2018-01-02 01:01:01"), + "resolved": Timestamp("2018-01-20 02:02:02"), + "priority": None, + "environment": "SIT", + "type": "Config", + }, + { + "key": "D-3", + "created": Timestamp("2018-02-03 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": None, + "environment": "UAT", + "type": "Config", + }, + { + "key": "D-4", + "created": Timestamp("2018-01-04 01:01:01"), + "resolved": NaT, + "priority": None, + "environment": "PROD", + "type": "Data", + }, + { + "key": "D-5", + "created": Timestamp("2018-02-05 01:01:01"), + "resolved": Timestamp("2018-02-20 02:02:02"), + "priority": None, + "environment": "SIT", + "type": "Data", + }, + { + "key": "D-6", + "created": Timestamp("2018-03-06 01:01:01"), + "resolved": NaT, + "priority": None, + "environment": "UAT", + "type": "Data", + }, ] + def test_no_type_field(jira, settings): - settings = extend_dict(settings, { - 'defects_type_field': None - }) + settings = extend_dict(settings, {"defects_type_field": None}) query_manager = QueryManager(jira, settings) results = {} @@ -196,19 +286,60 @@ def test_no_type_field(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': 'High', 'environment': 'PROD', 'type': None}, - {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'priority': 'Medium', 'environment': 'SIT', 'type': None}, - {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': 'High', 'environment': 'UAT', 'type': None}, - {'key': 'D-4', 
'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'priority': 'Medium', 'environment': 'PROD', 'type': None}, - {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'priority': 'High', 'environment': 'SIT', 'type': None}, - {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'priority': 'Medium', 'environment': 'UAT', 'type': None}, + assert data.to_dict("records") == [ + { + "key": "D-1", + "created": Timestamp("2018-01-01 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": "High", + "environment": "PROD", + "type": None, + }, + { + "key": "D-2", + "created": Timestamp("2018-01-02 01:01:01"), + "resolved": Timestamp("2018-01-20 02:02:02"), + "priority": "Medium", + "environment": "SIT", + "type": None, + }, + { + "key": "D-3", + "created": Timestamp("2018-02-03 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": "High", + "environment": "UAT", + "type": None, + }, + { + "key": "D-4", + "created": Timestamp("2018-01-04 01:01:01"), + "resolved": NaT, + "priority": "Medium", + "environment": "PROD", + "type": None, + }, + { + "key": "D-5", + "created": Timestamp("2018-02-05 01:01:01"), + "resolved": Timestamp("2018-02-20 02:02:02"), + "priority": "High", + "environment": "SIT", + "type": None, + }, + { + "key": "D-6", + "created": Timestamp("2018-03-06 01:01:01"), + "resolved": NaT, + "priority": "Medium", + "environment": "UAT", + "type": None, + }, ] + def test_no_environment_field(jira, settings): - settings = extend_dict(settings, { - 'defects_environment_field': None - }) + settings = extend_dict(settings, {"defects_environment_field": None}) query_manager = QueryManager(jira, settings) results = {} @@ -216,11 +347,53 @@ def test_no_environment_field(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'D-1', 'created': Timestamp('2018-01-01 01:01:01'), 'resolved': Timestamp('2018-03-20 
02:02:02'), 'priority': 'High', 'environment': None, 'type': 'Config'}, - {'key': 'D-2', 'created': Timestamp('2018-01-02 01:01:01'), 'resolved': Timestamp('2018-01-20 02:02:02'), 'priority': 'Medium', 'environment': None, 'type': 'Config'}, - {'key': 'D-3', 'created': Timestamp('2018-02-03 01:01:01'), 'resolved': Timestamp('2018-03-20 02:02:02'), 'priority': 'High', 'environment': None, 'type': 'Config'}, - {'key': 'D-4', 'created': Timestamp('2018-01-04 01:01:01'), 'resolved': NaT, 'priority': 'Medium', 'environment': None, 'type': 'Data'}, - {'key': 'D-5', 'created': Timestamp('2018-02-05 01:01:01'), 'resolved': Timestamp('2018-02-20 02:02:02'), 'priority': 'High', 'environment': None, 'type': 'Data'}, - {'key': 'D-6', 'created': Timestamp('2018-03-06 01:01:01'), 'resolved': NaT, 'priority': 'Medium', 'environment': None, 'type': 'Data'}, + assert data.to_dict("records") == [ + { + "key": "D-1", + "created": Timestamp("2018-01-01 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": "High", + "environment": None, + "type": "Config", + }, + { + "key": "D-2", + "created": Timestamp("2018-01-02 01:01:01"), + "resolved": Timestamp("2018-01-20 02:02:02"), + "priority": "Medium", + "environment": None, + "type": "Config", + }, + { + "key": "D-3", + "created": Timestamp("2018-02-03 01:01:01"), + "resolved": Timestamp("2018-03-20 02:02:02"), + "priority": "High", + "environment": None, + "type": "Config", + }, + { + "key": "D-4", + "created": Timestamp("2018-01-04 01:01:01"), + "resolved": NaT, + "priority": "Medium", + "environment": None, + "type": "Data", + }, + { + "key": "D-5", + "created": Timestamp("2018-02-05 01:01:01"), + "resolved": Timestamp("2018-02-20 02:02:02"), + "priority": "High", + "environment": None, + "type": "Data", + }, + { + "key": "D-6", + "created": Timestamp("2018-03-06 01:01:01"), + "resolved": NaT, + "priority": "Medium", + "environment": None, + "type": "Data", + }, ] diff --git 
a/jira_agile_metrics/calculators/forecast.py b/jira_agile_metrics/calculators/forecast.py index d5780d0..9f27367 100644 --- a/jira_agile_metrics/calculators/forecast.py +++ b/jira_agile_metrics/calculators/forecast.py @@ -1,21 +1,21 @@ -import logging import datetime +import logging -import pandas as pd import matplotlib.pyplot as plt import matplotlib.transforms +import pandas as pd from ..calculator import Calculator from ..utils import set_chart_style, to_days_since_epoch - -from .cycletime import CycleTimeCalculator from .burnup import BurnupCalculator +from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class BurnupForecastCalculator(Calculator): - """Draw a burn-up chart with a forecast run to completion - """ + """Draw a burn-up chart with a forecast run to completion""" def run(self): burnup_data = self.get_result(BurnupCalculator) @@ -25,12 +25,12 @@ def run(self): return None # This calculation is expensive. Only run it if we intend to write a file. 
- if not self.settings['burnup_forecast_chart']: + if not self.settings["burnup_forecast_chart"]: logger.debug("Not calculating burnup forecast chart data as no output file specified") return None - backlog_column = self.settings['backlog_column'] - done_column = self.settings['done_column'] + backlog_column = self.settings["backlog_column"] + done_column = self.settings["done_column"] if backlog_column not in burnup_data.columns: logger.error("Backlog column %s does not exist", backlog_column) @@ -43,39 +43,50 @@ def run(self): logger.warning("Unable to draw burnup forecast chart with zero completed items.") return None - throughput_window_end = self.settings['burnup_forecast_chart_throughput_window_end'] or cycle_data[done_column].max().date() - throughput_window = self.settings['burnup_forecast_chart_throughput_window'] + throughput_window_end = ( + self.settings["burnup_forecast_chart_throughput_window_end"] or cycle_data[done_column].max().date() + ) + throughput_window = self.settings["burnup_forecast_chart_throughput_window"] throughput_window_start = throughput_window_end - datetime.timedelta(days=throughput_window) - logger.info("Sampling throughput between %s and %s", throughput_window_start.isoformat(), throughput_window_end.isoformat()) + logger.info( + "Sampling throughput between %s and %s", + throughput_window_start.isoformat(), + throughput_window_end.isoformat(), + ) start_value = burnup_data[done_column].max() - target = self.settings['burnup_forecast_chart_target'] or burnup_data[backlog_column].max() + target = self.settings["burnup_forecast_chart_target"] or burnup_data[backlog_column].max() logger.info("Running forecast to completion of %d items", target) - trials = self.settings['burnup_forecast_chart_trials'] + trials = self.settings["burnup_forecast_chart_trials"] logger.debug("Running %d trials to calculate probable forecast outcomes", trials) - throughput_data = calculate_daily_throughput(cycle_data[ - (cycle_data[done_column] >= 
pd.Timestamp(throughput_window_start)) & - (cycle_data[done_column] <= pd.Timestamp(throughput_window_end)) - ], done_column, throughput_window_start, throughput_window_end) + throughput_data = calculate_daily_throughput( + cycle_data[ + (cycle_data[done_column] >= pd.Timestamp(throughput_window_start)) + & (cycle_data[done_column] <= pd.Timestamp(throughput_window_end)) + ], + done_column, + throughput_window_start, + throughput_window_end, + ) # degenerate case - no steps, abort - if throughput_data['count'].sum() <= 0: + if throughput_data["count"].sum() <= 0: logger.warning("No throughput samples available, aborting forecast simulations") return None - + return burnup_monte_carlo( start_value=start_value, target_value=target, start_date=burnup_data.index.max(), frequency=throughput_data.index.freq, draw_sample=throughput_sampler(throughput_data, start_value, target), - trials=trials + trials=trials, ) def write(self): - output_file = self.settings['burnup_forecast_chart'] + output_file = self.settings["burnup_forecast_chart"] if not output_file: logger.debug("No output file specified for burnup forecast chart") return @@ -85,9 +96,9 @@ def write(self): logger.warning("Cannot draw burnup forecast chart with zero items") return - window = self.settings['burnup_forecast_window'] + window = self.settings["burnup_forecast_window"] if window: - start = burnup_data.index.max() - pd.Timedelta(window, 'D') + start = burnup_data.index.max() - pd.Timedelta(window, "D") burnup_data = burnup_data[start:] if len(burnup_data.index) == 0: @@ -99,24 +110,24 @@ def write(self): logger.warning("Cannot draw burnup forecast chart with zero completed trials") return - deadline = self.settings['burnup_forecast_chart_deadline'] + deadline = self.settings["burnup_forecast_chart_deadline"] if deadline: logger.debug("Forecasting with deadline %s", deadline.isoformat()) - deadline_confidence = self.settings['burnup_forecast_chart_deadline_confidence'] + deadline_confidence = 
self.settings["burnup_forecast_chart_deadline_confidence"] if deadline_confidence: logger.debug("Forecasting deadline at %.2f%% confidence", deadline_confidence * 100.0) - quantiles = self.settings['quantiles'] - logger.debug("Showing forecast at quantiles %s", ', '.join(['%.2f' % (q * 100.0) for q in quantiles])) + quantiles = self.settings["quantiles"] + logger.debug("Showing forecast at quantiles %s", ", ".join(["%.2f" % (q * 100.0) for q in quantiles])) - backlog_column = self.settings['backlog_column'] - target = self.settings['burnup_forecast_chart_target'] or burnup_data[backlog_column].max() + backlog_column = self.settings["backlog_column"] + target = self.settings["burnup_forecast_chart_target"] or burnup_data[backlog_column].max() fig, ax = plt.subplots() - if self.settings['burnup_forecast_chart_title']: - ax.set_title(self.settings['burnup_forecast_chart_title']) + if self.settings["burnup_forecast_chart_title"]: + ax.set_title(self.settings["burnup_forecast_chart_title"]) fig.autofmt_xdate() @@ -134,7 +145,7 @@ def write(self): for col in mc_trials: mc_trials[col][mc_trials[col] > target] = target - mc_trials.plot.line(ax=ax, legend=False, color='#ff9696', linestyle='solid', linewidth=0.1) + mc_trials.plot.line(ax=ax, legend=False, color="#ff9696", linestyle="solid", linewidth=0.1) # draw quantiles at finish line finish_dates = mc_trials.apply(pd.Series.last_valid_index) @@ -143,19 +154,22 @@ def write(self): if deadline_confidence is not None: deadline_confidence_quantiles = finish_dates.quantile([deadline_confidence]).dt.normalize() if len(deadline_confidence_quantiles) > 0: - deadline_confidence_date = pd.Timestamp(deadline_confidence_quantiles.values[0]).to_pydatetime().date() + deadline_confidence_date = ( + pd.Timestamp(deadline_confidence_quantiles.values[0]).to_pydatetime().date() + ) bottom, top = ax.get_ylim() for percentile, value in finish_date_quantiles.iteritems(): - ax.vlines(value, bottom, target, linestyles='--', linewidths=0.5) - 
ax.annotate("%.0f%% (%s)" % ((percentile * 100), value.strftime("%d/%m/%Y"),), + ax.vlines(value, bottom, target, linestyles="--", linewidths=0.5) + ax.annotate( + "%.0f%% (%s)" % ((percentile * 100), value.strftime("%d/%m/%Y")), xy=(to_days_since_epoch(value.to_pydatetime().date()), 0.35), xycoords=transform_vertical, rotation="vertical", ha="left", va="top", fontsize="x-small", - backgroundcolor="#ffffff" + backgroundcolor="#ffffff", ) # draw deadline (pun not intended...) @@ -165,21 +179,17 @@ def write(self): deadline_dse = to_days_since_epoch(deadline) - ax.vlines(deadline, bottom, target, color='r', linestyles='-', linewidths=0.5) - ax.annotate("Due: %s" % (deadline.strftime("%d/%m/%Y"),), + ax.vlines(deadline, bottom, target, color="r", linestyles="-", linewidths=0.5) + ax.annotate( + "Due: %s" % (deadline.strftime("%d/%m/%Y"),), xy=(deadline, target), xytext=(0.95, 0.95), - textcoords='axes fraction', - arrowprops={ - 'arrowstyle': '->', - 'color': 'r', - 'linewidth': 1.1, - 'connectionstyle': 'arc3,rad=.1', - }, + textcoords="axes fraction", + arrowprops={"arrowstyle": "->", "color": "r", "linewidth": 1.1, "connectionstyle": "arc3,rad=.1"}, fontsize="x-small", ha="right", - color='red', - backgroundcolor="#ffffff" + color="red", + backgroundcolor="#ffffff", ) # Make sure we can see deadline line @@ -190,29 +200,33 @@ def write(self): if deadline_confidence_date is not None: deadline_delta = (deadline - deadline_confidence_date).days - ax.text(0.02, 0.5, - "Deadline: %s\nForecast (%.0f%%): %s\nSlack: %d days" % ( + ax.text( + 0.02, + 0.5, + "Deadline: %s\nForecast (%.0f%%): %s\nSlack: %d days" + % ( deadline.strftime("%d/%m/%Y"), (deadline_confidence * 100), deadline_confidence_date.strftime("%d/%m/%Y"), - deadline_delta + deadline_delta, ), transform=ax.transAxes, fontsize=14, - verticalalignment='center', - bbox=dict(boxstyle='round', facecolor='r' if deadline_delta < 0 else 'g', alpha=0.5), + verticalalignment="center", + bbox=dict(boxstyle="round", 
facecolor="r" if deadline_delta < 0 else "g", alpha=0.5), ) # Place target line left, right = ax.get_xlim() - ax.hlines(target, left, right, linestyles='--', linewidths=1) - ax.annotate("Target: %d" % (target,), + ax.hlines(target, left, right, linestyles="--", linewidths=1) + ax.annotate( + "Target: %d" % (target,), xy=(0.02, target), xycoords=transform_horizontal, fontsize="x-small", ha="left", va="center", - backgroundcolor="#ffffff" + backgroundcolor="#ffffff", ) # Give some headroom above the target line so we can see it @@ -224,52 +238,50 @@ def write(self): handles, labels = ax.get_legend_handles_labels() ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) - ax.legend(handles[:2], labels[:2], loc='upper center', bbox_to_anchor=(0.5, -0.2), ncol=2) + ax.legend(handles[:2], labels[:2], loc="upper center", bbox_to_anchor=(0.5, -0.2), ncol=2) set_chart_style() # Write file logger.info("Writing burnup forecast chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) + def calculate_daily_throughput(cycle_data, done_column, window_start, window_end): - return cycle_data[[done_column, 'key']] \ - .rename(columns={'key': 'count', done_column: 'completed_timestamp'}) \ - .groupby('completed_timestamp').count() \ - .resample("1D").sum() \ - .reindex(index=pd.date_range(start=window_start, end=window_end, freq='D')) \ + return ( + cycle_data[[done_column, "key"]] + .rename(columns={"key": "count", done_column: "completed_timestamp"}) + .groupby("completed_timestamp") + .count() + .resample("1D") + .sum() + .reindex(index=pd.date_range(start=window_start, end=window_end, freq="D")) .fillna(0) + ) + def throughput_sampler(throughput_data, start_value, target): - """Return a function that can efficiently draw samples from `throughput_data` - """ - sample_buffer_size = int(2 * (target - start_value) / throughput_data['count'].mean()) + """Return 
a function that can efficiently draw samples from `throughput_data`""" + sample_buffer_size = int(2 * (target - start_value) / throughput_data["count"].mean()) sample_buffer = dict(idx=0, buffer=None) def get_throughput_sample(): - if sample_buffer['buffer'] is None or sample_buffer['idx'] >= len(sample_buffer['buffer'].index): - sample_buffer['buffer'] = throughput_data['count'].sample(sample_buffer_size, replace=True) - sample_buffer['idx'] = 0 + if sample_buffer["buffer"] is None or sample_buffer["idx"] >= len(sample_buffer["buffer"].index): + sample_buffer["buffer"] = throughput_data["count"].sample(sample_buffer_size, replace=True) + sample_buffer["idx"] = 0 + + sample_buffer["idx"] += 1 + return sample_buffer["buffer"].iloc[sample_buffer["idx"] - 1] - sample_buffer['idx'] += 1 - return sample_buffer['buffer'].iloc[sample_buffer['idx'] - 1] - return get_throughput_sample -def burnup_monte_carlo( - start_value, - target_value, - start_date, - frequency, - draw_sample, - trials=100, - max_iterations=9999, -): + +def burnup_monte_carlo(start_value, target_value, start_date, frequency, draw_sample, trials=100, max_iterations=9999): series = {} - for t in range(trials): + for trial in range(trials): current_date = start_date current_value = start_value @@ -283,6 +295,6 @@ def burnup_monte_carlo( dates.append(current_date) steps.append(min(current_value, target_value)) # don't overshoot the target - series["Trial %d" % t] = pd.Series(steps, index=dates, name="Trial %d" % t) + series["Trial %d" % trial] = pd.Series(steps, index=dates, name="Trial %d" % trial) return pd.DataFrame(series) diff --git a/jira_agile_metrics/calculators/forecast_test.py b/jira_agile_metrics/calculators/forecast_test.py index 507d759..be063d1 100644 --- a/jira_agile_metrics/calculators/forecast_test.py +++ b/jira_agile_metrics/calculators/forecast_test.py @@ -1,43 +1,54 @@ -import pytest import datetime + import numpy as np from pandas import DataFrame, Timestamp, date_range +import pytest 
-from .cycletime import CycleTimeCalculator -from .cfd import CFDCalculator +from ..utils import extend_dict from .burnup import BurnupCalculator +from .cfd import CFDCalculator +from .cycletime import CycleTimeCalculator from .forecast import BurnupForecastCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'burnup_forecast_chart_throughput_window_end': None, - 'burnup_forecast_chart_throughput_window': 8, - 'burnup_forecast_chart_target': 30, - 'burnup_forecast_chart_trials': 10, - 'burnup_forecast_chart_deadline': datetime.date(2018, 1, 30), - 'burnup_forecast_chart_deadline_confidence': 0.85, - 'quantiles': [0.1, 0.3, 0.5], - 'burnup_forecast_chart': 'forecast.png' # without a file, calculator stops - }) - -@pytest.fixture -def query_manager(minimal_query_manager): +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict( + minimal_settings, + { + "burnup_forecast_chart_throughput_window_end": None, + "burnup_forecast_chart_throughput_window": 8, + "burnup_forecast_chart_target": 30, + "burnup_forecast_chart_trials": 10, + "burnup_forecast_chart_deadline": datetime.date(2018, 1, 30), + "burnup_forecast_chart_deadline_confidence": 0.85, + "quantiles": [0.1, 0.3, 0.5], + "burnup_forecast_chart": "forecast.png", # without a file, calculator stops + }, + ) + + +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(query_manager, settings, large_cycle_time_results): + +@pytest.fixture(name="results") +def fixture_results(query_manager, settings, large_cycle_time_results): results = large_cycle_time_results.copy() results.update({CFDCalculator: CFDCalculator(query_manager, settings, results).run()}) results.update({BurnupCalculator: BurnupCalculator(query_manager, settings, results).run()}) return results + def test_empty(query_manager, settings, 
minimal_cycle_time_columns): results = { CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns), - BurnupCalculator: DataFrame([], columns=['Backlog', 'Committed', 'Build', 'Test', 'Done'], index=date_range(start=datetime.date(2018, 1, 1), periods=0, freq='D')) + BurnupCalculator: DataFrame( + [], + columns=["Backlog", "Committed", "Build", "Test", "Done"], + index=date_range(start=datetime.date(2018, 1, 1), periods=0, freq="D"), + ), } calculator = BurnupForecastCalculator(query_manager, settings, results) @@ -45,23 +56,25 @@ def test_empty(query_manager, settings, minimal_cycle_time_columns): data = calculator.run() assert data is None + def test_columns(query_manager, settings, results): calculator = BurnupForecastCalculator(query_manager, settings, results) data = calculator.run() assert list(data.columns) == [ - 'Trial 0', - 'Trial 1', - 'Trial 2', - 'Trial 3', - 'Trial 4', - 'Trial 5', - 'Trial 6', - 'Trial 7', - 'Trial 8', - 'Trial 9' + "Trial 0", + "Trial 1", + "Trial 2", + "Trial 3", + "Trial 4", + "Trial 5", + "Trial 6", + "Trial 7", + "Trial 8", + "Trial 9", ] + def test_calculate_forecast(query_manager, settings, results): calculator = BurnupForecastCalculator(query_manager, settings, results) @@ -70,14 +83,14 @@ def test_calculate_forecast(query_manager, settings, results): # because of the random nature of this, we don't know exactly how many records # there will be, but will assume at least two assert len(data.index) > 0 - assert list(data.index)[0] == Timestamp('2018-01-09 00:00:00', freq='D') - assert list(data.index)[1] == Timestamp('2018-01-10 00:00:00', freq='D') - + assert list(data.index)[0] == Timestamp("2018-01-09 00:00:00", freq="D") + assert list(data.index)[1] == Timestamp("2018-01-10 00:00:00", freq="D") + for i in range(10): - trial_values = data['Trial %d' % i] + trial_values = data["Trial %d" % i] # remove na values at the end (not all series will need all dates) - trial_values = 
trial_values[:trial_values.last_valid_index()] + trial_values = trial_values[: trial_values.last_valid_index()] # check that series is monotonically increasing trial_diff = np.diff(trial_values) @@ -89,19 +102,22 @@ def test_calculate_forecast(query_manager, settings, results): # we reach the target value assert trial_values[-1] == 30 + def test_calculate_forecast_settings(query_manager, settings, results): - settings.update({ - 'backlog_column': 'Committed', - 'done_column': 'Test', - 'burnup_forecast_chart_throughput_window_end': datetime.date(2018, 1, 6), - 'burnup_forecast_chart_throughput_window': 4, - 'burnup_forecast_chart_target': None, # use max of backlog column -- 15 - 'burnup_forecast_chart_trials': 10, - 'burnup_forecast_chart_deadline': datetime.date(2018, 1, 30), - 'burnup_forecast_chart_deadline_confidence': 0.85, - 'quantiles': [0.1, 0.3, 0.5] - }) + settings.update( + { + "backlog_column": "Committed", + "done_column": "Test", + "burnup_forecast_chart_throughput_window_end": datetime.date(2018, 1, 6), + "burnup_forecast_chart_throughput_window": 4, + "burnup_forecast_chart_target": None, # use max of backlog column -- 15 + "burnup_forecast_chart_trials": 10, + "burnup_forecast_chart_deadline": datetime.date(2018, 1, 30), + "burnup_forecast_chart_deadline_confidence": 0.85, + "quantiles": [0.1, 0.3, 0.5], + } + ) results.update({CFDCalculator: CFDCalculator(query_manager, settings, results).run()}) results.update({BurnupCalculator: BurnupCalculator(query_manager, settings, results).run()}) @@ -113,14 +129,14 @@ def test_calculate_forecast_settings(query_manager, settings, results): # because of the random nature of this, we don't know exactly how many records # there will be, but will assume at least two assert len(data.index) > 0 - assert list(data.index)[0] == Timestamp('2018-01-09 00:00:00', freq='D') - assert list(data.index)[1] == Timestamp('2018-01-10 00:00:00', freq='D') - + assert list(data.index)[0] == Timestamp("2018-01-09 00:00:00", 
freq="D") + assert list(data.index)[1] == Timestamp("2018-01-10 00:00:00", freq="D") + for i in range(10): - trial_values = data['Trial %d' % i] + trial_values = data["Trial %d" % i] # remove na values at the end (not all series will need all dates) - trial_values = trial_values[:trial_values.last_valid_index()] + trial_values = trial_values[: trial_values.last_valid_index()] # check that series is monotonically increasing trial_diff = np.diff(trial_values) diff --git a/jira_agile_metrics/calculators/histogram.py b/jira_agile_metrics/calculators/histogram.py index a9e81f0..516fadd 100644 --- a/jira_agile_metrics/calculators/histogram.py +++ b/jira_agile_metrics/calculators/histogram.py @@ -1,16 +1,18 @@ import logging + +import matplotlib.pyplot as plt import numpy as np import pandas as pd -import matplotlib.pyplot as plt import seaborn as sns from ..calculator import Calculator from ..utils import get_extension, set_chart_style - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class HistogramCalculator(Calculator): """Build histogram data for the cycle times in `cycle_data`. 
Returns a dictionary with keys `bin_values` and `bin_edges` of numpy arrays @@ -19,12 +21,12 @@ class HistogramCalculator(Calculator): def run(self): cycle_data = self.get_result(CycleTimeCalculator) - cycle_times = cycle_data['cycle_time'].astype('timedelta64[D]').dropna().tolist() + cycle_times = cycle_data["cycle_time"].astype("timedelta64[D]").dropna().tolist() if not cycle_times: bins = range(11) else: - bins = range(int(max(cycle_times))+2) + bins = range(int(max(cycle_times)) + 2) values, edges = np.histogram(cycle_times, bins=bins, density=False) @@ -32,70 +34,69 @@ def run(self): for i, _ in enumerate(edges): if i == 0: continue - index.append("%.01f to %.01f" % (edges[i - 1], edges[i],)) + index.append("%.01f to %.01f" % (edges[i - 1], edges[i])) return pd.Series(values, name="Items", index=index) def write(self): data = self.get_result() - if self.settings['histogram_data']: - self.write_file(data, self.settings['histogram_data']) + if self.settings["histogram_data"]: + self.write_file(data, self.settings["histogram_data"]) else: logger.debug("No output file specified for histogram data") - if self.settings['histogram_chart']: - self.write_chart(data, self.settings['histogram_chart']) + if self.settings["histogram_chart"]: + self.write_chart(data, self.settings["histogram_chart"]) else: logger.debug("No output file specified for histogram chart") - def write_file(self, data, output_files): - file_data = self.get_result() - + @staticmethod + def write_file(data, output_files): for output_file in output_files: output_extension = get_extension(output_file) logger.info("Writing histogram data to %s", output_file) - if output_extension == '.json': - file_data.to_json(output_file, date_format='iso') - elif output_extension == '.xlsx': - file_data.to_frame(name='histogram').to_excel(output_file, 'Histogram', header=True) + if output_extension == ".json": + data.to_json(output_file, date_format="iso") + elif output_extension == ".xlsx": + 
data.to_frame(name="histogram").to_excel(output_file, "Histogram", header=True) else: - file_data.to_csv(output_file, header=True) + data.to_csv(output_file, header=True) def write_chart(self, data, output_file): cycle_data = self.get_result(CycleTimeCalculator) - chart_data = cycle_data[['cycle_time', 'completed_timestamp']].dropna(subset=['cycle_time']) + chart_data = cycle_data[["cycle_time", "completed_timestamp"]].dropna(subset=["cycle_time"]) # The `window` calculation and the chart output will fail if we don't # have at least two valid data points. - ct_days = chart_data['cycle_time'].dt.days + ct_days = chart_data["cycle_time"].dt.days if len(ct_days.index) < 2: logger.warning("Need at least 2 completed items to draw histogram") return # Slice off items before the window - window = self.settings['histogram_window'] + window = self.settings["histogram_window"] if window: - start = chart_data['completed_timestamp'].max().normalize() - pd.Timedelta(window, 'D') + start = chart_data["completed_timestamp"].max().normalize() - pd.Timedelta(window, "D") chart_data = chart_data[chart_data.completed_timestamp >= start] # Re-check that we have enough data - ct_days = chart_data['cycle_time'].dt.days + ct_days = chart_data["cycle_time"].dt.days if len(ct_days.index) < 2: logger.warning("Need at least 2 completed items to draw histogram") return - quantiles = self.settings['quantiles'] - logger.debug("Showing histogram at quantiles %s", ', '.join(['%.2f' % (q * 100.0) for q in quantiles])) + quantiles = self.settings["quantiles"] + logger.debug("Showing histogram at quantiles %s", ", ".join(["%.2f" % (q * 100.0) for q in quantiles])) fig, ax = plt.subplots() - bins = range(int(ct_days.max())+2) + bins = range(int(ct_days.max()) + 2) sns.distplot(ct_days, bins=bins, ax=ax, kde=False, axlabel="Cycle time (days)") - if self.settings['histogram_chart_title']: - ax.set_title(self.settings['histogram_chart_title']) + if self.settings["histogram_chart_title"]: + 
ax.set_title(self.settings["histogram_chart_title"]) _, right = ax.get_xlim() ax.set_xlim(0, right) @@ -103,13 +104,14 @@ def write_chart(self, data, output_file): # Add quantiles bottom, top = ax.get_ylim() for quantile, value in ct_days.quantile(quantiles).iteritems(): - ax.vlines(value, bottom, top - 0.001, linestyles='--', linewidths=1) - ax.annotate("%.0f%% (%.0f days)" % ((quantile * 100), value,), + ax.vlines(value, bottom, top - 0.001, linestyles="--", linewidths=1) + ax.annotate( + "%.0f%% (%.0f days)" % ((quantile * 100), value), xy=(value, top), xytext=(value - 0.1, top - 0.001), rotation="vertical", fontsize="x-small", - ha="right" + ha="right", ) ax.set_ylabel("Frequency") @@ -117,5 +119,5 @@ def write_chart(self, data, output_file): # Write file logger.info("Writing histogram chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/histogram_test.py b/jira_agile_metrics/calculators/histogram_test.py index be7ed77..74ee8e4 100644 --- a/jira_agile_metrics/calculators/histogram_test.py +++ b/jira_agile_metrics/calculators/histogram_test.py @@ -1,58 +1,53 @@ -import pytest from pandas import DataFrame +import pytest +from ..utils import extend_dict from .cycletime import CycleTimeCalculator from .histogram import HistogramCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): return extend_dict(minimal_settings, {}) -@pytest.fixture -def query_manager(minimal_query_manager): + +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(large_cycle_time_results): + +@pytest.fixture(name="results") +def fixture_results(large_cycle_time_results): return extend_dict(large_cycle_time_results, {}) + def 
test_empty(query_manager, settings, minimal_cycle_time_columns): - results = { - CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[]) - } + results = {CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[])} calculator = HistogramCalculator(query_manager, settings, results) data = calculator.run() assert list(data.index) == [ - '0.0 to 1.0', - '1.0 to 2.0', - '2.0 to 3.0', - '3.0 to 4.0', - '4.0 to 5.0', - '5.0 to 6.0', - '6.0 to 7.0', - '7.0 to 8.0', - '8.0 to 9.0', - '9.0 to 10.0' + "0.0 to 1.0", + "1.0 to 2.0", + "2.0 to 3.0", + "3.0 to 4.0", + "4.0 to 5.0", + "5.0 to 6.0", + "6.0 to 7.0", + "7.0 to 8.0", + "8.0 to 9.0", + "9.0 to 10.0", ] assert list(data) == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + def test_calculate_histogram(query_manager, settings, results): calculator = HistogramCalculator(query_manager, settings, results) data = calculator.run() - assert list(data.index) == [ - '0.0 to 1.0', - '1.0 to 2.0', - '2.0 to 3.0', - '3.0 to 4.0', - '4.0 to 5.0', - '5.0 to 6.0' - ] + assert list(data.index) == ["0.0 to 1.0", "1.0 to 2.0", "2.0 to 3.0", "3.0 to 4.0", "4.0 to 5.0", "5.0 to 6.0"] assert list(data) == [0, 0, 0, 0, 1, 5] diff --git a/jira_agile_metrics/calculators/impediments.py b/jira_agile_metrics/calculators/impediments.py index 21a9af2..1d9fac5 100644 --- a/jira_agile_metrics/calculators/impediments.py +++ b/jira_agile_metrics/calculators/impediments.py @@ -1,14 +1,16 @@ import logging -import pandas as pd + import matplotlib.pyplot as plt +import pandas as pd from ..calculator import Calculator from ..utils import get_extension, breakdown_by_month, breakdown_by_month_sum_days, set_chart_style - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class ImpedimentsCalculator(Calculator): """Calculate impediments, charted by month and workflow status, either as a count of tickets that were blocked in that month, or as a sum of the total @@ -24,81 +26,84 @@ def run(self): # 
This calculation is expensive. Only run it if we are going to write something if not ( - self.settings['impediments_data'] or - self.settings['impediments_chart'] or - self.settings['impediments_days_chart'] or - self.settings['impediments_status_chart'] or - self.settings['impediments_status_days_chart'] + self.settings["impediments_data"] + or self.settings["impediments_chart"] + or self.settings["impediments_days_chart"] + or self.settings["impediments_status_chart"] + or self.settings["impediments_status_days_chart"] ): logger.debug("Not calculating impediments data as no output files specified") return None cycle_data = self.get_result(CycleTimeCalculator) - cycle_data = cycle_data[cycle_data.blocked_days > 0][['key', 'impediments']] + cycle_data = cycle_data[cycle_data.blocked_days > 0][["key", "impediments"]] data = [] - cycle_names = [s['name'] for s in self.settings['cycle']] - committed_column = self.settings['committed_column'] - done_column = self.settings['done_column'] - active_columns = cycle_names[cycle_names.index(committed_column):cycle_names.index(done_column)] + cycle_names = [s["name"] for s in self.settings["cycle"]] + committed_column = self.settings["committed_column"] + done_column = self.settings["done_column"] + active_columns = cycle_names[cycle_names.index(committed_column) : cycle_names.index(done_column)] for row in cycle_data.itertuples(): - for idx, event in enumerate(row.impediments): + for _, event in enumerate(row.impediments): # Ignore things that were impeded whilst in the backlog and/or done column # (these are mostly nonsensical, and don't really indicate blocked/wasted time) - if event['status'] not in active_columns: + if event["status"] not in active_columns: continue - data.append({ - 'key': row.key, - 'status': event['status'], - 'flag': event['flag'], - 'start': pd.Timestamp(event['start']), - 'end': pd.Timestamp(event['end']) if event['end'] else pd.NaT, - }) - - return pd.DataFrame(data, columns=['key', 'status', 
'flag', 'start', 'end']) + data.append( + { + "key": row.key, + "status": event["status"], + "flag": event["flag"], + "start": pd.Timestamp(event["start"]), + "end": pd.Timestamp(event["end"]) if event["end"] else pd.NaT, + } + ) + + return pd.DataFrame(data, columns=["key", "status", "flag", "start", "end"]) def write(self): data = self.get_result() if data is None: return - if self.settings['impediments_data']: - self.write_data(data, self.settings['impediments_data']) + if self.settings["impediments_data"]: + self.write_data(data, self.settings["impediments_data"]) - if self.settings['impediments_chart']: - self.write_impediments_chart(data, self.settings['impediments_chart']) + if self.settings["impediments_chart"]: + self.write_impediments_chart(data, self.settings["impediments_chart"]) - if self.settings['impediments_days_chart']: - self.write_impediments_days_chart(data, self.settings['impediments_days_chart']) + if self.settings["impediments_days_chart"]: + self.write_impediments_days_chart(data, self.settings["impediments_days_chart"]) - if self.settings['impediments_status_chart']: - self.write_impediments_status_chart(data, self.settings['impediments_status_chart']) + if self.settings["impediments_status_chart"]: + self.write_impediments_status_chart(data, self.settings["impediments_status_chart"]) - if self.settings['impediments_status_days_chart']: - self.write_impediments_status_days_chart(data, self.settings['impediments_status_days_chart']) + if self.settings["impediments_status_days_chart"]: + self.write_impediments_status_days_chart(data, self.settings["impediments_status_days_chart"]) - def write_data(self, data, output_files): + @staticmethod + def write_data(data, output_files): for output_file in output_files: output_extension = get_extension(output_file) logger.info("Writing impediments data to %s", output_file) - if output_extension == '.json': - data.to_json(output_file, date_format='iso') - elif output_extension == '.xlsx': - 
data.to_excel(output_file, 'Impediments', header=True) + if output_extension == ".json": + data.to_json(output_file, date_format="iso") + elif output_extension == ".xlsx": + data.to_excel(output_file, "Impediments", header=True) else: - data.to_csv(output_file, header=True, date_format='%Y-%m-%d', index=False) + data.to_csv(output_file, header=True, date_format="%Y-%m-%d", index=False) def write_impediments_chart(self, chart_data, output_file): if len(chart_data.index) == 0: logger.warning("Cannot draw impediments chart with zero items") return - window = self.settings['impediments_window'] - breakdown = breakdown_by_month(chart_data, 'start', 'end', 'key', 'flag') + window = self.settings["impediments_window"] + breakdown = breakdown_by_month(chart_data, "start", "end", "key", "flag") if window: breakdown = breakdown[-window:] @@ -111,21 +116,21 @@ def write_impediments_chart(self, chart_data, output_file): breakdown.plot.bar(ax=ax, stacked=True) - if self.settings['impediments_chart_title']: - ax.set_title(self.settings['impediments_chart_title']) + if self.settings["impediments_chart_title"]: + ax.set_title(self.settings["impediments_chart_title"]) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of impediments", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing impediments chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) def write_impediments_days_chart(self, chart_data, output_file): @@ -133,8 +138,8 @@ def write_impediments_days_chart(self, chart_data, output_file): logger.warning("Cannot draw impediments days chart with zero items") return - window = 
self.settings['impediments_window'] - breakdown = breakdown_by_month_sum_days(chart_data, 'start', 'end', 'flag') + window = self.settings["impediments_window"] + breakdown = breakdown_by_month_sum_days(chart_data, "start", "end", "flag") if window: breakdown = breakdown[-window:] @@ -147,21 +152,21 @@ def write_impediments_days_chart(self, chart_data, output_file): breakdown.plot.bar(ax=ax, stacked=True) - if self.settings['impediments_days_chart_title']: - ax.set_title(self.settings['impediments_days_chart_title']) + if self.settings["impediments_days_chart_title"]: + ax.set_title(self.settings["impediments_days_chart_title"]) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Total impeded days", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing impediments days chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) def write_impediments_status_chart(self, chart_data, output_file): @@ -169,10 +174,10 @@ def write_impediments_status_chart(self, chart_data, output_file): logger.warning("Cannot draw impediments status chart with zero items") return - window = self.settings['impediments_window'] - cycle_names = [s['name'] for s in self.settings['cycle']] + window = self.settings["impediments_window"] + cycle_names = [s["name"] for s in self.settings["cycle"]] - breakdown = breakdown_by_month(chart_data, 'start', 'end', 'key', 'status', cycle_names) + breakdown = breakdown_by_month(chart_data, "start", "end", "key", "status", cycle_names) if window: breakdown = breakdown[-window:] @@ -185,21 +190,21 @@ def write_impediments_status_chart(self, chart_data, output_file): 
breakdown.plot.bar(ax=ax, stacked=True) - if self.settings['impediments_status_chart_title']: - ax.set_title(self.settings['impediments_status_chart_title']) + if self.settings["impediments_status_chart_title"]: + ax.set_title(self.settings["impediments_status_chart_title"]) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of impediments", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing impediments status chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) def write_impediments_status_days_chart(self, chart_data, output_file): @@ -207,10 +212,10 @@ def write_impediments_status_days_chart(self, chart_data, output_file): logger.warning("Cannot draw impediments status days chart with zero items") return - window = self.settings['impediments_window'] - cycle_names = [s['name'] for s in self.settings['cycle']] + window = self.settings["impediments_window"] + cycle_names = [s["name"] for s in self.settings["cycle"]] - breakdown = breakdown_by_month_sum_days(chart_data, 'start', 'end', 'status', cycle_names) + breakdown = breakdown_by_month_sum_days(chart_data, "start", "end", "status", cycle_names) if window: breakdown = breakdown[-window:] @@ -223,19 +228,19 @@ def write_impediments_status_days_chart(self, chart_data, output_file): breakdown.plot.bar(ax=ax, stacked=True) - if self.settings['impediments_status_days_chart_title']: - ax.set_title(self.settings['impediments_status_days_chart_title']) + if self.settings["impediments_status_days_chart_title"]: + ax.set_title(self.settings["impediments_status_days_chart_title"]) - ax.legend(loc='center left', 
bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Total impeded days", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing impediments status days chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/impediments_test.py b/jira_agile_metrics/calculators/impediments_test.py index 3c146ba..bea462e 100644 --- a/jira_agile_metrics/calculators/impediments_test.py +++ b/jira_agile_metrics/calculators/impediments_test.py @@ -1,179 +1,271 @@ -import pytest from datetime import date + from pandas import DataFrame, NaT, Timestamp +import pytest +from ..conftest import _issues +from ..utils import extend_dict from .cycletime import CycleTimeCalculator from .impediments import ImpedimentsCalculator -from ..utils import extend_dict - -from ..conftest import _issues def _ts(datestring, timestring="00:00:00", freq=None): - return Timestamp('%s %s' % (datestring, timestring,), freq=freq) + return Timestamp("%s %s" % (datestring, timestring), freq=freq) -@pytest.fixture -def query_manager(minimal_query_manager): + +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'impediments_data': 'impediments.csv', - 'impediments_chart': 'impediments.png', - 'impediments_days_chart': 'impediments-days.png', - 'impediments_status_chart': 'impediments-status.png', - 'impediments_status_days_chart': 'impediments-status-days.png', - }) - -@pytest.fixture -def columns(minimal_cycle_time_columns): + +@pytest.fixture(name="settings") +def 
fixture_settings(minimal_settings): + return extend_dict( + minimal_settings, + { + "impediments_data": "impediments.csv", + "impediments_chart": "impediments.png", + "impediments_days_chart": "impediments-days.png", + "impediments_status_chart": "impediments-status.png", + "impediments_status_days_chart": "impediments-status-days.png", + }, + ) + + +@pytest.fixture(name="columns") +def fixture_columns(minimal_cycle_time_columns): return minimal_cycle_time_columns -@pytest.fixture -def cycle_time_results(minimal_cycle_time_columns): - """A results dict mimicing a minimal result from the CycleTimeCalculator. - """ + +@pytest.fixture(name="cycle_time_results") +def fixture_cycle_time_results(minimal_cycle_time_columns): + """A results dict mimicing a minimal result from the CycleTimeCalculator.""" return { - CycleTimeCalculator: DataFrame(_issues([ - dict(Backlog=_ts('2018-01-01'), Committed=NaT, Build=NaT, Test=NaT, Done=NaT, blocked_days=0, impediments=[]), - dict(Backlog=_ts('2018-01-02'), Committed=_ts('2018-01-03'), Build=NaT, Test=NaT, Done=NaT, blocked_days=4, impediments=[ - {'start': date(2018, 1, 5), 'end': date(2018, 1, 7), 'status': 'Backlog', 'flag': 'Impediment'}, # ignored because it was blocked in backlog - {'start': date(2018, 1, 10), 'end': date(2018, 1, 12), 'status': 'Committed', 'flag': 'Impediment'}, # included - ]), - dict(Backlog=_ts('2018-01-03'), Committed=_ts('2018-01-03'), Build=_ts('2018-01-04'), Test=_ts('2018-01-05'), Done=_ts('2018-01-06'), blocked_days=4, impediments=[ - {'start': date(2018, 1, 4), 'end': date(2018, 1, 5), 'status': 'Build', 'flag': 'Impediment'}, # included - {'start': date(2018, 1, 7), 'end': date(2018, 1, 10), 'status': 'Done', 'flag': 'Impediment'}, # ignored because it was blocked in done - ]), - dict(Backlog=_ts('2018-01-04'), Committed=_ts('2018-01-04'), Build=NaT, Test=NaT, Done=NaT, blocked_days=100, impediments=[ - {'start': date(2018, 1, 5), 'end': None, 'status': 'Committed', 'flag': 'Awaiting input'}, # 
open ended, still included - ]), - ]), columns=minimal_cycle_time_columns) + CycleTimeCalculator: DataFrame( + _issues( + [ + dict( + Backlog=_ts("2018-01-01"), + Committed=NaT, + Build=NaT, + Test=NaT, + Done=NaT, + blocked_days=0, + impediments=[], + ), + dict( + Backlog=_ts("2018-01-02"), + Committed=_ts("2018-01-03"), + Build=NaT, + Test=NaT, + Done=NaT, + blocked_days=4, + impediments=[ + { + "start": date(2018, 1, 5), + "end": date(2018, 1, 7), + "status": "Backlog", + "flag": "Impediment", + }, # ignored because it was blocked in backlog + { + "start": date(2018, 1, 10), + "end": date(2018, 1, 12), + "status": "Committed", + "flag": "Impediment", + }, # included + ], + ), + dict( + Backlog=_ts("2018-01-03"), + Committed=_ts("2018-01-03"), + Build=_ts("2018-01-04"), + Test=_ts("2018-01-05"), + Done=_ts("2018-01-06"), + blocked_days=4, + impediments=[ + { + "start": date(2018, 1, 4), + "end": date(2018, 1, 5), + "status": "Build", + "flag": "Impediment", + }, # included + { + "start": date(2018, 1, 7), + "end": date(2018, 1, 10), + "status": "Done", + "flag": "Impediment", + }, # ignored because it was blocked in done + ], + ), + dict( + Backlog=_ts("2018-01-04"), + Committed=_ts("2018-01-04"), + Build=NaT, + Test=NaT, + Done=NaT, + blocked_days=100, + impediments=[ + {"start": date(2018, 1, 5), "end": None, "status": "Committed", "flag": "Awaiting input"}, + ], # open ended, still included + ), + ] + ), + columns=minimal_cycle_time_columns, + ) } + def test_only_runs_if_charts_set(query_manager, settings, cycle_time_results): - test_settings = extend_dict(settings, { - 'impediments_data': None, - 'impediments_chart': None, - 'impediments_days_chart': None, - 'impediments_status_chart': None, - 'impediments_status_days_chart': None, - }) + test_settings = extend_dict( + settings, + { + "impediments_data": None, + "impediments_chart": None, + "impediments_days_chart": None, + "impediments_status_chart": None, + "impediments_status_days_chart": None, + }, + ) 
calculator = ImpedimentsCalculator(query_manager, test_settings, cycle_time_results) data = calculator.run() assert data is None - test_settings = extend_dict(settings, { - 'impediments_data': 'impediments.csv', - 'impediments_chart': None, - 'impediments_days_chart': None, - 'impediments_status_chart': None, - 'impediments_status_days_chart': None, - }) + test_settings = extend_dict( + settings, + { + "impediments_data": "impediments.csv", + "impediments_chart": None, + "impediments_days_chart": None, + "impediments_status_chart": None, + "impediments_status_days_chart": None, + }, + ) calculator = ImpedimentsCalculator(query_manager, test_settings, cycle_time_results) data = calculator.run() assert data is not None - test_settings = extend_dict(settings, { - 'impediments_data': None, - 'impediments_chart': 'impediments.png', - 'impediments_days_chart': None, - 'impediments_status_chart': None, - 'impediments_status_days_chart': None, - }) + test_settings = extend_dict( + settings, + { + "impediments_data": None, + "impediments_chart": "impediments.png", + "impediments_days_chart": None, + "impediments_status_chart": None, + "impediments_status_days_chart": None, + }, + ) calculator = ImpedimentsCalculator(query_manager, test_settings, cycle_time_results) data = calculator.run() assert data is not None - test_settings = extend_dict(settings, { - 'impediments_data': None, - 'impediments_chart': None, - 'impediments_days_chart': 'days.png', - 'impediments_status_chart': None, - 'impediments_status_days_chart': None, - }) + test_settings = extend_dict( + settings, + { + "impediments_data": None, + "impediments_chart": None, + "impediments_days_chart": "days.png", + "impediments_status_chart": None, + "impediments_status_days_chart": None, + }, + ) calculator = ImpedimentsCalculator(query_manager, test_settings, cycle_time_results) data = calculator.run() assert data is not None - test_settings = extend_dict(settings, { - 'impediments_data': None, - 
'impediments_chart': None, - 'impediments_days_chart': None, - 'impediments_status_chart': 'status.png', - 'impediments_status_days_chart': None, - }) + test_settings = extend_dict( + settings, + { + "impediments_data": None, + "impediments_chart": None, + "impediments_days_chart": None, + "impediments_status_chart": "status.png", + "impediments_status_days_chart": None, + }, + ) calculator = ImpedimentsCalculator(query_manager, test_settings, cycle_time_results) data = calculator.run() assert data is not None - test_settings = extend_dict(settings, { - 'impediments_data': None, - 'impediments_chart': None, - 'impediments_days_chart': None, - 'impediments_status_chart': None, - 'impediments_status_days_chart': 'status-days.png', - }) + test_settings = extend_dict( + settings, + { + "impediments_data": None, + "impediments_chart": None, + "impediments_days_chart": None, + "impediments_status_chart": None, + "impediments_status_days_chart": "status-days.png", + }, + ) calculator = ImpedimentsCalculator(query_manager, test_settings, cycle_time_results) data = calculator.run() assert data is not None + def test_empty(query_manager, settings, columns): - results = { - CycleTimeCalculator: DataFrame([], columns=columns) - } + results = {CycleTimeCalculator: DataFrame([], columns=columns)} calculator = ImpedimentsCalculator(query_manager, settings, results) data = calculator.run() assert len(data.index) == 0 + def test_columns(query_manager, settings, cycle_time_results): calculator = ImpedimentsCalculator(query_manager, settings, cycle_time_results) data = calculator.run() - assert list(data.columns) == ['key', 'status', 'flag', 'start', 'end'] + assert list(data.columns) == ["key", "status", "flag", "start", "end"] + def test_calculate_impediments(query_manager, settings, cycle_time_results): calculator = ImpedimentsCalculator(query_manager, settings, cycle_time_results) data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'A-2', 'status': 
'Committed', 'flag': 'Impediment', 'start': _ts('2018-01-10'), 'end': _ts('2018-01-12')}, - {'key': 'A-3', 'status': 'Build', 'flag': 'Impediment', 'start': _ts('2018-01-04'), 'end': _ts('2018-01-05')}, - {'key': 'A-4', 'status': 'Committed', 'flag': 'Awaiting input', 'start': _ts('2018-01-05'), 'end': NaT}, + assert data.to_dict("records") == [ + { + "key": "A-2", + "status": "Committed", + "flag": "Impediment", + "start": _ts("2018-01-10"), + "end": _ts("2018-01-12"), + }, + {"key": "A-3", "status": "Build", "flag": "Impediment", "start": _ts("2018-01-04"), "end": _ts("2018-01-05")}, + {"key": "A-4", "status": "Committed", "flag": "Awaiting input", "start": _ts("2018-01-05"), "end": NaT}, ] + def test_different_backlog_column(query_manager, settings, cycle_time_results): - settings = extend_dict(settings, { - 'backlog_column': 'Committed', - 'committed_column': 'Build', - }) + settings = extend_dict(settings, {"backlog_column": "Committed", "committed_column": "Build"}) calculator = ImpedimentsCalculator(query_manager, settings, cycle_time_results) data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'A-3', 'status': 'Build', 'flag': 'Impediment', 'start': _ts('2018-01-04'), 'end': _ts('2018-01-05')}, + assert data.to_dict("records") == [ + {"key": "A-3", "status": "Build", "flag": "Impediment", "start": _ts("2018-01-04"), "end": _ts("2018-01-05")}, ] + def test_different_done_column(query_manager, settings, cycle_time_results): - settings = extend_dict(settings, { - 'done_column': 'Build', - }) + settings = extend_dict(settings, {"done_column": "Build"}) calculator = ImpedimentsCalculator(query_manager, settings, cycle_time_results) data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'A-2', 'status': 'Committed', 'flag': 'Impediment', 'start': _ts('2018-01-10'), 'end': _ts('2018-01-12')}, - {'key': 'A-4', 'status': 'Committed', 'flag': 'Awaiting input', 'start': _ts('2018-01-05'), 'end': NaT}, + assert 
data.to_dict("records") == [ + { + "key": "A-2", + "status": "Committed", + "flag": "Impediment", + "start": _ts("2018-01-10"), + "end": _ts("2018-01-12"), + }, + {"key": "A-4", "status": "Committed", "flag": "Awaiting input", "start": _ts("2018-01-05"), "end": NaT}, ] diff --git a/jira_agile_metrics/calculators/netflow.py b/jira_agile_metrics/calculators/netflow.py index 75455d5..7b120d5 100644 --- a/jira_agile_metrics/calculators/netflow.py +++ b/jira_agile_metrics/calculators/netflow.py @@ -1,23 +1,24 @@ import logging + import matplotlib.pyplot as plt from ..calculator import Calculator from ..utils import set_chart_style - from .cfd import CFDCalculator + logger = logging.getLogger(__name__) + class NetFlowChartCalculator(Calculator): - """Draw a net flow chart - """ + """Draw a net flow chart""" def run(self): cfd_data = self.get_result(CFDCalculator) - cycle_names = [s['name'] for s in self.settings['cycle']] + cycle_names = [s["name"] for s in self.settings["cycle"]] - start_column = self.settings['committed_column'] - done_column = self.settings['done_column'] + start_column = self.settings["committed_column"] + done_column = self.settings["done_column"] if start_column not in cycle_names: logger.error("Committed column %s does not exist", start_column) @@ -26,19 +27,19 @@ def run(self): logger.error("Done column %s does not exist", done_column) return None - frequency = self.settings['net_flow_frequency'] + frequency = self.settings["net_flow_frequency"] logger.debug("Calculating net flow at frequency %s", frequency) - - net_flow_data = cfd_data[[start_column, done_column]].resample(frequency, label='left').max() - net_flow_data['arrivals'] = net_flow_data[start_column].diff().fillna(net_flow_data[start_column]) - net_flow_data['departures'] = net_flow_data[done_column].diff().fillna(net_flow_data[done_column]) - net_flow_data['net_flow'] = net_flow_data['arrivals'] - net_flow_data['departures'] - net_flow_data['positive'] = net_flow_data['net_flow'] >= 0 
+ + net_flow_data = cfd_data[[start_column, done_column]].resample(frequency, label="left").max() + net_flow_data["arrivals"] = net_flow_data[start_column].diff().fillna(net_flow_data[start_column]) + net_flow_data["departures"] = net_flow_data[done_column].diff().fillna(net_flow_data[done_column]) + net_flow_data["net_flow"] = net_flow_data["arrivals"] - net_flow_data["departures"] + net_flow_data["positive"] = net_flow_data["net_flow"] >= 0 return net_flow_data - + def write(self): - output_file = self.settings['net_flow_chart'] + output_file = self.settings["net_flow_chart"] if not output_file: logger.debug("No output file specified for net flow chart") return @@ -50,27 +51,27 @@ def write(self): return fig, ax = plt.subplots() - - if self.settings['net_flow_chart_title']: - ax.set_title(self.settings['net_flow_chart_title']) + + if self.settings["net_flow_chart_title"]: + ax.set_title(self.settings["net_flow_chart_title"]) ax.set_xlabel("Period starting") ax.set_ylabel("Net flow (departures - arrivals)") - net_flow_data = chart_data[['net_flow', 'positive']] + net_flow_data = chart_data[["net_flow", "positive"]] - window = self.settings['net_flow_window'] + window = self.settings["net_flow_window"] if window: net_flow_data = net_flow_data[-window:] - net_flow_data['net_flow'].plot.bar(ax=ax, color=net_flow_data['positive'].map({True: 'r', False: 'b'}),) + net_flow_data["net_flow"].plot.bar(ax=ax, color=net_flow_data["positive"].map({True: "r", False: "b"})) labels = [d.strftime("%d/%m/%Y") for d in net_flow_data.index] - ax.set_xticklabels(labels, rotation=70, size='small') + ax.set_xticklabels(labels, rotation=70, size="small") set_chart_style() # Write file logger.info("Writing ageing WIP chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/netflow_test.py b/jira_agile_metrics/calculators/netflow_test.py index 
5180da2..1dd9dcf 100644 --- a/jira_agile_metrics/calculators/netflow_test.py +++ b/jira_agile_metrics/calculators/netflow_test.py @@ -1,25 +1,26 @@ -import pytest import datetime + from pandas import DataFrame, Timestamp, date_range +import pytest +from ..utils import extend_dict from .cfd import CFDCalculator from .netflow import NetFlowChartCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'net_flow_frequency': 'D' - }) +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict(minimal_settings, {"net_flow_frequency": "D"}) + -@pytest.fixture -def query_manager(minimal_query_manager): +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(query_manager, settings, large_cycle_time_results): - + +@pytest.fixture(name="results") +def fixture_results(query_manager, settings, large_cycle_time_results): + # CFD data frame and net flow: # # Backlog Committed Build Test Done @@ -34,19 +35,25 @@ def results(query_manager, settings, large_cycle_time_results): # 2018-01-09 18.0 15.0 12.0 9.0 6.0 --> -6 # - return extend_dict(large_cycle_time_results, { - CFDCalculator: CFDCalculator(query_manager, settings, large_cycle_time_results).run() - }) + return extend_dict( + large_cycle_time_results, + {CFDCalculator: CFDCalculator(query_manager, settings, large_cycle_time_results).run()}, + ) + def test_empty(query_manager, settings, minimal_cycle_time_columns): results = { - CFDCalculator: DataFrame([], columns=['Backlog', 'Committed', 'Build', 'Test', 'Done'], index=date_range(start=datetime.date(2018, 1, 1), periods=0, freq='D')) + CFDCalculator: DataFrame( + [], + columns=["Backlog", "Committed", "Build", "Test", "Done"], + index=date_range(start=datetime.date(2018, 1, 1), periods=0, freq="D"), + ) } calculator = NetFlowChartCalculator(query_manager, settings, 
results) data = calculator.run() - assert list(data.columns) == ['Committed', 'Done', 'arrivals', 'departures', 'net_flow', 'positive'] + assert list(data.columns) == ["Committed", "Done", "arrivals", "departures", "net_flow", "positive"] assert len(data.index) == 0 @@ -54,7 +61,8 @@ def test_columns(query_manager, settings, results): calculator = NetFlowChartCalculator(query_manager, settings, results) data = calculator.run() - assert list(data.columns) == ['Committed', 'Done', 'arrivals', 'departures', 'net_flow', 'positive'] + assert list(data.columns) == ["Committed", "Done", "arrivals", "departures", "net_flow", "positive"] + def test_calculate_net_flow(query_manager, settings, results): calculator = NetFlowChartCalculator(query_manager, settings, results) @@ -62,60 +70,58 @@ def test_calculate_net_flow(query_manager, settings, results): data = calculator.run() assert list(data.index) == [ - Timestamp('2018-01-01 00:00:00', freq='D'), - Timestamp('2018-01-02 00:00:00', freq='D'), - Timestamp('2018-01-03 00:00:00', freq='D'), - Timestamp('2018-01-04 00:00:00', freq='D'), - Timestamp('2018-01-05 00:00:00', freq='D'), - Timestamp('2018-01-06 00:00:00', freq='D'), - Timestamp('2018-01-07 00:00:00', freq='D'), - Timestamp('2018-01-08 00:00:00', freq='D'), - Timestamp('2018-01-09 00:00:00', freq='D') + Timestamp("2018-01-01 00:00:00", freq="D"), + Timestamp("2018-01-02 00:00:00", freq="D"), + Timestamp("2018-01-03 00:00:00", freq="D"), + Timestamp("2018-01-04 00:00:00", freq="D"), + Timestamp("2018-01-05 00:00:00", freq="D"), + Timestamp("2018-01-06 00:00:00", freq="D"), + Timestamp("2018-01-07 00:00:00", freq="D"), + Timestamp("2018-01-08 00:00:00", freq="D"), + Timestamp("2018-01-09 00:00:00", freq="D"), ] - assert data[['arrivals', 'departures', 'net_flow', 'positive']].to_dict('records') == [ - {'arrivals': 0.0, 'departures': 0.0, 'net_flow': 0.0, 'positive': True}, - {'arrivals': 9.0, 'departures': 0.0, 'net_flow': 9.0, 'positive': True}, - {'arrivals': 4.0, 
'departures': 0.0, 'net_flow': 4.0, 'positive': True}, - {'arrivals': 1.0, 'departures': 0.0, 'net_flow': 1.0, 'positive': True}, - {'arrivals': 1.0, 'departures': 0.0, 'net_flow': 1.0, 'positive': True}, - {'arrivals': 0.0, 'departures': 0.0, 'net_flow': 0.0, 'positive': True}, - {'arrivals': 0.0, 'departures': 2.0, 'net_flow': -2.0, 'positive': False}, - {'arrivals': 0.0, 'departures': 2.0, 'net_flow': -2.0, 'positive': False}, - {'arrivals': 0.0, 'departures': 2.0, 'net_flow': -2.0, 'positive': False}, + assert data[["arrivals", "departures", "net_flow", "positive"]].to_dict("records") == [ + {"arrivals": 0.0, "departures": 0.0, "net_flow": 0.0, "positive": True}, + {"arrivals": 9.0, "departures": 0.0, "net_flow": 9.0, "positive": True}, + {"arrivals": 4.0, "departures": 0.0, "net_flow": 4.0, "positive": True}, + {"arrivals": 1.0, "departures": 0.0, "net_flow": 1.0, "positive": True}, + {"arrivals": 1.0, "departures": 0.0, "net_flow": 1.0, "positive": True}, + {"arrivals": 0.0, "departures": 0.0, "net_flow": 0.0, "positive": True}, + {"arrivals": 0.0, "departures": 2.0, "net_flow": -2.0, "positive": False}, + {"arrivals": 0.0, "departures": 2.0, "net_flow": -2.0, "positive": False}, + {"arrivals": 0.0, "departures": 2.0, "net_flow": -2.0, "positive": False}, ] + def test_calculate_net_flow_different_columns(query_manager, settings, results): - settings.update({ - 'committed_column': 'Build', - 'done_column': 'Test', - }) + settings.update({"committed_column": "Build", "done_column": "Test"}) calculator = NetFlowChartCalculator(query_manager, settings, results) data = calculator.run() assert list(data.index) == [ - Timestamp('2018-01-01 00:00:00', freq='D'), - Timestamp('2018-01-02 00:00:00', freq='D'), - Timestamp('2018-01-03 00:00:00', freq='D'), - Timestamp('2018-01-04 00:00:00', freq='D'), - Timestamp('2018-01-05 00:00:00', freq='D'), - Timestamp('2018-01-06 00:00:00', freq='D'), - Timestamp('2018-01-07 00:00:00', freq='D'), - Timestamp('2018-01-08 00:00:00', 
freq='D'), - Timestamp('2018-01-09 00:00:00', freq='D') + Timestamp("2018-01-01 00:00:00", freq="D"), + Timestamp("2018-01-02 00:00:00", freq="D"), + Timestamp("2018-01-03 00:00:00", freq="D"), + Timestamp("2018-01-04 00:00:00", freq="D"), + Timestamp("2018-01-05 00:00:00", freq="D"), + Timestamp("2018-01-06 00:00:00", freq="D"), + Timestamp("2018-01-07 00:00:00", freq="D"), + Timestamp("2018-01-08 00:00:00", freq="D"), + Timestamp("2018-01-09 00:00:00", freq="D"), ] - assert data[['arrivals', 'departures', 'net_flow', 'positive']].to_dict('records') == [ - {'arrivals': 0.0, 'departures': 0.0, 'net_flow': 0.0, 'positive': True}, - {'arrivals': 0.0, 'departures': 0.0, 'net_flow': 0.0, 'positive': True}, - {'arrivals': 8.0, 'departures': 0.0, 'net_flow': 8.0, 'positive': True}, - {'arrivals': 2.0, 'departures': 3.0, 'net_flow': -1.0, 'positive': False}, - {'arrivals': 1.0, 'departures': 5.0, 'net_flow': -4.0, 'positive': False}, - {'arrivals': 1.0, 'departures': 0.0, 'net_flow': 1.0, 'positive': True}, - {'arrivals': 0.0, 'departures': 0.0, 'net_flow': 0.0, 'positive': True}, - {'arrivals': 0.0, 'departures': 1.0, 'net_flow': -1.0, 'positive': False}, - {'arrivals': 0.0, 'departures': 0.0, 'net_flow': 0.0, 'positive': True}, + assert data[["arrivals", "departures", "net_flow", "positive"]].to_dict("records") == [ + {"arrivals": 0.0, "departures": 0.0, "net_flow": 0.0, "positive": True}, + {"arrivals": 0.0, "departures": 0.0, "net_flow": 0.0, "positive": True}, + {"arrivals": 8.0, "departures": 0.0, "net_flow": 8.0, "positive": True}, + {"arrivals": 2.0, "departures": 3.0, "net_flow": -1.0, "positive": False}, + {"arrivals": 1.0, "departures": 5.0, "net_flow": -4.0, "positive": False}, + {"arrivals": 1.0, "departures": 0.0, "net_flow": 1.0, "positive": True}, + {"arrivals": 0.0, "departures": 0.0, "net_flow": 0.0, "positive": True}, + {"arrivals": 0.0, "departures": 1.0, "net_flow": -1.0, "positive": False}, + {"arrivals": 0.0, "departures": 0.0, "net_flow": 0.0, 
"positive": True}, ] diff --git a/jira_agile_metrics/calculators/percentiles.py b/jira_agile_metrics/calculators/percentiles.py index 140b812..0093387 100644 --- a/jira_agile_metrics/calculators/percentiles.py +++ b/jira_agile_metrics/calculators/percentiles.py @@ -2,25 +2,25 @@ from ..calculator import Calculator from ..utils import get_extension - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class PercentilesCalculator(Calculator): - """Build percentiles for `cycle_time` in cycle data as a DataFrame - """ + """Build percentiles for `cycle_time` in cycle data as a DataFrame""" def run(self): cycle_data = self.get_result(CycleTimeCalculator) - quantiles = self.settings['quantiles'] - logger.debug("Calculating percentiles at %s", ', '.join(['%.2f' % (q * 100.0) for q in quantiles])) + quantiles = self.settings["quantiles"] + logger.debug("Calculating percentiles at %s", ", ".join(["%.2f" % (q * 100.0) for q in quantiles])) - return cycle_data['cycle_time'].dropna().quantile(quantiles) + return cycle_data["cycle_time"].dropna().quantile(quantiles) def write(self): - output_files = self.settings['percentiles_data'] + output_files = self.settings["percentiles_data"] if not output_files: logger.debug("No output file specified for percentiles data") return @@ -30,9 +30,9 @@ def write(self): for output_file in output_files: output_extension = get_extension(output_file) logger.info("Writing percentiles data to %s", output_file) - if output_extension == '.json': - file_data.to_json(output_file, date_format='iso') - elif output_extension == '.xlsx': - file_data.to_frame(name='percentiles').to_excel(output_file, 'Percentiles', header=True) + if output_extension == ".json": + file_data.to_json(output_file, date_format="iso") + elif output_extension == ".xlsx": + file_data.to_frame(name="percentiles").to_excel(output_file, "Percentiles", header=True) else: file_data.to_csv(output_file, header=True) diff --git 
a/jira_agile_metrics/calculators/percentiles_test.py b/jira_agile_metrics/calculators/percentiles_test.py index 99e778a..a7c9556 100644 --- a/jira_agile_metrics/calculators/percentiles_test.py +++ b/jira_agile_metrics/calculators/percentiles_test.py @@ -1,30 +1,30 @@ -import pytest import math + from pandas import DataFrame, Timedelta +import pytest +from ..utils import extend_dict from .cycletime import CycleTimeCalculator from .percentiles import PercentilesCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'quantiles': [0.1, 0.5, 0.9] - }) +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict(minimal_settings, {"quantiles": [0.1, 0.5, 0.9]}) -@pytest.fixture -def query_manager(minimal_query_manager): + +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(large_cycle_time_results): + +@pytest.fixture(name="results") +def fixture_results(large_cycle_time_results): return extend_dict(large_cycle_time_results, {}) + def test_empty(query_manager, settings, minimal_cycle_time_columns): - results = { - CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[]) - } + results = {CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[])} calculator = PercentilesCalculator(query_manager, settings, results) @@ -35,10 +35,11 @@ def test_empty(query_manager, settings, minimal_cycle_time_columns): assert math.isnan(list(data)[1]) assert math.isnan(list(data)[2]) + def test_calculate_percentiles(query_manager, settings, results): calculator = PercentilesCalculator(query_manager, settings, results) data = calculator.run() assert list(data.index) == [0.1, 0.5, 0.9] - assert list(data) == [Timedelta('4 days 12:00:00'), Timedelta('5 days 00:00:00'), Timedelta('5 days 00:00:00')] + assert list(data) == [Timedelta("4 
days 12:00:00"), Timedelta("5 days 00:00:00"), Timedelta("5 days 00:00:00")] diff --git a/jira_agile_metrics/calculators/progressreport.py b/jira_agile_metrics/calculators/progressreport.py index 2d6b140..2800116 100644 --- a/jira_agile_metrics/calculators/progressreport.py +++ b/jira_agile_metrics/calculators/progressreport.py @@ -1,108 +1,107 @@ +import base64 +import datetime import io import logging -import random import math -import base64 -import datetime -import dateutil +import random +import dateutil +import jinja2 +import matplotlib.dates as mdates +import matplotlib.pyplot as plt +import matplotlib.transforms import numpy as np import pandas as pd import scipy.stats import statsmodels.formula.api as sm -import matplotlib.pyplot as plt -import matplotlib.dates as mdates -import matplotlib.transforms - -import jinja2 from ..calculator import Calculator from ..utils import set_chart_style, to_days_since_epoch - +from .cfd import calculate_cfd_data from .cycletime import calculate_cycle_times -from .throughput import calculate_throughput from .forecast import throughput_sampler -from .cfd import calculate_cfd_data from .scatterplot import calculate_scatterplot_data +from .throughput import calculate_throughput + logger = logging.getLogger(__name__) jinja_env = jinja2.Environment( - loader=jinja2.PackageLoader('jira_agile_metrics', 'calculators'), - autoescape=jinja2.select_autoescape(['html', 'xml']) + loader=jinja2.PackageLoader("jira_agile_metrics", "calculators"), + autoescape=jinja2.select_autoescape(["html", "xml"]), ) + class ProgressReportCalculator(Calculator): - """Output a progress report based on Monte Carlo forecast to completion - """ + """Output a progress report based on Monte Carlo forecast to completion""" def run(self, now=None, trials=1000): - if self.settings['progress_report'] is None: - return + if self.settings["progress_report"] is None: + return None # Prepare and validate configuration options - cycle = self.settings['cycle'] - 
cycle_names = [s['name'] for s in cycle] - quantiles = self.settings['quantiles'] + cycle = self.settings["cycle"] + quantiles = self.settings["quantiles"] - backlog_column = self.settings['backlog_column'] - committed_column = self.settings['committed_column'] - done_column = self.settings['done_column'] + backlog_column = self.settings["backlog_column"] + done_column = self.settings["done_column"] - epic_query_template = self.settings['progress_report_epic_query_template'] + epic_query_template = self.settings["progress_report_epic_query_template"] if not epic_query_template: if ( - self.settings['progress_report_outcome_query'] is not None or - self.settings['progress_report_outcomes'] is None or - len(self.settings['progress_report_outcomes']) == 0 or - any(map(lambda o: o['epic_query'] is None, self.settings['progress_report_outcomes'])) + self.settings["progress_report_outcome_query"] is not None + or self.settings["progress_report_outcomes"] is None + or len(self.settings["progress_report_outcomes"]) == 0 + or any(map(lambda o: o["epic_query"] is None, self.settings["progress_report_outcomes"])) ): - logger.error("`Progress report epic query template` is required unless all outcomes have `Epic query` set.") + logger.error( + "`Progress report epic query template` is required unless all outcomes have `Epic query` set." 
+ ) return None - story_query_template = self.settings['progress_report_story_query_template'] + story_query_template = self.settings["progress_report_story_query_template"] if not story_query_template: logger.error("`Progress report story query template` is required") - return + return None # if not set, we only show forecast completion date, no RAG/deadline - epic_deadline_field = self.settings['progress_report_epic_deadline_field'] + epic_deadline_field = self.settings["progress_report_epic_deadline_field"] if epic_deadline_field and epic_deadline_field not in self.query_manager.jira_fields_to_names: epic_deadline_field = self.query_manager.field_name_to_id(epic_deadline_field) - epic_min_stories_field = self.settings['progress_report_epic_min_stories_field'] + epic_min_stories_field = self.settings["progress_report_epic_min_stories_field"] if epic_min_stories_field and epic_min_stories_field not in self.query_manager.jira_fields_to_names: epic_min_stories_field = self.query_manager.field_name_to_id(epic_min_stories_field) - epic_max_stories_field = self.settings['progress_report_epic_max_stories_field'] + epic_max_stories_field = self.settings["progress_report_epic_max_stories_field"] if not epic_max_stories_field: epic_max_stories_field = epic_min_stories_field elif epic_max_stories_field not in self.query_manager.jira_fields_to_names: epic_max_stories_field = self.query_manager.field_name_to_id(epic_max_stories_field) - epic_team_field = self.settings['progress_report_epic_team_field'] + epic_team_field = self.settings["progress_report_epic_team_field"] if epic_team_field and epic_team_field not in self.query_manager.jira_fields_to_names: epic_team_field = self.query_manager.field_name_to_id(epic_team_field) - teams = self.settings['progress_report_teams'] or [] + teams = self.settings["progress_report_teams"] or [] for team in teams: - if not team['name']: + if not team["name"]: logger.error("Teams must have a name.") return None - if not team['wip'] or 
team['wip'] < 1: + if not team["wip"] or team["wip"] < 1: logger.error("Team WIP must be >= 1") return None - if team['min_throughput'] or team['max_throughput']: - if not (team['min_throughput'] and team['max_throughput']): + if team["min_throughput"] or team["max_throughput"]: + if not (team["min_throughput"] and team["max_throughput"]): logger.error("If one of `Min throughput` or `Max throughput` is specified, both must be specified.") return None - if team['min_throughput'] > team['max_throughput']: + if team["min_throughput"] > team["max_throughput"]: logger.error("`Min throughput` must be less than or equal to `Max throughput`.") return None - if team['throughput_samples']: + if team["throughput_samples"]: logger.error("`Throughput samples` cannot be used if `Min/max throughput` is already specified.") # Note: If neither min/max throughput or samples are specified, we turn off forecasting @@ -110,31 +109,44 @@ def run(self, now=None, trials=1000): # If we aren't recording teams against epics, there can be either no teams # at all, or a single, default team, but not multiple. if not epic_team_field and len(teams) > 1: - logger.error("`Progress report epic team field` is required if there is more than one team under `Progress report teams`.") + logger.error( + "`Progress report epic team field` is required if there is more than one team under " + "`Progress report teams`." + ) return None # Find outcomes, either in the config file or by querying JIRA (or both). 
# If none set, we use a single epic query and don't group by outcomes - outcomes = [ - Outcome( - name=o['name'], - key=o['key'] if o['key'] else o['name'], - deadline=datetime.datetime.combine(o['deadline'], datetime.datetime.min.time()) if o['deadline'] else None, - epic_query=( - o['epic_query'] if o['epic_query'] - else epic_query_template.format(outcome='"%s"' % (o['key'] if o['key'] else o['name'])) + outcomes = ( + [ + Outcome( + name=o["name"], + key=o["key"] if o["key"] else o["name"], + deadline=datetime.datetime.combine(o["deadline"], datetime.datetime.min.time()) + if o["deadline"] + else None, + epic_query=( + o["epic_query"] + if o["epic_query"] + else epic_query_template.format(outcome='"%s"' % (o["key"] if o["key"] else o["name"])) + ), ) - ) for o in self.settings['progress_report_outcomes'] - ] if self.settings['progress_report_outcomes'] is not None else [] + for o in self.settings["progress_report_outcomes"] + ] + if self.settings["progress_report_outcomes"] is not None + else [] + ) - outcome_query = self.settings['progress_report_outcome_query'] + outcome_query = self.settings["progress_report_outcome_query"] if outcome_query: - outcome_deadline_field = self.settings['progress_report_outcome_deadline_field'] + outcome_deadline_field = self.settings["progress_report_outcome_deadline_field"] if outcome_deadline_field and outcome_deadline_field not in self.query_manager.jira_fields_to_names: outcome_deadline_field = self.query_manager.field_name_to_id(outcome_deadline_field) - outcomes.extend(find_outcomes(self.query_manager, outcome_query, outcome_deadline_field, epic_query_template)) + outcomes.extend( + find_outcomes(self.query_manager, outcome_query, outcome_deadline_field, epic_query_template) + ) if len(outcomes) > 0: if not all([bool(outcome.name) for outcome in outcomes]): @@ -150,15 +162,16 @@ def run(self, now=None, trials=1000): teams = [ Team( - name=team['name'], - wip=team['wip'], - min_throughput=team['min_throughput'], - 
max_throughput=team['max_throughput'], - throughput_samples=team['throughput_samples'].format( - team='"%s"' % team['name'], - ) if team['throughput_samples'] else None, - throughput_samples_window=team['throughput_samples_window'], - ) for team in teams + name=team["name"], + wip=team["wip"], + min_throughput=team["min_throughput"], + max_throughput=team["max_throughput"], + throughput_samples=team["throughput_samples"].format(team='"%s"' % team["name"]) + if team["throughput_samples"] + else None, + throughput_samples_window=team["throughput_samples_window"], + ) + for team in teams ] for team in teams: @@ -188,7 +201,7 @@ def run(self, now=None, trials=1000): epic_max_stories_field=epic_max_stories_field, epic_team_field=epic_team_field, epic_deadline_field=epic_deadline_field, - outcome=outcome + outcome=outcome, ): if not epic_team_field: epic.team = default_team # single defined team, or None @@ -198,7 +211,11 @@ def run(self, now=None, trials=1000): epic.team = team_lookup.get(epic_team_name.lower(), None) if epic.team is None: - logger.info("Cannot find team `%s` for epic `%s`. Dynamically adding a non-forecasted team." % (epic_team_name, epic.key,)) + logger.info( + "Cannot find team `%s` for epic `%s`. 
Dynamically adding a non-forecasted team.", + epic_team_name, + epic.key, + ) epic.team = Team(name=epic_team_name) teams.append(epic.team) team_lookup[epic_team_name.lower()] = epic.team @@ -220,7 +237,7 @@ def run(self, now=None, trials=1000): query_manager=self.query_manager, cycle=cycle, backlog_column=backlog_column, - done_column=done_column + done_column=done_column, ) # Run Monte Carlo simulation to complete @@ -231,13 +248,10 @@ def run(self, now=None, trials=1000): if team.sampler is not None: forecast_to_complete(team, team_epics[team.name.lower()], quantiles, trials=trials, now=now) - return { - 'outcomes': outcomes, - 'teams': teams - } + return {"outcomes": outcomes, "teams": teams} def write(self): - output_file = self.settings['progress_report'] + output_file = self.settings["progress_report"] if not output_file: logger.debug("No output file specified for progress report") return @@ -247,17 +261,17 @@ def write(self): logger.warning("No data found for progress report") return - cycle_names = [s['name'] for s in self.settings['cycle']] - backlog_column = self.settings['backlog_column'] - quantiles = self.settings['quantiles'] + cycle_names = [s["name"] for s in self.settings["cycle"]] + backlog_column = self.settings["backlog_column"] + quantiles = self.settings["quantiles"] - template = jinja_env.get_template('progressreport_template.html') + template = jinja_env.get_template("progressreport_template.html") today = datetime.date.today() epics_by_team = {} have_outcomes = False have_forecasts = False - for outcome in data['outcomes']: + for outcome in data["outcomes"]: if outcome.name is not None: have_outcomes = True @@ -269,55 +283,70 @@ def write(self): epics_by_team[epic.team.name] = [] epics_by_team[epic.team.name].append(epic) - with open(output_file, 'w') as of: - of.write(template.render( - jira_url=self.query_manager.jira._options['server'], - title=self.settings['progress_report_title'], - 
story_query_template=self.settings['progress_report_story_query_template'], - epic_deadline_field=self.settings['progress_report_epic_deadline_field'], - epic_min_stories_field=self.settings['progress_report_epic_min_stories_field'], - epic_max_stories_field=self.settings['progress_report_epic_max_stories_field'], - epic_team_field=self.settings['progress_report_epic_team_field'], - outcomes=data['outcomes'], - teams=data['teams'], - num_teams=len(data['teams']), - have_teams=len(data['teams']) > 1, - have_outcomes=have_outcomes, - have_forecasts=have_forecasts, - epics_by_team=epics_by_team, - enumerate=enumerate, - future_date=lambda weeks: forward_weeks(today, weeks), - color_code=lambda q: ( - 'primary' if q is None else - 'danger' if q <= 0.7 else - 'warning' if q <= 0.9 else - 'success' - ), - percent_complete=lambda epic: ( - int(round(((epic.stories_done or 0) / epic.max_stories) * 100)) - ), - outcome_charts={outcome.key: { - 'cfd': plot_cfd( - cycle_data=pd.concat([e.story_cycle_times for e in outcome.epics]), - cycle_names=cycle_names, - backlog_column=backlog_column, - target=sum([e.max_stories or 0 for e in outcome.epics]), - deadline=outcome.deadline - ) if len(outcome.epics) > 0 else None, - } for outcome in data['outcomes']}, - team_charts={team.name: { - 'cfd': plot_cfd(team.throughput_samples_cycle_times, cycle_names, backlog_column), - 'throughput': plot_throughput(team.throughput_samples_cycle_times), - 'scatterplot': plot_scatterplot(team.throughput_samples_cycle_times, quantiles) - } for team in data['teams']}, - epic_charts={epic.key: { - 'cfd': plot_cfd(epic.story_cycle_times, cycle_names, backlog_column, target=epic.max_stories, deadline=epic.deadline), - 'scatterplot': plot_scatterplot(epic.story_cycle_times, quantiles) - } for outcome in data['outcomes'] for epic in outcome.epics} - )) - -class Outcome(object): + with open(output_file, "w") as output_file: + output_file.write( + template.render( + 
jira_url=self.query_manager.jira.client_info(), + title=self.settings["progress_report_title"], + story_query_template=self.settings["progress_report_story_query_template"], + epic_deadline_field=self.settings["progress_report_epic_deadline_field"], + epic_min_stories_field=self.settings["progress_report_epic_min_stories_field"], + epic_max_stories_field=self.settings["progress_report_epic_max_stories_field"], + epic_team_field=self.settings["progress_report_epic_team_field"], + outcomes=data["outcomes"], + teams=data["teams"], + num_teams=len(data["teams"]), + have_teams=len(data["teams"]) > 1, + have_outcomes=have_outcomes, + have_forecasts=have_forecasts, + epics_by_team=epics_by_team, + enumerate=enumerate, + future_date=lambda weeks: forward_weeks(today, weeks), + color_code=lambda q: ( + "primary" if q is None else "danger" if q <= 0.7 else "warning" if q <= 0.9 else "success" + ), + percent_complete=lambda epic_: (int(round(((epic_.stories_done or 0) / epic_.max_stories) * 100))), + outcome_charts={ + outcome.key: { + "cfd": plot_cfd( + cycle_data=pd.concat([e.story_cycle_times for e in outcome.epics]), + cycle_names=cycle_names, + backlog_column=backlog_column, + target=sum([e.max_stories or 0 for e in outcome.epics]), + deadline=outcome.deadline, + ) + if len(outcome.epics) > 0 + else None, + } + for outcome in data["outcomes"] + }, + team_charts={ + team.name: { + "cfd": plot_cfd(team.throughput_samples_cycle_times, cycle_names, backlog_column), + "throughput": plot_throughput(team.throughput_samples_cycle_times), + "scatterplot": plot_scatterplot(team.throughput_samples_cycle_times, quantiles), + } + for team in data["teams"] + }, + epic_charts={ + epic.key: { + "cfd": plot_cfd( + epic.story_cycle_times, + cycle_names, + backlog_column, + target=epic.max_stories, + deadline=epic.deadline, + ), + "scatterplot": plot_scatterplot(epic.story_cycle_times, quantiles), + } + for outcome in data["outcomes"] + for epic in outcome.epics + }, + ) + ) + +class 
Outcome: def __init__(self, name, key, deadline=None, epic_query=None, epics=None, is_jira=False): self.name = name self.key = key @@ -326,16 +355,18 @@ def __init__(self, name, key, deadline=None, epic_query=None, epics=None, is_jir self.epics = epics if epics is not None else [] self.is_jira = is_jira -class Team(object): - def __init__(self, name, +class Team: + def __init__( + self, + name, wip=1, min_throughput=None, max_throughput=None, throughput_samples=None, throughput_samples_window=None, throughput_samples_cycle_times=None, - sampler=None + sampler=None, ): self.name = name self.wip = wip @@ -348,10 +379,19 @@ def __init__(self, name, self.sampler = sampler -class Epic(object): - def __init__(self, key, summary, status, resolution, resolution_date, - min_stories, max_stories, team_name, deadline, +class Epic: + def __init__( + self, + key, + summary, + status, + resolution, + resolution_date, + min_stories, + max_stories, + team_name, + deadline, story_query=None, story_cycle_times=None, stories_raised=None, @@ -362,7 +402,7 @@ def __init__(self, key, summary, status, resolution, resolution_date, last_story_finished=None, team=None, outcome=None, - forecast=None + forecast=None, ): self.key = key self.summary = summary @@ -387,25 +427,21 @@ def __init__(self, key, summary, status, resolution, resolution_date, self.outcome = outcome self.forecast = forecast -class Forecast(object): +class Forecast: def __init__(self, quantiles, deadline_quantile=None): self.quantiles = quantiles # pairs of (quantile, weeks) self.deadline_quantile = deadline_quantile -def throughput_range_sampler(min, max): + +def throughput_range_sampler(min_, max_): def get_throughput_range_sample(): - return random.randint(min, max) + return random.randint(min_, max_) + return get_throughput_range_sample -def update_team_sampler( - team, - query_manager, - cycle, - backlog_column, - done_column, - frequency='1W' -): + +def update_team_sampler(team, query_manager, cycle, backlog_column, 
done_column, frequency="1W"): # Use query if set if team.throughput_samples: @@ -420,7 +456,11 @@ def update_team_sampler( ) if throughput is None: - logger.error("No completed issues found by query `%s`. Unable to calculate throughput. Will use min/max throughput if set." % team.throughput_samples) + logger.error( + "No completed issues found by query `%s`. Unable to calculate throughput. " + "Will use min/max throughput if set.", + team.throughput_samples, + ) else: team.sampler = throughput_sampler(throughput, 0, 10) # we have to hardcode the buffer size @@ -428,17 +468,13 @@ def update_team_sampler( if team.sampler is None and team.min_throughput and team.max_throughput: team.sampler = throughput_range_sampler(team.min_throughput, max(team.min_throughput, team.max_throughput)) -def calculate_team_throughput( - team, - query_manager, - cycle, - backlog_column, - done_column, - frequency -): - backlog_column_index = [s['name'] for s in cycle].index(backlog_column) - committed_column = cycle[backlog_column_index + 1]['name'] # config parser ensures that `backlog` comes immediately before `committed` +def calculate_team_throughput(team, query_manager, cycle, backlog_column, done_column, frequency): + + backlog_column_index = [s["name"] for s in cycle].index(backlog_column) + committed_column = cycle[backlog_column_index + 1][ + "name" + ] # config parser ensures that `backlog` comes immediately before `committed` cycle_times = calculate_cycle_times( query_manager=query_manager, @@ -446,39 +482,31 @@ def calculate_team_throughput( attributes={}, committed_column=committed_column, done_column=done_column, - queries=[{'jql': team.throughput_samples, 'value': None}], + queries=[{"jql": team.throughput_samples, "value": None}], query_attribute=None, ) team.throughput_samples_cycle_times = cycle_times - if cycle_times['completed_timestamp'].count() == 0: + if cycle_times["completed_timestamp"].count() == 0: return None return calculate_throughput(cycle_times, 
frequency=frequency, window=team.throughput_samples_window) -def find_outcomes( - query_manager, - query, - outcome_deadline_field, - epic_query_template -): + +def find_outcomes(query_manager, query, outcome_deadline_field, epic_query_template): for issue in query_manager.find_issues(query): yield Outcome( name=issue.fields.summary, key=issue.key, deadline=date_value(query_manager, issue, outcome_deadline_field), epic_query=epic_query_template.format(outcome='"%s"' % issue.key), - is_jira=True + is_jira=True, ) + def find_epics( - query_manager, - epic_min_stories_field, - epic_max_stories_field, - epic_team_field, - epic_deadline_field, - outcome + query_manager, epic_min_stories_field, epic_max_stories_field, epic_team_field, epic_deadline_field, outcome ): for issue in query_manager.find_issues(outcome.epic_query): @@ -488,22 +516,23 @@ def find_epics( status=issue.fields.status.name, resolution=issue.fields.resolution.name if issue.fields.resolution else None, resolution_date=dateutil.parser.parse(issue.fields.resolutiondate) if issue.fields.resolutiondate else None, - min_stories=int_or_none(query_manager.resolve_field_value(issue, epic_min_stories_field)) if epic_min_stories_field else None, - max_stories=int_or_none(query_manager.resolve_field_value(issue, epic_max_stories_field)) if epic_max_stories_field else None, + min_stories=int_or_none(query_manager.resolve_field_value(issue, epic_min_stories_field)) + if epic_min_stories_field + else None, + max_stories=int_or_none(query_manager.resolve_field_value(issue, epic_max_stories_field)) + if epic_max_stories_field + else None, team_name=query_manager.resolve_field_value(issue, epic_team_field) if epic_team_field else None, deadline=date_value(query_manager, issue, epic_deadline_field, default=outcome.deadline), outcome=outcome, ) -def update_story_counts( - epic, - query_manager, - cycle, - backlog_column, - done_column -): - backlog_column_index = [s['name'] for s in cycle].index(backlog_column) - 
committed_column = cycle[backlog_column_index + 1]['name'] # config parser ensures that `backlog` comes immediately before `committed` + +def update_story_counts(epic, query_manager, cycle, backlog_column, done_column): + backlog_column_index = [s["name"] for s in cycle].index(backlog_column) + committed_column = cycle[backlog_column_index + 1][ + "name" + ] # config parser ensures that `backlog` comes immediately before `committed` story_cycle_times = calculate_cycle_times( query_manager=query_manager, @@ -511,7 +540,7 @@ def update_story_counts( attributes={}, committed_column=committed_column, done_column=done_column, - queries=[{'jql': epic.story_query, 'value': None}], + queries=[{"jql": epic.story_query, "value": None}], query_attribute=None, ) @@ -525,9 +554,13 @@ def update_story_counts( else: epic.stories_done = story_cycle_times[done_column].count() epic.stories_in_progress = story_cycle_times[committed_column].count() - epic.stories_done - epic.stories_in_backlog = story_cycle_times[backlog_column].count() - (epic.stories_in_progress + epic.stories_done) + epic.stories_in_backlog = story_cycle_times[backlog_column].count() - ( + epic.stories_in_progress + epic.stories_done + ) - epic.first_story_started = story_cycle_times[committed_column].min().date() if epic.stories_in_progress > 0 else None + epic.first_story_started = ( + story_cycle_times[committed_column].min().date() if epic.stories_in_progress > 0 else None + ) epic.last_story_finished = story_cycle_times[done_column].max().date() if epic.stories_done > 0 else None # if the actual number of stories exceeds min and/or max, adjust accordingly @@ -538,6 +571,7 @@ def update_story_counts( if not epic.max_stories or epic.max_stories < epic.stories_raised: epic.max_stories = max(epic.min_stories, epic.stories_raised, 1) + def forecast_to_complete(team, epics, quantiles, trials=1000, max_iterations=9999, now=None): # Allows unit testing to use a fixed date @@ -547,22 +581,19 @@ def 
forecast_to_complete(team, epics, quantiles, trials=1000, max_iterations=999 epic_trials = {e.key: pd.Series([np.nan] * trials) for e in epics} if team.sampler is None: - logger.error("Team %s has no sampler. Unable to forecast." % team.name) + logger.error("Team %s has no sampler. Unable to forecast.", team.name) return # apply WIP limit to list of epics not yet completed def filter_active_epics(trial_values): - return [t for t in trial_values if t['value'] < t['target']][:team.wip] + return [t for t in trial_values if t["value"] < t["target"]][: team.wip] for trial in range(trials): # track progress of each epic - target value is randomised - trial_values = [{ - 'epic': e, - 'value': e.stories_done, - 'target': calculate_epic_target(e), - 'weeks': 0 - } for e in epics] + trial_values = [ + {"epic": e, "value": e.stories_done, "target": calculate_epic_target(e), "weeks": 0} for e in epics + ] active_epics = filter_active_epics(trial_values) steps = 0 @@ -571,9 +602,9 @@ def filter_active_epics(trial_values): steps += 1 # increment all epics that are not finished - for ev in trial_values: - if ev['value'] < ev['target']: - ev['weeks'] += 1 + for trial_value in trial_values: + if trial_value["value"] < trial_value["target"]: + trial_value["weeks"] += 1 # draw a sample (throughput over a week) for the team and distribute # it over the active epics @@ -581,8 +612,8 @@ def filter_active_epics(trial_values): per_active_epic = int(sample / len(active_epics)) remainder = sample % len(active_epics) - for ev in active_epics: - ev['value'] += per_active_epic + for active_epic in active_epics: + active_epic["value"] += per_active_epic # reset in case some have finished active_epics = filter_active_epics(trial_values) @@ -590,17 +621,17 @@ def filter_active_epics(trial_values): # apply remainder to a randomly picked epic if sample didn't evenly divide if len(active_epics) > 0 and remainder > 0: lucky_epic = random.randint(0, len(active_epics) - 1) - 
active_epics[lucky_epic]['value'] += remainder + active_epics[lucky_epic]["value"] += remainder # reset in case some have finished active_epics = filter_active_epics(trial_values) if steps == max_iterations: - logger.warning("Trial %d did not complete after %d weeks, aborted." % (trial, max_iterations,)) + logger.warning("Trial %d did not complete after %d weeks, aborted.", trial, max_iterations) # record this trial - for ev in trial_values: - epic_trials[ev['epic'].key].iat[trial] = ev['weeks'] + for trial_value in trial_values: + epic_trials[trial_value["epic"].key].iat[trial] = trial_value["weeks"] for epic in epics: trials = epic_trials[epic.key].dropna() @@ -612,24 +643,23 @@ def filter_active_epics(trial_values): weeks_to_deadline = math.ceil((epic.deadline.date() - now.date()).days / 7) # ...and what trial quantile does that correspond to (higher = more confident) - deadline_quantile = scipy.stats.percentileofscore(trials, weeks_to_deadline, kind='weak') / 100 + deadline_quantile = scipy.stats.percentileofscore(trials, weeks_to_deadline, kind="weak") / 100 epic.forecast = Forecast( - quantiles=list(zip(quantiles, trials.quantile(quantiles))), - deadline_quantile=deadline_quantile + quantiles=list(zip(quantiles, trials.quantile(quantiles))), deadline_quantile=deadline_quantile ) else: epic.forecast = None + def calculate_epic_target(epic): - return random.randint( - max(epic.min_stories, 0), - max(epic.min_stories, epic.max_stories, 1) - ) + return random.randint(max(epic.min_stories, 0), max(epic.min_stories, epic.max_stories, 1)) + def forward_weeks(date, weeks): return (date - datetime.timedelta(days=date.weekday())) + datetime.timedelta(weeks=weeks) + def plot_cfd(cycle_data, cycle_names, backlog_column, target=None, deadline=None): # Prepare data @@ -641,7 +671,9 @@ def plot_cfd(cycle_data, cycle_names, backlog_column, target=None, deadline=None cfd_data = cfd_data.drop([backlog_column], axis=1) backlog_column_index = cycle_names.index(backlog_column) - 
started_column = cycle_names[backlog_column_index + 1] # config parser ensures there is at least one column after backlog + started_column = cycle_names[ + backlog_column_index + 1 + ] # config parser ensures there is at least one column after backlog if cfd_data[started_column].max() <= 0: return None @@ -666,15 +698,16 @@ def plot_cfd(cycle_data, cycle_names, backlog_column, target=None, deadline=None deadline_dse = to_days_since_epoch(deadline.date()) - ax.vlines(deadline, bottom, target, color='r', linestyles='-', linewidths=0.5) - ax.annotate("Due: %s" % (deadline.strftime("%d/%m/%Y"),), + ax.vlines(deadline, bottom, target, color="r", linestyles="-", linewidths=0.5) + ax.annotate( + "Due: %s" % (deadline.strftime("%d/%m/%Y"),), xy=(deadline, target), xytext=(0, 10), - textcoords='offset points', + textcoords="offset points", fontsize="x-small", ha="right", - color='black', - backgroundcolor="#ffffff" + color="black", + backgroundcolor="#ffffff", ) # Make sure we can see deadline line @@ -685,19 +718,20 @@ def plot_cfd(cycle_data, cycle_names, backlog_column, target=None, deadline=None if target is not None: left, right = ax.get_xlim() - ax.hlines(target, left, right, linestyles='--', linewidths=1) - ax.annotate("Target: %d" % (target,), + ax.hlines(target, left, right, linestyles="--", linewidths=1) + ax.annotate( + "Target: %d" % (target,), xy=(0.02, target), xycoords=transform_horizontal, fontsize="x-small", ha="left", va="center", - backgroundcolor="#ffffff" + backgroundcolor="#ffffff", ) # Legend - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) # Spacing @@ -710,12 +744,13 @@ def plot_cfd(cycle_data, cycle_names, backlog_column, target=None, deadline=None # Return as base64 encoded string buffer = io.BytesIO() - fig.savefig(buffer, format='png', bbox_inches='tight', dpi=220) + fig.savefig(buffer, format="png", bbox_inches="tight", dpi=220) plt.close(fig) - return 
base64.b64encode(buffer.getvalue()).decode('utf-8') + return base64.b64encode(buffer.getvalue()).decode("utf-8") -def plot_throughput(cycle_data, frequency='1W'): + +def plot_throughput(cycle_data, frequency="1W"): # Prepare data @@ -727,10 +762,10 @@ def plot_throughput(cycle_data, frequency='1W'): # Calculate regression day_zero = throughput_data.index[0] - throughput_data['day'] = (throughput_data.index - day_zero).days + throughput_data["day"] = (throughput_data.index - day_zero).days fit = sm.ols(formula="count ~ day", data=throughput_data).fit() - throughput_data['fitted'] = fit.predict(throughput_data) + throughput_data["fitted"] = fit.predict(throughput_data) # Plot @@ -739,34 +774,31 @@ def plot_throughput(cycle_data, frequency='1W'): ax.set_xlabel("Period starting") ax.set_ylabel("Number of items") - ax.plot(throughput_data.index, throughput_data['count'], marker='o') - plt.xticks(throughput_data.index, [d.date().strftime('%d/%m/%Y') for d in throughput_data.index], rotation=70, size='small') + ax.plot(throughput_data.index, throughput_data["count"], marker="o") + plt.xticks( + throughput_data.index, [d.date().strftime("%d/%m/%Y") for d in throughput_data.index], rotation=70, size="small" + ) _, top = ax.get_ylim() ax.set_ylim(0, top + 1) - for x, y in zip(throughput_data.index, throughput_data['count']): + for x, y in zip(throughput_data.index, throughput_data["count"]): if y == 0: continue - ax.annotate( - "%.0f" % y, - xy=(x.toordinal(), y + 0.2), - ha='center', - va='bottom', - fontsize="x-small", - ) + ax.annotate("%.0f" % y, xy=(x.toordinal(), y + 0.2), ha="center", va="bottom", fontsize="x-small") - ax.plot(throughput_data.index, throughput_data['fitted'], '--', linewidth=2) + ax.plot(throughput_data.index, throughput_data["fitted"], "--", linewidth=2) set_chart_style() # Return as base64 encoded string buffer = io.BytesIO() - fig.savefig(buffer, format='png', bbox_inches='tight', dpi=220) + fig.savefig(buffer, format="png", bbox_inches="tight", 
dpi=220) plt.close(fig) - return base64.b64encode(buffer.getvalue()).decode('utf-8') + return base64.b64encode(buffer.getvalue()).decode("utf-8") + def plot_scatterplot(cycle_data, quantiles): @@ -782,10 +814,13 @@ def plot_scatterplot(cycle_data, quantiles): # Plot - chart_data = pd.DataFrame({ - 'completed_date': scatterplot_data['completed_date'].values.astype('datetime64[D]'), - 'cycle_time': scatterplot_data['cycle_time'] - }, index=scatterplot_data.index) + chart_data = pd.DataFrame( + { + "completed_date": scatterplot_data["completed_date"].values.astype("datetime64[D]"), + "cycle_time": scatterplot_data["cycle_time"], + }, + index=scatterplot_data.index, + ) fig, ax = plt.subplots() fig.autofmt_xdate() @@ -793,21 +828,22 @@ def plot_scatterplot(cycle_data, quantiles): ax.set_xlabel("Completed date") ax.set_ylabel("Cycle time (days)") - ax.plot_date(x=chart_data['completed_date'], y=chart_data['cycle_time'], ms=5) - ax.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y')) + ax.plot_date(x=chart_data["completed_date"], y=chart_data["cycle_time"], ms=5) + ax.xaxis.set_major_formatter(mdates.DateFormatter("%d/%m/%Y")) _, top = ax.get_ylim() ax.set_ylim(0, top + 1) # Add quantiles left, right = ax.get_xlim() - for quantile, value in chart_data['cycle_time'].quantile(quantiles).iteritems(): - ax.hlines(value, left, right, linestyles='--', linewidths=1) - ax.annotate("%.0f%% (%.0f days)" % ((quantile * 100), value,), + for quantile, value in chart_data["cycle_time"].quantile(quantiles).iteritems(): + ax.hlines(value, left, right, linestyles="--", linewidths=1) + ax.annotate( + "%.0f%% (%.0f days)" % ((quantile * 100), value), xy=(left, value), xytext=(left, value), fontsize="x-small", - ha="left" + ha="left", ) set_chart_style() @@ -815,15 +851,17 @@ def plot_scatterplot(cycle_data, quantiles): # Return as base64 encoded string buffer = io.BytesIO() - fig.savefig(buffer, format='png', bbox_inches='tight', dpi=220) + fig.savefig(buffer, format="png", 
bbox_inches="tight", dpi=220) plt.close(fig) - return base64.b64encode(buffer.getvalue()).decode('utf-8') + return base64.b64encode(buffer.getvalue()).decode("utf-8") + def int_or_none(value): - return value if isinstance(value, int) else \ - int(value) if isinstance(value, (str, bytes)) and value.isdigit() \ - else None + return ( + value if isinstance(value, int) else int(value) if isinstance(value, (str, bytes)) and value.isdigit() else None + ) + def date_value(query_manager, issue, field_name, default=None): value = default diff --git a/jira_agile_metrics/calculators/progressreport_test.py b/jira_agile_metrics/calculators/progressreport_test.py index 1700b8d..5985fc2 100644 --- a/jira_agile_metrics/calculators/progressreport_test.py +++ b/jira_agile_metrics/calculators/progressreport_test.py @@ -1,17 +1,12 @@ +from datetime import datetime, date, timedelta import random -import pytest + import pandas as pd -from datetime import datetime, date, timedelta -from ..conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxChange as Change, - FauxFieldValue as Value -) +import pytest +from ..conftest import FauxJIRA as JIRA, FauxIssue as Issue, FauxChange as Change, FauxFieldValue as Value from ..querymanager import QueryManager from ..utils import extend_dict - from .progressreport import ( throughput_range_sampler, update_team_sampler, @@ -24,413 +19,456 @@ Outcome, Team, Epic, - ProgressReportCalculator + ProgressReportCalculator, ) + # for debugging - leave off! 
WRITE_TEST_OUTPUTS = False -statuses = ['Backlog', 'Next', 'Build', 'QA', 'Done'] +statuses = ["Backlog", "Next", "Build", "QA", "Done"] -verbs = [ - 'build', 'launch', 'integrate', 'optimize', 'enable', 'facilitate', - 'MVP for', 'beta-test', 'alpha-test' -] +verbs = ["build", "launch", "integrate", "optimize", "enable", "facilitate", "MVP for", "beta-test", "alpha-test"] nouns = [ - 'stock', 'website', 'system', 'basket', 'engine', 'search', - 'browse', 'configurator', 'marketing', 'brochureware', 'campaign', + "stock", + "website", + "system", + "basket", + "engine", + "search", + "browse", + "configurator", + "marketing", + "brochureware", + "campaign", ] + def random_date_past(start, max_days): return start - timedelta(days=random.randint(0, max_days)) + def random_date_future(start, max_days): return start + timedelta(days=random.randint(0, max_days)) -@pytest.fixture -def fields(custom_fields): + +@pytest.fixture(name="fields") +def fixture_fields(custom_fields): return custom_fields + [ # customfield_001 = Team - {'id': 'customfield_201', 'name': 'Outcome'}, - {'id': 'customfield_202', 'name': 'Deadline'}, - {'id': 'customfield_203', 'name': 'Min stories'}, - {'id': 'customfield_204', 'name': 'Max stories'}, - {'id': 'customfield_205', 'name': 'Epic'}, + {"id": "customfield_201", "name": "Outcome"}, + {"id": "customfield_202", "name": "Deadline"}, + {"id": "customfield_203", "name": "Min stories"}, + {"id": "customfield_204", "name": "Max stories"}, + {"id": "customfield_205", "name": "Epic"}, ] -@pytest.fixture -def settings(custom_settings): - return extend_dict(custom_settings, { - 'quantiles': [0.1, 0.3, 0.5], - 'progress_report': 'progress.html', - 'progress_report_title': 'Test progress report', - 'progress_report_epic_query_template': 'issuetype=epic AND Outcome={outcome}', - 'progress_report_story_query_template': 'issuetype=story AND Epic={epic}', - 'progress_report_epic_deadline_field': 'Deadline', - 'progress_report_epic_min_stories_field': 'Min 
stories', - 'progress_report_epic_max_stories_field': 'Max stories', - 'progress_report_epic_team_field': 'Team', - 'progress_report_teams': [ - { - 'name': 'Team 1', - 'min_throughput': 5, - 'max_throughput': 10, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - }, { - 'name': 'Team 2', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': 'issuetype=feature AND resolution=Done', - 'throughput_samples_window': 6, - 'wip': 2, - } - ], - 'progress_report_outcomes': [ - { - 'key': 'O1', - 'name': 'Outcome one', - 'deadline': None, - 'epic_query': None - }, { - 'key': None, - 'name': 'Outcome two', - 'deadline': None, - 'epic_query': 'outcome="Outcome two" AND status=in-progress' - } - ], - 'progress_report_outcome_query': None, - 'progress_report_outcome_deadline_field': None, - }) - -@pytest.fixture -def query_manager(fields, settings): - - field_lookup = {v['name'].lower(): v['id'] for v in fields} + +@pytest.fixture(name="settings") +def fixture_settings(custom_settings): + return extend_dict( + custom_settings, + { + "quantiles": [0.1, 0.3, 0.5], + "progress_report": "progress.html", + "progress_report_title": "Test progress report", + "progress_report_epic_query_template": "issuetype=epic AND Outcome={outcome}", + "progress_report_story_query_template": "issuetype=story AND Epic={epic}", + "progress_report_epic_deadline_field": "Deadline", + "progress_report_epic_min_stories_field": "Min stories", + "progress_report_epic_max_stories_field": "Max stories", + "progress_report_epic_team_field": "Team", + "progress_report_teams": [ + { + "name": "Team 1", + "min_throughput": 5, + "max_throughput": 10, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + { + "name": "Team 2", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": "issuetype=feature AND resolution=Done", + "throughput_samples_window": 6, + "wip": 2, + }, + ], + "progress_report_outcomes": [ + 
{"key": "O1", "name": "Outcome one", "deadline": None, "epic_query": None}, + { + "key": None, + "name": "Outcome two", + "deadline": None, + "epic_query": 'outcome="Outcome two" AND status=in-progress', + }, + ], + "progress_report_outcome_query": None, + "progress_report_outcome_deadline_field": None, + }, + ) + + +@pytest.fixture(name="query_manager") +def fixture_query_manager(fields, settings): + + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) return QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=[ - - # Outcomes as tickets - Issue("O-1", - summary="Outcome ticket one", - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_202='2018-05-01 00:00:00', - changes=[] - ), - - Issue("O-2", - summary="Outcome ticket two", - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_202=None, - changes=[] - ), - - # Epics - Issue("E-1", - summary="Epic 1", - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_001="Team 1", - customfield_201="O1", - customfield_202='2018-03-01 00:00:00', - customfield_203=10, - customfield_204=15, - changes=[] - ), - - Issue("E-2", - summary="Epic 2", - 
issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_001="Team 1", - customfield_201="O1", - customfield_202='2018-03-01 00:00:00', - customfield_203=None, - customfield_204=None, - changes=[] - ), - - Issue("E-3", - summary="Epic 3", - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_001="Team 2", - customfield_201="O1", - customfield_202=None, - customfield_203=5, - customfield_204=5, - changes=[] - ), - - Issue("E-4", - summary="Epic 4", - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_001="Team 1", - customfield_201="Outcome two", - customfield_202=None, - customfield_203=0, - customfield_204=0, - changes=[] - ), - - Issue("E-4", - summary="Epic 4", - issuetype=Value('Epic', 'epic'), - status=Value('Withdrawn', 'withdrawn'), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_001="Team 2", - customfield_201="Outcome two", - customfield_202=None, - customfield_203=0, - customfield_204=0, - changes=[] - ), - - - # Stories for epic E-1 - Issue("A-1", - summary="Just created", - issuetype=Value("Story", "story"), - status=Value("Backlog", "backlog"), - resolution=None, - resolutiondate=None, - created="2018-01-01 01:01:01", - customfield_205="E-1", - changes=[], - ), - Issue("A-2", - summary="Started", - issuetype=Value("Story", "story"), - status=Value("Next", "next"), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - customfield_205="E-1", - changes=[ - Change("2018-01-02 10:01:01", [("Flagged", None, "Impediment")]), - Change("2018-01-03 01:00:00", [("Flagged", "Impediment", "")]), # blocked 1 day in the backlog (doesn't count towards blocked days) - 
Change("2018-01-03 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-04 10:01:01", [("Flagged", "", "Impediment")]), - Change("2018-01-05 08:01:01", [("Flagged", "Impediment", "")]), # was blocked 1 day - Change("2018-01-08 10:01:01", [("Flagged", "", "Impediment")]), # stays blocked until today - ], - ), - Issue("A-3", - summary="Completed", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-06 01:01:01", - created="2018-01-03 01:01:01", - customfield_205="E-1", - changes=[ - Change("2018-01-03 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-04 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-04 10:01:01", [("Flagged", None, "Impediment")]), # should clear two days later when issue resolved - Change("2018-01-05 01:01:01", [("status", "Build", "QA",)]), - Change("2018-01-06 01:01:01", [("status", "QA", "Done",)]), - ], - ), - Issue("A-4", - summary="Moved back", - issuetype=Value("Story", "story"), - status=Value("Next", "next"), - resolution=None, - resolutiondate=None, - created="2018-01-04 01:01:01", - customfield_205="E-1", - changes=[ - Change("2018-01-04 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-05 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-06 01:01:01", [("status", "Build", "Next",)]), - Change("2018-01-07 01:01:01", [("Flagged", None, "Awaiting input")]), - Change("2018-01-10 10:01:01", [("Flagged", "Awaiting input", "")]), # blocked 3 days - ], - ), - - # Stories for epic E-2 - Issue("A-5", - summary="Just created", - issuetype=Value("Story", "story"), - status=Value("Backlog", "backlog"), - resolution=None, - resolutiondate=None, - created="2018-01-01 01:01:01", - customfield_205="E-2", - changes=[], - ), - - # No stories for epic E-3 - - # Features, used to calculate team throughput - Issue("F-1", - summary="Just created", - issuetype=Value("Feature", "feature"), - status=Value("Backlog", 
"backlog"), - resolution=None, - resolutiondate=None, - created="2018-01-01 01:01:01", - changes=[], - ), - Issue("F-2", - summary="Started", - issuetype=Value("Feature", "feature"), - status=Value("Next", "next"), - resolution=None, - resolutiondate=None, - created="2018-01-02 01:01:01", - changes=[ - Change("2018-01-03 01:01:01", [("status", "Backlog", "Next",)]), - ], - ), - Issue("F-3", - summary="Completed", - issuetype=Value("Feature", "feature"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-06 01:01:01", - created="2018-01-03 01:01:01", - customfield_205="E-1", - changes=[ - Change("2018-01-03 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-04 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-05 01:01:01", [("status", "Build", "QA",)]), - Change("2018-01-06 01:01:01", [("status", "QA", "Done",)]), - ], - ), - Issue("F-4", - summary="Also completed", - issuetype=Value("Feature", "feature"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-06 01:01:03", - created="2018-01-04 01:01:01", - customfield_205="E-1", - changes=[ - Change("2018-01-04 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-05 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-05 01:01:02", [("status", "Build", "QA",)]), - Change("2018-01-06 01:01:03", [("status", "QA", "Done",)]), - ], - ), - Issue("F-5", - summary="Completed on a different day", - issuetype=Value("Feature", "feature"), - status=Value("Done", "done"), - resolution=Value("Done", "Done"), - resolutiondate="2018-01-08 01:01:01", - created="2018-01-04 01:01:01", - customfield_205="E-1", - changes=[ - Change("2018-01-04 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-05 01:01:01", [("status", "Next", "Build",)]), - Change("2018-01-05 01:01:02", [("status", "Build", "QA",)]), - Change("2018-01-08 01:01:03", [("status", "QA", "Done",)]), - ], - ), - - - ]), - 
settings=settings + jira=JIRA( + fields=fields, + filter_=simple_ql, + issues=[ + # Outcomes as tickets + Issue( + "O-1", + summary="Outcome ticket one", + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_202="2018-05-01 00:00:00", + changes=[], + ), + Issue( + "O-2", + summary="Outcome ticket two", + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_202=None, + changes=[], + ), + # Epics + Issue( + "E-1", + summary="Epic 1", + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_001="Team 1", + customfield_201="O1", + customfield_202="2018-03-01 00:00:00", + customfield_203=10, + customfield_204=15, + changes=[], + ), + Issue( + "E-2", + summary="Epic 2", + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_001="Team 1", + customfield_201="O1", + customfield_202="2018-03-01 00:00:00", + customfield_203=None, + customfield_204=None, + changes=[], + ), + Issue( + "E-3", + summary="Epic 3", + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_001="Team 2", + customfield_201="O1", + customfield_202=None, + customfield_203=5, + customfield_204=5, + changes=[], + ), + Issue( + "E-4", + summary="Epic 4", + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_001="Team 1", + customfield_201="Outcome two", + customfield_202=None, + customfield_203=0, + customfield_204=0, + 
changes=[], + ), + Issue( + "E-4", + summary="Epic 4", + issuetype=Value("Epic", "epic"), + status=Value("Withdrawn", "withdrawn"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_001="Team 2", + customfield_201="Outcome two", + customfield_202=None, + customfield_203=0, + customfield_204=0, + changes=[], + ), + # Stories for epic E-1 + Issue( + "A-1", + summary="Just created", + issuetype=Value("Story", "story"), + status=Value("Backlog", "backlog"), + resolution=None, + resolutiondate=None, + created="2018-01-01 01:01:01", + customfield_205="E-1", + changes=[], + ), + Issue( + "A-2", + summary="Started", + issuetype=Value("Story", "story"), + status=Value("Next", "next"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + customfield_205="E-1", + changes=[ + Change("2018-01-02 10:01:01", [("Flagged", None, "Impediment")]), + Change( + "2018-01-03 01:00:00", [("Flagged", "Impediment", "")] + ), # blocked 1 day in the backlog (doesn't count towards blocked days) + Change("2018-01-03 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-04 10:01:01", [("Flagged", "", "Impediment")]), + Change("2018-01-05 08:01:01", [("Flagged", "Impediment", "")]), # was blocked 1 day + Change("2018-01-08 10:01:01", [("Flagged", "", "Impediment")]), # stays blocked until today + ], + ), + Issue( + "A-3", + summary="Completed", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-06 01:01:01", + created="2018-01-03 01:01:01", + customfield_205="E-1", + changes=[ + Change("2018-01-03 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-04 01:01:01", [("status", "Next", "Build")]), + Change( + "2018-01-04 10:01:01", [("Flagged", None, "Impediment")] + ), # should clear two days later when issue resolved + Change("2018-01-05 01:01:01", [("status", "Build", "QA")]), + Change("2018-01-06 01:01:01", [("status", "QA", "Done")]), 
+ ], + ), + Issue( + "A-4", + summary="Moved back", + issuetype=Value("Story", "story"), + status=Value("Next", "next"), + resolution=None, + resolutiondate=None, + created="2018-01-04 01:01:01", + customfield_205="E-1", + changes=[ + Change("2018-01-04 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-05 01:01:01", [("status", "Next", "Build")]), + Change("2018-01-06 01:01:01", [("status", "Build", "Next")]), + Change("2018-01-07 01:01:01", [("Flagged", None, "Awaiting input")]), + Change("2018-01-10 10:01:01", [("Flagged", "Awaiting input", "")]), # blocked 3 days + ], + ), + # Stories for epic E-2 + Issue( + "A-5", + summary="Just created", + issuetype=Value("Story", "story"), + status=Value("Backlog", "backlog"), + resolution=None, + resolutiondate=None, + created="2018-01-01 01:01:01", + customfield_205="E-2", + changes=[], + ), + # No stories for epic E-3 + # Features, used to calculate team throughput + Issue( + "F-1", + summary="Just created", + issuetype=Value("Feature", "feature"), + status=Value("Backlog", "backlog"), + resolution=None, + resolutiondate=None, + created="2018-01-01 01:01:01", + changes=[], + ), + Issue( + "F-2", + summary="Started", + issuetype=Value("Feature", "feature"), + status=Value("Next", "next"), + resolution=None, + resolutiondate=None, + created="2018-01-02 01:01:01", + changes=[Change("2018-01-03 01:01:01", [("status", "Backlog", "Next")])], + ), + Issue( + "F-3", + summary="Completed", + issuetype=Value("Feature", "feature"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-06 01:01:01", + created="2018-01-03 01:01:01", + customfield_205="E-1", + changes=[ + Change("2018-01-03 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-04 01:01:01", [("status", "Next", "Build")]), + Change("2018-01-05 01:01:01", [("status", "Build", "QA")]), + Change("2018-01-06 01:01:01", [("status", "QA", "Done")]), + ], + ), + Issue( + "F-4", + summary="Also completed", + 
issuetype=Value("Feature", "feature"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-06 01:01:03", + created="2018-01-04 01:01:01", + customfield_205="E-1", + changes=[ + Change("2018-01-04 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-05 01:01:01", [("status", "Next", "Build")]), + Change("2018-01-05 01:01:02", [("status", "Build", "QA")]), + Change("2018-01-06 01:01:03", [("status", "QA", "Done")]), + ], + ), + Issue( + "F-5", + summary="Completed on a different day", + issuetype=Value("Feature", "feature"), + status=Value("Done", "done"), + resolution=Value("Done", "Done"), + resolutiondate="2018-01-08 01:01:01", + created="2018-01-04 01:01:01", + customfield_205="E-1", + changes=[ + Change("2018-01-04 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-05 01:01:01", [("status", "Next", "Build")]), + Change("2018-01-05 01:01:02", [("status", "Build", "QA")]), + Change("2018-01-08 01:01:03", [("status", "QA", "Done")]), + ], + ), + ], + ), + settings=settings, ) -@pytest.fixture -def results(): + +@pytest.fixture(name="results") +def fixture_results(): return {} + + def test_throughput_range_sampler(): sampler = throughput_range_sampler(5, 5) - for i in range(10): + for _ in range(10): assert sampler() == 5 - + sampler = throughput_range_sampler(5, 10) - for i in range(10): + for _ in range(10): assert 5 <= sampler() <= 10 + def test_calculate_epic_target(): - assert calculate_epic_target(Epic( - key='E-1', - summary='Epic 1', - status='in-progress', - resolution=None, - resolution_date=None, - min_stories=5, - max_stories=5, - team_name='Team 1', - deadline=None, - stories_raised=None - )) == 5 - - assert calculate_epic_target(Epic( - key='E-1', - summary='Epic 1', - status='in-progress', - resolution=None, - resolution_date=None, - min_stories=8, - max_stories=5, - team_name='Team 1', - deadline=None, - stories_raised=None - )) == 8 - - assert calculate_epic_target(Epic( - key='E-1', - 
summary='Epic 1', - status='in-progress', - resolution=None, - resolution_date=None, - min_stories=0, - max_stories=3, - team_name='Team 1', - deadline=None, - stories_raised=6 - )) <= 3 + assert ( + calculate_epic_target( + Epic( + key="E-1", + summary="Epic 1", + status="in-progress", + resolution=None, + resolution_date=None, + min_stories=5, + max_stories=5, + team_name="Team 1", + deadline=None, + stories_raised=None, + ) + ) + == 5 + ) + + assert ( + calculate_epic_target( + Epic( + key="E-1", + summary="Epic 1", + status="in-progress", + resolution=None, + resolution_date=None, + min_stories=8, + max_stories=5, + team_name="Team 1", + deadline=None, + stories_raised=None, + ) + ) + == 8 + ) + + assert ( + calculate_epic_target( + Epic( + key="E-1", + summary="Epic 1", + status="in-progress", + resolution=None, + resolution_date=None, + min_stories=0, + max_stories=3, + team_name="Team 1", + deadline=None, + stories_raised=6, + ) + ) + <= 3 + ) + def test_find_outcomes(query_manager): - - outcomes = list(find_outcomes( - query_manager=query_manager, - query="issuetype=outcome", - outcome_deadline_field="customfield_202", - epic_query_template="issuetype=epic AND Outcome={outcome}" - )) + + outcomes = list( + find_outcomes( + query_manager=query_manager, + query="issuetype=outcome", + outcome_deadline_field="customfield_202", + epic_query_template="issuetype=epic AND Outcome={outcome}", + ) + ) assert len(outcomes) == 2 - + assert outcomes[0].key == "O-1" assert outcomes[0].name == "Outcome ticket one" assert outcomes[0].deadline == datetime(2018, 5, 1, 0, 0, 0) @@ -441,17 +479,20 @@ def test_find_outcomes(query_manager): assert outcomes[1].deadline is None assert outcomes[1].epic_query == 'issuetype=epic AND Outcome="O-2"' + def test_find_outcomes_no_deadline_field(query_manager): - - outcomes = list(find_outcomes( - query_manager=query_manager, - query="issuetype=outcome", - outcome_deadline_field=None, - epic_query_template="issuetype=epic AND 
Outcome={outcome}" - )) + + outcomes = list( + find_outcomes( + query_manager=query_manager, + query="issuetype=outcome", + outcome_deadline_field=None, + epic_query_template="issuetype=epic AND Outcome={outcome}", + ) + ) assert len(outcomes) == 2 - + assert outcomes[0].key == "O-1" assert outcomes[0].name == "Outcome ticket one" assert outcomes[0].deadline is None @@ -462,130 +503,140 @@ def test_find_outcomes_no_deadline_field(query_manager): assert outcomes[1].deadline is None assert outcomes[1].epic_query == 'issuetype=epic AND Outcome="O-2"' + def test_find_epics(query_manager): - outcome = Outcome("Outcome one", "O1", None, 'issuetype=epic AND Outcome=O1') - - epics = list(find_epics( - query_manager=query_manager, - epic_min_stories_field='customfield_203', - epic_max_stories_field='customfield_204', - epic_team_field='customfield_001', - epic_deadline_field='customfield_202', - outcome=outcome) + outcome = Outcome("Outcome one", "O1", None, "issuetype=epic AND Outcome=O1") + + epics = list( + find_epics( + query_manager=query_manager, + epic_min_stories_field="customfield_203", + epic_max_stories_field="customfield_204", + epic_team_field="customfield_001", + epic_deadline_field="customfield_202", + outcome=outcome, + ) ) assert len(epics) == 3 assert epics[0].__dict__ == { - 'key': 'E-1', - 'summary': 'Epic 1', - 'status': 'In progress', - 'resolution': None, - 'resolution_date': None, - 'team_name': 'Team 1', - 'deadline': datetime(2018, 3, 1, 0, 0), - 'min_stories': 10, - 'max_stories': 15, - 'story_cycle_times': None, - 'stories_raised': None, - 'stories_in_backlog': None, - 'stories_in_progress': None, - 'stories_done': None, - 'first_story_started': None, - 'last_story_finished': None, - 'outcome': outcome, - 'team': None, - 'forecast': None, - 'story_query': None, + "key": "E-1", + "summary": "Epic 1", + "status": "In progress", + "resolution": None, + "resolution_date": None, + "team_name": "Team 1", + "deadline": datetime(2018, 3, 1, 0, 0), + 
"min_stories": 10, + "max_stories": 15, + "story_cycle_times": None, + "stories_raised": None, + "stories_in_backlog": None, + "stories_in_progress": None, + "stories_done": None, + "first_story_started": None, + "last_story_finished": None, + "outcome": outcome, + "team": None, + "forecast": None, + "story_query": None, } - assert epics[1].key == 'E-2' - assert epics[2].key == 'E-3' + assert epics[1].key == "E-2" + assert epics[2].key == "E-3" + def test_find_epics_minimal_fields(query_manager): - outcome = Outcome("Outcome one", "O1", None, 'issuetype=epic AND Outcome=O1') + outcome = Outcome("Outcome one", "O1", None, "issuetype=epic AND Outcome=O1") - epics = list(find_epics( - query_manager=query_manager, - epic_min_stories_field=None, - epic_max_stories_field=None, - epic_team_field=None, - epic_deadline_field=None, - outcome=outcome) + epics = list( + find_epics( + query_manager=query_manager, + epic_min_stories_field=None, + epic_max_stories_field=None, + epic_team_field=None, + epic_deadline_field=None, + outcome=outcome, + ) ) assert len(epics) == 3 assert epics[0].__dict__ == { - 'key': 'E-1', - 'summary': 'Epic 1', - 'status': 'In progress', - 'resolution': None, - 'resolution_date': None, - 'team_name': None, - 'deadline': None, - 'min_stories': None, - 'max_stories': None, - 'story_cycle_times': None, - 'stories_raised': None, - 'stories_in_backlog': None, - 'stories_in_progress': None, - 'stories_done': None, - 'first_story_started': None, - 'last_story_finished': None, - 'outcome': outcome, - 'team': None, - 'forecast': None, - 'story_query': None, + "key": "E-1", + "summary": "Epic 1", + "status": "In progress", + "resolution": None, + "resolution_date": None, + "team_name": None, + "deadline": None, + "min_stories": None, + "max_stories": None, + "story_cycle_times": None, + "stories_raised": None, + "stories_in_backlog": None, + "stories_in_progress": None, + "stories_done": None, + "first_story_started": None, + "last_story_finished": None, + 
"outcome": outcome, + "team": None, + "forecast": None, + "story_query": None, } - assert epics[1].key == 'E-2' - assert epics[2].key == 'E-3' + assert epics[1].key == "E-2" + assert epics[2].key == "E-3" + def test_find_epics_defaults_to_outcome_deadline(query_manager): - outcome = Outcome("Outcome one", "O1", datetime(2019, 6, 1), 'issuetype=epic AND Outcome=O1') - - epics = list(find_epics( - query_manager=query_manager, - epic_min_stories_field='customfield_203', - epic_max_stories_field='customfield_204', - epic_team_field='customfield_001', - epic_deadline_field='customfield_202', - outcome=outcome) + outcome = Outcome("Outcome one", "O1", datetime(2019, 6, 1), "issuetype=epic AND Outcome=O1") + + epics = list( + find_epics( + query_manager=query_manager, + epic_min_stories_field="customfield_203", + epic_max_stories_field="customfield_204", + epic_team_field="customfield_001", + epic_deadline_field="customfield_202", + outcome=outcome, + ) ) assert len(epics) == 3 assert epics[0].__dict__ == { - 'key': 'E-1', - 'summary': 'Epic 1', - 'status': 'In progress', - 'resolution': None, - 'resolution_date': None, - 'team_name': 'Team 1', - 'deadline': datetime(2018, 3, 1, 0, 0), - 'min_stories': 10, - 'max_stories': 15, - 'story_cycle_times': None, - 'stories_raised': None, - 'stories_in_backlog': None, - 'stories_in_progress': None, - 'stories_done': None, - 'first_story_started': None, - 'last_story_finished': None, - 'outcome': outcome, - 'team': None, - 'forecast': None, - 'story_query': None, + "key": "E-1", + "summary": "Epic 1", + "status": "In progress", + "resolution": None, + "resolution_date": None, + "team_name": "Team 1", + "deadline": datetime(2018, 3, 1, 0, 0), + "min_stories": 10, + "max_stories": 15, + "story_cycle_times": None, + "stories_raised": None, + "stories_in_backlog": None, + "stories_in_progress": None, + "stories_done": None, + "first_story_started": None, + "last_story_finished": None, + "outcome": outcome, + "team": None, + 
"forecast": None, + "story_query": None, } - assert epics[1].key == 'E-2' + assert epics[1].key == "E-2" assert epics[1].deadline == datetime(2018, 3, 1, 0, 0) - assert epics[2].key == 'E-3' + assert epics[2].key == "E-3" assert epics[2].deadline == datetime(2019, 6, 1, 0, 0) + def test_update_story_counts(query_manager, settings): - - e1 = Epic( + + epic_1 = Epic( key="E-1", summary="Epic 1", status="in-progress", @@ -595,28 +646,28 @@ def test_update_story_counts(query_manager, settings): max_stories=5, team_name=None, deadline=None, - story_query="issuetype=story AND epic=E-1" + story_query="issuetype=story AND epic=E-1", ) update_story_counts( - epic=e1, + epic=epic_1, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'] + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], ) - assert e1.stories_raised == 4 - assert e1.stories_in_backlog == 1 - assert e1.stories_in_progress == 2 - assert e1.stories_done == 1 - assert e1.first_story_started == date(2018, 1, 3) - assert e1.last_story_finished == date(2018, 1, 6) - assert e1.min_stories == 4 - assert e1.max_stories == 5 - assert isinstance(e1.story_cycle_times, pd.DataFrame) - - e2 = Epic( + assert epic_1.stories_raised == 4 + assert epic_1.stories_in_backlog == 1 + assert epic_1.stories_in_progress == 2 + assert epic_1.stories_done == 1 + assert epic_1.first_story_started == date(2018, 1, 3) + assert epic_1.last_story_finished == date(2018, 1, 6) + assert epic_1.min_stories == 4 + assert epic_1.max_stories == 5 + assert isinstance(epic_1.story_cycle_times, pd.DataFrame) + + epic_2 = Epic( key="E-2", summary="Epic 2", status="in-progress", @@ -626,28 +677,28 @@ def test_update_story_counts(query_manager, settings): max_stories=None, team_name=None, deadline=None, - story_query="issuetype=story AND epic=E-2" + story_query="issuetype=story AND epic=E-2", ) 
update_story_counts( - epic=e2, + epic=epic_2, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'] + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], ) - assert e2.stories_raised == 1 - assert e2.stories_in_backlog == 1 - assert e2.stories_in_progress == 0 - assert e2.stories_done == 0 - assert e2.first_story_started is None - assert e2.last_story_finished is None - assert e2.min_stories == 1 - assert e2.max_stories == 1 - assert isinstance(e2.story_cycle_times, pd.DataFrame) - - e3 = Epic( + assert epic_2.stories_raised == 1 + assert epic_2.stories_in_backlog == 1 + assert epic_2.stories_in_progress == 0 + assert epic_2.stories_done == 0 + assert epic_2.first_story_started is None + assert epic_2.last_story_finished is None + assert epic_2.min_stories == 1 + assert epic_2.max_stories == 1 + assert isinstance(epic_2.story_cycle_times, pd.DataFrame) + + epic_3 = Epic( key="E-3", summary="Epic 3", status="in-progress", @@ -657,126 +708,128 @@ def test_update_story_counts(query_manager, settings): max_stories=0, team_name=None, deadline=None, - story_query="issuetype=story AND epic=E-3" + story_query="issuetype=story AND epic=E-3", ) update_story_counts( - epic=e3, + epic=epic_3, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'] + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], ) - assert e3.stories_raised == 0 - assert e3.stories_in_backlog == 0 - assert e3.stories_in_progress == 0 - assert e3.stories_done == 0 - assert e3.first_story_started is None - assert e3.last_story_finished is None - assert e3.min_stories == 0 - assert e3.max_stories == 1 - assert isinstance(e3.story_cycle_times, pd.DataFrame) + assert epic_3.stories_raised == 0 + assert epic_3.stories_in_backlog == 0 
+ assert epic_3.stories_in_progress == 0 + assert epic_3.stories_done == 0 + assert epic_3.first_story_started is None + assert epic_3.last_story_finished is None + assert epic_3.min_stories == 0 + assert epic_3.max_stories == 1 + assert isinstance(epic_3.story_cycle_times, pd.DataFrame) + def test_calculate_team_throughput(query_manager, settings): - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=None, max_throughput=None, - throughput_samples='issuetype=feature', + throughput_samples="issuetype=feature", throughput_samples_window=None, ) throughput = calculate_team_throughput( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) assert list(throughput.index) == [ - pd.Timestamp('2018-01-06'), - pd.Timestamp('2018-01-07'), - pd.Timestamp('2018-01-08'), + pd.Timestamp("2018-01-06"), + pd.Timestamp("2018-01-07"), + pd.Timestamp("2018-01-08"), ] - assert throughput.to_dict('records') == [ - {'count': 2}, - {'count': 0}, - {'count': 1}, + assert throughput.to_dict("records") == [ + {"count": 2}, + {"count": 0}, + {"count": 1}, ] - assert isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=None, max_throughput=None, - throughput_samples='issuetype=feature', + throughput_samples="issuetype=feature", throughput_samples_window=2, ) throughput = calculate_team_throughput( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + 
frequency="1D", ) assert list(throughput.index) == [ - pd.Timestamp('2018-01-07'), - pd.Timestamp('2018-01-08'), + pd.Timestamp("2018-01-07"), + pd.Timestamp("2018-01-08"), ] - assert throughput.to_dict('records') == [ - {'count': 0}, - {'count': 1}, + assert throughput.to_dict("records") == [ + {"count": 0}, + {"count": 1}, ] - assert isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=None, max_throughput=None, - throughput_samples='issuetype=feature', + throughput_samples="issuetype=feature", throughput_samples_window=5, ) throughput = calculate_team_throughput( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) assert list(throughput.index) == [ - pd.Timestamp('2018-01-04'), - pd.Timestamp('2018-01-05'), - pd.Timestamp('2018-01-06'), - pd.Timestamp('2018-01-07'), - pd.Timestamp('2018-01-08'), + pd.Timestamp("2018-01-04"), + pd.Timestamp("2018-01-05"), + pd.Timestamp("2018-01-06"), + pd.Timestamp("2018-01-07"), + pd.Timestamp("2018-01-08"), ] - assert throughput.to_dict('records') == [ - {'count': 0}, - {'count': 0}, - {'count': 2}, - {'count': 0}, - {'count': 1}, + assert throughput.to_dict("records") == [ + {"count": 0}, + {"count": 0}, + {"count": 2}, + {"count": 0}, + {"count": 1}, ] - assert isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) + def test_update_team_sampler(query_manager, settings): - + # min/max only - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=5, @@ -786,116 +839,113 @@ def test_update_team_sampler(query_manager, settings): ) update_team_sampler( - 
team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) - assert t.sampler.__name__ == 'get_throughput_range_sample' - assert t.throughput_samples_cycle_times is None + assert team.sampler.__name__ == "get_throughput_range_sample" + assert team.throughput_samples_cycle_times is None # query only - with completed stories - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=None, max_throughput=None, - throughput_samples='issuetype=feature', + throughput_samples="issuetype=feature", throughput_samples_window=None, ) update_team_sampler( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) - assert t.sampler.__name__ == 'get_throughput_sample' - assert isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert team.sampler.__name__ == "get_throughput_sample" + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) # query only - no completed stories - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=None, max_throughput=None, - throughput_samples='issuetype=notfound', + throughput_samples="issuetype=notfound", throughput_samples_window=None, ) update_team_sampler( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) - assert t.sampler is None - assert 
isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert team.sampler is None + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) # query with no completed stories + min/max - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=5, max_throughput=10, - throughput_samples='issuetype=notfound', + throughput_samples="issuetype=notfound", throughput_samples_window=None, ) update_team_sampler( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) - assert t.sampler.__name__ == 'get_throughput_range_sample' - assert isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert team.sampler.__name__ == "get_throughput_range_sample" + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) # query with completed stories + min/max - t = Team( + team = Team( name="Team 1", wip=1, min_throughput=5, max_throughput=10, - throughput_samples='issuetype=feature', + throughput_samples="issuetype=feature", throughput_samples_window=None, ) update_team_sampler( - team=t, + team=team, query_manager=query_manager, - cycle=settings['cycle'], - backlog_column=settings['backlog_column'], - done_column=settings['done_column'], - frequency='1D' + cycle=settings["cycle"], + backlog_column=settings["backlog_column"], + done_column=settings["done_column"], + frequency="1D", ) - assert t.sampler.__name__ == 'get_throughput_sample' - assert isinstance(t.throughput_samples_cycle_times, pd.DataFrame) + assert team.sampler.__name__ == "get_throughput_sample" + assert isinstance(team.throughput_samples_cycle_times, pd.DataFrame) + def test_forecast_to_complete_wip_1(): - - team = Team( - name='Team 1', - wip=1, - sampler=throughput_range_sampler(2, 2) # makes tests predictable - ) + + team 
= Team(name="Team 1", wip=1, sampler=throughput_range_sampler(2, 2)) # makes tests predictable epics = [ Epic( @@ -906,7 +956,7 @@ def test_forecast_to_complete_wip_1(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=None, team=team, stories_raised=8, @@ -922,7 +972,7 @@ def test_forecast_to_complete_wip_1(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=datetime(2018, 1, 20), # <5 weeks away team=team, stories_raised=10, @@ -938,14 +988,14 @@ def test_forecast_to_complete_wip_1(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=datetime(2018, 3, 1), # >7 weeks away team=team, stories_raised=10, stories_in_backlog=5, stories_in_progress=0, stories_done=6, # 10 - 6 = 4 left; 2/wk from sampler => 2 weeks - ) + ), ] forecast_to_complete(team, epics, [0.5, 0.9], trials=10, now=datetime(2018, 1, 10)) @@ -963,14 +1013,11 @@ def test_forecast_to_complete_wip_1(): assert epics[2].forecast.quantiles == [(0.5, 7.0), (0.9, 7.0)] # +2 weeks after E-2 since wip=1 assert epics[2].forecast.deadline_quantile == 1 # deadline is after worst case scenario + def test_forecast_to_complete_wip_2(): # double the wip, but also double the throughput of wip=1 test - team = Team( - name='Team 1', - wip=2, - sampler=throughput_range_sampler(4, 4) # makes tests predictable - ) + team = Team(name="Team 1", wip=2, sampler=throughput_range_sampler(4, 4)) # makes tests predictable epics = [ Epic( @@ -981,7 +1028,7 @@ def test_forecast_to_complete_wip_2(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=None, team=team, stories_raised=8, @@ -997,7 +1044,7 @@ def test_forecast_to_complete_wip_2(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=datetime(2018, 1, 20), # <2 weeks away team=team, 
stories_raised=10, @@ -1013,14 +1060,14 @@ def test_forecast_to_complete_wip_2(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=datetime(2018, 3, 1), # >4 weeks away team=team, stories_raised=10, stories_in_backlog=5, stories_in_progress=0, stories_done=6, # 10 - 6 = 4 left; 2/wk from sampler => 2 weeks, starting after E-2 - ) + ), ] forecast_to_complete(team, epics, [0.5, 0.9], trials=10, now=datetime(2018, 1, 10)) @@ -1035,15 +1082,15 @@ def test_forecast_to_complete_wip_2(): assert epics[1].forecast.quantiles == [(0.5, 2.0), (0.9, 2.0)] # +2 weeks in parallel with E-1 since wip=2 assert epics[1].forecast.deadline_quantile == 1 # deadline is same week as best case scenario - assert epics[2].forecast.quantiles == [(0.5, 4.0), (0.9, 4.0)] # +2 weeks after E-2 since wip=2 and it finishes first + assert epics[2].forecast.quantiles == [ + (0.5, 4.0), + (0.9, 4.0), + ] # +2 weeks after E-2 since wip=2 and it finishes first assert epics[2].forecast.deadline_quantile == 1 # deadline is after worst case scenario + def test_forecast_to_complete_no_epics(): - team = Team( - name='Team 1', - wip=1, - sampler=throughput_range_sampler(2, 2) # makes tests predictable - ) + team = Team(name="Team 1", wip=1, sampler=throughput_range_sampler(2, 2)) # makes tests predictable epics = [] @@ -1051,13 +1098,10 @@ def test_forecast_to_complete_no_epics(): assert len(epics) == 0 + def test_forecast_to_complete_with_randomness(): - - team = Team( - name='Team 1', - wip=2, - sampler=throughput_range_sampler(4, 9) # makes tests predictable - ) + + team = Team(name="Team 1", wip=2, sampler=throughput_range_sampler(4, 9)) # makes tests predictable epics = [ Epic( @@ -1068,7 +1112,7 @@ def test_forecast_to_complete_with_randomness(): resolution_date=None, min_stories=10, max_stories=15, - team_name='Team 1', + team_name="Team 1", deadline=None, team=team, stories_raised=8, @@ -1084,7 +1128,7 @@ def 
test_forecast_to_complete_with_randomness(): resolution_date=None, min_stories=10, max_stories=20, - team_name='Team 1', + team_name="Team 1", deadline=datetime(2018, 1, 20), team=team, stories_raised=10, @@ -1100,14 +1144,14 @@ def test_forecast_to_complete_with_randomness(): resolution_date=None, min_stories=10, max_stories=10, - team_name='Team 1', + team_name="Team 1", deadline=datetime(2018, 3, 1), team=team, stories_raised=10, stories_in_backlog=5, stories_in_progress=0, stories_done=6, - ) + ), ] forecast_to_complete(team, epics, [0.5, 0.9], trials=100, now=datetime(2018, 1, 10)) @@ -1125,786 +1169,795 @@ def test_forecast_to_complete_with_randomness(): assert [q[0] for q in epics[2].forecast.quantiles] == [0.5, 0.9] assert epics[2].forecast.deadline_quantile == 1 # deadline is after worst case scenario + def test_calculator(query_manager, settings, results): - + calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=10, now=datetime(2018, 1, 10)) # confirm it has set up the two outcomes - assert len(data['outcomes']) == 2 - assert data['outcomes'][0].name == 'Outcome one' - assert data['outcomes'][0].key == 'O1' - assert data['outcomes'][1].name == 'Outcome two' - assert data['outcomes'][1].key == 'Outcome two' + assert len(data["outcomes"]) == 2 + assert data["outcomes"][0].name == "Outcome one" + assert data["outcomes"][0].key == "O1" + assert data["outcomes"][1].name == "Outcome two" + assert data["outcomes"][1].key == "Outcome two" # confirm it has found the right epics for each outcome - assert [e.key for e in data['outcomes'][0].epics] == ['E-1', 'E-2', 'E-3'] - assert [e.key for e in data['outcomes'][1].epics] == ['E-4'] + assert [e.key for e in data["outcomes"][0].epics] == ["E-1", "E-2", "E-3"] + assert [e.key for e in data["outcomes"][1].epics] == ["E-4"] # confirm it has mapped the right teams to the right epics - assert [e.team.name for e in data['outcomes'][0].epics] == ['Team 1', 'Team 1', 'Team 
2'] - assert [e.team.name for e in data['outcomes'][1].epics] == ['Team 1'] + assert [e.team.name for e in data["outcomes"][0].epics] == ["Team 1", "Team 1", "Team 2"] + assert [e.team.name for e in data["outcomes"][1].epics] == ["Team 1"] # confirm it has updated stories count as per `update_story_counts()` - assert data['outcomes'][0].epics[0].stories_raised == 4 - assert data['outcomes'][0].epics[0].stories_in_backlog == 1 - assert data['outcomes'][0].epics[0].stories_in_progress == 2 - assert data['outcomes'][0].epics[0].stories_done == 1 - assert data['outcomes'][0].epics[0].first_story_started == date(2018, 1, 3) - assert data['outcomes'][0].epics[0].last_story_finished == date(2018, 1, 6) + assert data["outcomes"][0].epics[0].stories_raised == 4 + assert data["outcomes"][0].epics[0].stories_in_backlog == 1 + assert data["outcomes"][0].epics[0].stories_in_progress == 2 + assert data["outcomes"][0].epics[0].stories_done == 1 + assert data["outcomes"][0].epics[0].first_story_started == date(2018, 1, 3) + assert data["outcomes"][0].epics[0].last_story_finished == date(2018, 1, 6) # confirm it has attempted a forecast - assert data['outcomes'][0].epics[0].forecast is not None - assert data['outcomes'][0].epics[0].forecast.deadline_quantile is not None - assert [q[0] for q in data['outcomes'][0].epics[0].forecast.quantiles] == [0.1, 0.3, 0.5] + assert data["outcomes"][0].epics[0].forecast is not None + assert data["outcomes"][0].epics[0].forecast.deadline_quantile is not None + assert [q[0] for q in data["outcomes"][0].epics[0].forecast.quantiles] == [0.1, 0.3, 0.5] # confirm teams - assert len(data['teams']) == 2 - - assert data['teams'][0].name == 'Team 1' - assert data['teams'][0].min_throughput == 5 - assert data['teams'][0].max_throughput == 10 - assert data['teams'][0].throughput_samples is None - assert data['teams'][0].throughput_samples_window is None - - assert data['teams'][1].name == 'Team 2' - assert data['teams'][1].min_throughput is None - assert 
data['teams'][1].max_throughput is None - assert data['teams'][1].throughput_samples == 'issuetype=feature AND resolution=Done' - assert data['teams'][1].throughput_samples_window == 6 + assert len(data["teams"]) == 2 + + assert data["teams"][0].name == "Team 1" + assert data["teams"][0].min_throughput == 5 + assert data["teams"][0].max_throughput == 10 + assert data["teams"][0].throughput_samples is None + assert data["teams"][0].throughput_samples_window is None + + assert data["teams"][1].name == "Team 2" + assert data["teams"][1].min_throughput is None + assert data["teams"][1].max_throughput is None + assert data["teams"][1].throughput_samples == "issuetype=feature AND resolution=Done" + assert data["teams"][1].throughput_samples_window == 6 # results[ProgressReportCalculator] = data # calculator.write() + def test_calculator_no_outcomes(query_manager, settings, results): - settings = extend_dict(settings, { - 'progress_report_epic_query_template': 'issuetype=epic AND Outcome="O1', - 'progress_report_outcomes': [], - }) - + settings = extend_dict( + settings, + {"progress_report_epic_query_template": 'issuetype=epic AND Outcome="O1', "progress_report_outcomes": []}, + ) + calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=10, now=datetime(2018, 1, 10)) # confirm it has set up the two outcomes - assert len(data['outcomes']) == 1 - assert data['outcomes'][0].name is None - assert data['outcomes'][0].key is None + assert len(data["outcomes"]) == 1 + assert data["outcomes"][0].name is None + assert data["outcomes"][0].key is None # confirm it has found the right epics for each outcome - assert [e.key for e in data['outcomes'][0].epics] == ['E-1', 'E-2', 'E-3'] + assert [e.key for e in data["outcomes"][0].epics] == ["E-1", "E-2", "E-3"] # confirm it has mapped the right teams to the right epics - assert [e.team.name for e in data['outcomes'][0].epics] == ['Team 1', 'Team 1', 'Team 2'] + assert [e.team.name for e in 
data["outcomes"][0].epics] == ["Team 1", "Team 1", "Team 2"] # confirm it has updated stories count as per `update_story_counts()` - assert data['outcomes'][0].epics[0].stories_raised == 4 - assert data['outcomes'][0].epics[0].stories_in_backlog == 1 - assert data['outcomes'][0].epics[0].stories_in_progress == 2 - assert data['outcomes'][0].epics[0].stories_done == 1 - assert data['outcomes'][0].epics[0].first_story_started == date(2018, 1, 3) - assert data['outcomes'][0].epics[0].last_story_finished == date(2018, 1, 6) + assert data["outcomes"][0].epics[0].stories_raised == 4 + assert data["outcomes"][0].epics[0].stories_in_backlog == 1 + assert data["outcomes"][0].epics[0].stories_in_progress == 2 + assert data["outcomes"][0].epics[0].stories_done == 1 + assert data["outcomes"][0].epics[0].first_story_started == date(2018, 1, 3) + assert data["outcomes"][0].epics[0].last_story_finished == date(2018, 1, 6) # confirm it has attempted a forecast - assert data['outcomes'][0].epics[0].forecast is not None - assert data['outcomes'][0].epics[0].forecast.deadline_quantile is not None - assert [q[0] for q in data['outcomes'][0].epics[0].forecast.quantiles] == [0.1, 0.3, 0.5] + assert data["outcomes"][0].epics[0].forecast is not None + assert data["outcomes"][0].epics[0].forecast.deadline_quantile is not None + assert [q[0] for q in data["outcomes"][0].epics[0].forecast.quantiles] == [0.1, 0.3, 0.5] # confirm teams - assert len(data['teams']) == 2 - - assert data['teams'][0].name == 'Team 1' - assert data['teams'][0].min_throughput == 5 - assert data['teams'][0].max_throughput == 10 - assert data['teams'][0].throughput_samples is None - assert data['teams'][0].throughput_samples_window is None - - assert data['teams'][1].name == 'Team 2' - assert data['teams'][1].min_throughput is None - assert data['teams'][1].max_throughput is None - assert data['teams'][1].throughput_samples == 'issuetype=feature AND resolution=Done' - assert data['teams'][1].throughput_samples_window 
== 6 + assert len(data["teams"]) == 2 + + assert data["teams"][0].name == "Team 1" + assert data["teams"][0].min_throughput == 5 + assert data["teams"][0].max_throughput == 10 + assert data["teams"][0].throughput_samples is None + assert data["teams"][0].throughput_samples_window is None + + assert data["teams"][1].name == "Team 2" + assert data["teams"][1].min_throughput is None + assert data["teams"][1].max_throughput is None + assert data["teams"][1].throughput_samples == "issuetype=feature AND resolution=Done" + assert data["teams"][1].throughput_samples_window == 6 # results[ProgressReportCalculator] = data # calculator.write() + def test_calculator_no_fields(query_manager, settings, results): - settings = extend_dict(settings, { - 'progress_report_epic_deadline_field': None, - 'progress_report_epic_min_stories_field': None, - 'progress_report_epic_max_stories_field': None, - 'progress_report_epic_team_field': None, - 'progress_report_teams': [ - { - 'name': 'Team 1', - 'min_throughput': 5, - 'max_throughput': 10, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - } - ], - }) + settings = extend_dict( + settings, + { + "progress_report_epic_deadline_field": None, + "progress_report_epic_min_stories_field": None, + "progress_report_epic_max_stories_field": None, + "progress_report_epic_team_field": None, + "progress_report_teams": [ + { + "name": "Team 1", + "min_throughput": 5, + "max_throughput": 10, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + } + ], + }, + ) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=10, now=datetime(2018, 1, 10)) # confirm it has set up the two outcomes - assert len(data['outcomes']) == 2 - assert data['outcomes'][0].name == 'Outcome one' - assert data['outcomes'][0].key == 'O1' - assert data['outcomes'][1].name == 'Outcome two' - assert data['outcomes'][1].key == 'Outcome two' + assert len(data["outcomes"]) == 2 + 
assert data["outcomes"][0].name == "Outcome one" + assert data["outcomes"][0].key == "O1" + assert data["outcomes"][1].name == "Outcome two" + assert data["outcomes"][1].key == "Outcome two" # confirm it has found the right epics for each outcome - assert [e.key for e in data['outcomes'][0].epics] == ['E-1', 'E-2', 'E-3'] - assert [e.key for e in data['outcomes'][1].epics] == ['E-4'] + assert [e.key for e in data["outcomes"][0].epics] == ["E-1", "E-2", "E-3"] + assert [e.key for e in data["outcomes"][1].epics] == ["E-4"] # all epics use the default team - assert [e.team.name for e in data['outcomes'][0].epics] == ['Team 1', 'Team 1', 'Team 1'] - assert [e.team.name for e in data['outcomes'][1].epics] == ['Team 1'] + assert [e.team.name for e in data["outcomes"][0].epics] == ["Team 1", "Team 1", "Team 1"] + assert [e.team.name for e in data["outcomes"][1].epics] == ["Team 1"] # confirm it has updated stories count as per `update_story_counts()` - assert data['outcomes'][0].epics[0].stories_raised == 4 - assert data['outcomes'][0].epics[0].stories_in_backlog == 1 - assert data['outcomes'][0].epics[0].stories_in_progress == 2 - assert data['outcomes'][0].epics[0].stories_done == 1 - assert data['outcomes'][0].epics[0].first_story_started == date(2018, 1, 3) - assert data['outcomes'][0].epics[0].last_story_finished == date(2018, 1, 6) + assert data["outcomes"][0].epics[0].stories_raised == 4 + assert data["outcomes"][0].epics[0].stories_in_backlog == 1 + assert data["outcomes"][0].epics[0].stories_in_progress == 2 + assert data["outcomes"][0].epics[0].stories_done == 1 + assert data["outcomes"][0].epics[0].first_story_started == date(2018, 1, 3) + assert data["outcomes"][0].epics[0].last_story_finished == date(2018, 1, 6) # confirm it has attempted a forecast - assert data['outcomes'][0].epics[0].forecast is not None - assert data['outcomes'][0].epics[0].forecast.deadline_quantile is None - assert [q[0] for q in data['outcomes'][0].epics[0].forecast.quantiles] == [0.1, 
0.3, 0.5] + assert data["outcomes"][0].epics[0].forecast is not None + assert data["outcomes"][0].epics[0].forecast.deadline_quantile is None + assert [q[0] for q in data["outcomes"][0].epics[0].forecast.quantiles] == [0.1, 0.3, 0.5] # confirm teams - assert len(data['teams']) == 1 - - assert data['teams'][0].name == 'Team 1' - assert data['teams'][0].min_throughput == 5 - assert data['teams'][0].max_throughput == 10 - assert data['teams'][0].throughput_samples is None - assert data['teams'][0].throughput_samples_window is None + assert len(data["teams"]) == 1 + + assert data["teams"][0].name == "Team 1" + assert data["teams"][0].min_throughput == 5 + assert data["teams"][0].max_throughput == 10 + assert data["teams"][0].throughput_samples is None + assert data["teams"][0].throughput_samples_window is None # results[ProgressReportCalculator] = data # calculator.write() + def test_with_large_dataset(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-large.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': random.randint(5, 8), - 'max_throughput': random.randint(10, 15), - 'throughput_samples': None, - 
'throughput_samples_window': None, - 'wip': random.randint(1, 3), - }, { - 'name': 'Blue', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': 'issuetype=story AND Team=Blue', - 'throughput_samples_window': 6, - 'wip': random.randint(1, 3), - } - ], - 'progress_report_outcomes': [ - { - 'key': 'O1', - 'name': 'MVP', - 'deadline': random_date_future(today + timedelta(days=55), 65), - 'epic_query': None, - }, { - 'key': 'O2', - 'name': 'Asia launch', - 'deadline': None, - 'epic_query': None, - }, { - 'key': 'O3', - 'name': 'Europe revamp', - 'deadline': None, - 'epic_query': None, - } - ], - }) - - teams = [t['name'] for t in settings['progress_report_teams']] - outcomes = [o['key'] for o in settings['progress_report_outcomes']] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=random.choice(outcomes), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-large.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": random.randint(5, 8), + "max_throughput": random.randint(10, 15), + "throughput_samples": None, + "throughput_samples_window": None, + "wip": random.randint(1, 3), + }, + { + "name": "Blue", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": "issuetype=story AND Team=Blue", + 
"throughput_samples_window": 6, + "wip": random.randint(1, 3), + }, + ], + "progress_report_outcomes": [ + { + "key": "O1", + "name": "MVP", + "deadline": random_date_future(today + timedelta(days=55), 65), + "epic_query": None, + }, + {"key": "O2", "name": "Asia launch", "deadline": None, "epic_query": None}, + {"key": "O3", "name": "Europe revamp", "deadline": None, "epic_query": None}, + ], + }, + ) + + teams = [t["name"] for t in settings["progress_report_teams"]] + outcomes = [o["key"] for o in settings["progress_report_outcomes"]] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_001=random.choice(teams), + customfield_201=random.choice(outcomes), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), 
issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] - query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=epics + stories), - settings=settings - ) + query_manager = QueryManager(jira=JIRA(fields=fields, filter_=simple_ql, issues=epics + stories), settings=settings) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=100) - assert len(data['outcomes']) == 3 - assert len(data['teams']) == 2 + assert len(data["outcomes"]) == 3 + assert len(data["teams"]) == 2 results[ProgressReportCalculator] = data if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_and_outcome_as_tickets(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, 
jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-outcome-tickets.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': random.randint(5, 8), - 'max_throughput': random.randint(10, 15), - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': random.randint(1, 3), - }, { - 'name': 'Blue', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': 'issuetype=story AND Team=Blue', - 'throughput_samples_window': 6, - 'wip': random.randint(1, 3), - } - ], - 'progress_report_outcomes': [], - 'progress_report_outcome_query': 'issuetype=outcome', - 'progress_report_outcome_deadline_field': 'Deadline', - }) - - teams = [t['name'] for t in settings['progress_report_teams']] - - outcomes = [Issue("O-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - changes=[] - ) for i in range(random.randint(2, 4))] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=random.choice([o.key for o in outcomes]), - customfield_202="%s 00:00:00" % random_date_future(today + 
timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-outcome-tickets.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": random.randint(5, 8), + "max_throughput": random.randint(10, 15), + "throughput_samples": None, + "throughput_samples_window": None, + "wip": random.randint(1, 3), + }, + { + "name": "Blue", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": "issuetype=story AND Team=Blue", + "throughput_samples_window": 6, + "wip": random.randint(1, 3), + }, + ], + "progress_report_outcomes": [], + "progress_report_outcome_query": "issuetype=outcome", + "progress_report_outcome_deadline_field": "Deadline", + }, + ) + + teams = [t["name"] for t in settings["progress_report_teams"]] + + outcomes = [ + Issue( + "O-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + changes=[], + ) + for i in range(random.randint(2, 4)) + ] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_001=random.choice(teams), + customfield_201=random.choice([o.key for o in 
outcomes]), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=outcomes + epics + stories), - 
settings=settings + jira=JIRA(fields=fields, filter_=simple_ql, issues=outcomes + epics + stories), settings=settings ) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=100) - assert len(data['teams']) == 2 + assert len(data["teams"]) == 2 results[ProgressReportCalculator] = data if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_and_outcome_as_tickets_no_forecast(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-no-forecast.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - }, { - 'name': 'Blue', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - } - ], - 'progress_report_outcomes': [], - 'progress_report_outcome_query': 'issuetype=outcome', - 'progress_report_outcome_deadline_field': 'Deadline', - }) - - teams = [t['name'] for t in settings['progress_report_teams']] - - outcomes = [Issue("O-%d" % i, - summary="%s %s" % 
(random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - changes=[] - ) for i in range(random.randint(2, 4))] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=random.choice([o.key for o in outcomes]), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-no-forecast.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + { + "name": "Blue", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + ], + "progress_report_outcomes": [], + "progress_report_outcome_query": "issuetype=outcome", + "progress_report_outcome_deadline_field": "Deadline", + }, + ) + + teams = [t["name"] for t in settings["progress_report_teams"]] + + outcomes = [ + Issue( + "O-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), 
random.choice(nouns)), + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + changes=[], + ) + for i in range(random.randint(2, 4)) + ] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_001=random.choice(teams), + customfield_201=random.choice([o.key for o in outcomes]), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - 
resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=outcomes + epics + stories), - settings=settings + jira=JIRA(fields=fields, filter_=simple_ql, issues=outcomes + epics + stories), settings=settings ) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=100) - assert len(data['teams']) == 2 + assert len(data["teams"]) == 2 results[ProgressReportCalculator] = data if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_and_outcome_as_tickets_mixed_forecast(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, 
c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-mixed-forecasts.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - }, { - 'name': 'Blue', - 'min_throughput': random.randint(3, 5), - 'max_throughput': random.randint(5, 7), - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': random.randint(1, 3), - } - ], - 'progress_report_outcomes': [], - 'progress_report_outcome_query': 'issuetype=outcome', - 'progress_report_outcome_deadline_field': 'Deadline', - }) - - teams = [t['name'] for t in settings['progress_report_teams']] - - outcomes = [Issue("O-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - changes=[] - ) for i in range(random.randint(2, 4))] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=random.choice([o.key for o in outcomes]), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in 
range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-mixed-forecasts.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + { + "name": "Blue", + "min_throughput": random.randint(3, 5), + "max_throughput": random.randint(5, 7), + "throughput_samples": None, + "throughput_samples_window": None, + "wip": random.randint(1, 3), + }, + ], + "progress_report_outcomes": [], + "progress_report_outcome_query": "issuetype=outcome", + "progress_report_outcome_deadline_field": "Deadline", + }, + ) + + teams = [t["name"] for t in settings["progress_report_teams"]] + + outcomes = [ + Issue( + "O-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + changes=[], + ) + for i in range(random.randint(2, 4)) + ] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_001=random.choice(teams), + customfield_201=random.choice([o.key for o in outcomes]), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), 
+ changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=outcomes + epics + stories), - settings=settings + jira=JIRA(fields=fields, filter_=simple_ql, issues=outcomes + epics + stories), settings=settings ) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=100) - assert 
len(data['teams']) == 2 + assert len(data["teams"]) == 2 results[ProgressReportCalculator] = data if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_minimal(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report_title': 'Acme Corp Websites', - 'progress_report': 'progress-minimal.html', - 'progress_report_epic_query_template': 'issuetype=epic', - 'progress_report_story_query_template': 'issuetype=story AND Epic={epic}', - 'progress_report_epic_deadline_field': None, - 'progress_report_epic_min_stories_field': None, - 'progress_report_epic_max_stories_field': None, - 'progress_report_epic_team_field': None, - 'progress_report_outcomes': [], - 'progress_report_outcome_query': None, - 'progress_report_outcome_deadline_field': None, - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': random.randint(5, 8), - 'max_throughput': random.randint(10, 15), - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': random.randint(1, 3), - }, - ], - }) - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - 
resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report_title": "Acme Corp Websites", + "progress_report": "progress-minimal.html", + "progress_report_epic_query_template": "issuetype=epic", + "progress_report_story_query_template": "issuetype=story AND Epic={epic}", + "progress_report_epic_deadline_field": None, + "progress_report_epic_min_stories_field": None, + "progress_report_epic_max_stories_field": None, + "progress_report_epic_team_field": None, + "progress_report_outcomes": [], + "progress_report_outcome_query": None, + "progress_report_outcome_deadline_field": None, + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": random.randint(5, 8), + "max_throughput": random.randint(10, 15), + "throughput_samples": None, + "throughput_samples_window": None, + "wip": random.randint(1, 3), + }, + ], + }, + ) + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) 
+ + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] - query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=epics + stories), - settings=settings - ) + query_manager = QueryManager(jira=JIRA(fields=fields, filter_=simple_ql, issues=epics + stories), settings=settings) calculator = ProgressReportCalculator(query_manager, settings, results) @@ -1915,102 +1968,97 @@ def make_story(i): if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_minimal_no_forecast(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in 
jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-minimal-no-forecast.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_epic_query_template': 'issuetype=epic', - 'progress_report_story_query_template': 'issuetype=story AND Epic={epic}', - 'progress_report_epic_deadline_field': None, - 'progress_report_epic_min_stories_field': None, - 'progress_report_epic_max_stories_field': None, - 'progress_report_epic_team_field': None, - 'progress_report_outcomes': [], - 'progress_report_outcome_query': None, - 'progress_report_outcome_deadline_field': None, - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': random.randint(1, 3), - }, - ], - }) - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-minimal-no-forecast.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_epic_query_template": "issuetype=epic", + "progress_report_story_query_template": "issuetype=story AND Epic={epic}", + "progress_report_epic_deadline_field": None, + "progress_report_epic_min_stories_field": None, + "progress_report_epic_max_stories_field": None, + "progress_report_epic_team_field": None, + "progress_report_outcomes": [], + "progress_report_outcome_query": None, + "progress_report_outcome_deadline_field": None, + 
"progress_report_teams": [ + { + "name": "Red", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": random.randint(1, 3), + }, + ], + }, + ) + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % 
c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] - query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=epics + stories), - settings=settings - ) + query_manager = QueryManager(jira=JIRA(fields=fields, filter_=simple_ql, issues=epics + stories), settings=settings) calculator = ProgressReportCalculator(query_manager, settings, results) @@ -2021,332 +2069,340 @@ def make_story(i): if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_teams_no_outcomes(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-teams-no-outcomes.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': None, - 'max_throughput': None, - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - }, { - 'name': 'Blue', - 'min_throughput': random.randint(3, 5), - 'max_throughput': random.randint(5, 7), - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': random.randint(1, 3), - } - ], - 'progress_report_epic_query_template': 'issuetype=epic', - 'progress_report_outcomes': [], - 
'progress_report_outcome_query': None, - 'progress_report_outcome_deadline_field': None, - }) - - teams = [t['name'] for t in settings['progress_report_teams']] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=None, - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-teams-no-outcomes.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": None, + "max_throughput": None, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + { + "name": "Blue", + "min_throughput": random.randint(3, 5), + "max_throughput": random.randint(5, 7), + "throughput_samples": None, + "throughput_samples_window": None, + "wip": random.randint(1, 3), + }, + ], + "progress_report_epic_query_template": "issuetype=epic", + "progress_report_outcomes": [], + "progress_report_outcome_query": None, + "progress_report_outcome_deadline_field": None, + }, + ) + + teams = [t["name"] for t in settings["progress_report_teams"]] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + 
customfield_001=random.choice(teams), + customfield_201=None, + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] - query_manager = QueryManager( - jira=JIRA(fields=fields, 
filter=simple_ql, issues=epics + stories), - settings=settings - ) + query_manager = QueryManager(jira=JIRA(fields=fields, filter_=simple_ql, issues=epics + stories), settings=settings) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=100) - assert len(data['teams']) == 2 + assert len(data["teams"]) == 2 results[ProgressReportCalculator] = data if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_no_teams(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-no-teams.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': None, - 'progress_report_outcomes': [], - 'progress_report_outcome_query': 'issuetype=outcome', - 'progress_report_outcome_deadline_field': 'Deadline', - 'progress_report_epic_team_field': None, - }) - - outcomes = [Issue("O-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_202="%s 00:00:00" % random_date_future(today + 
timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - changes=[] - ) for i in range(random.randint(2, 4))] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_201=random.choice([o.key for o in outcomes]), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-no-teams.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": None, + "progress_report_outcomes": [], + "progress_report_outcome_query": "issuetype=outcome", + "progress_report_outcome_deadline_field": "Deadline", + "progress_report_epic_team_field": None, + }, + ) + + outcomes = [ + Issue( + "O-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + changes=[], + ) + for i in range(random.randint(2, 4)) + ] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % 
random_date_past(today, 30).isoformat(), + customfield_201=random.choice([o.key for o in outcomes]), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, 
issues=outcomes + epics + stories), - settings=settings + jira=JIRA(fields=fields, filter_=simple_ql, issues=outcomes + epics + stories), settings=settings ) calculator = ProgressReportCalculator(query_manager, settings, results) data = calculator.run(trials=100) - assert len(data['teams']) == 0 + assert len(data["teams"]) == 0 results[ProgressReportCalculator] = data if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_dynamic_teams(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-dynamic-teams.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': None, - 'progress_report_outcomes': [], - 'progress_report_outcome_query': 'issuetype=outcome', - 'progress_report_outcome_deadline_field': 'Deadline', - }) + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-dynamic-teams.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": None, + "progress_report_outcomes": [], + "progress_report_outcome_query": "issuetype=outcome", + "progress_report_outcome_deadline_field": "Deadline", + }, + ) teams = ["Alpha", "Beta", "Delta"] - - outcomes = [Issue("O-%d" % i, 
- summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - changes=[] - ) for i in range(random.randint(2, 4))] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=random.choice([o.key for o in outcomes]), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + outcomes = [ + Issue( + "O-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + changes=[], + ) + for i in range(random.randint(2, 4)) + ] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + 
customfield_001=random.choice(teams), + customfield_201=random.choice([o.key for o in outcomes]), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] query_manager = QueryManager( - 
jira=JIRA(fields=fields, filter=simple_ql, issues=outcomes + epics + stories), - settings=settings + jira=JIRA(fields=fields, filter_=simple_ql, issues=outcomes + epics + stories), settings=settings ) calculator = ProgressReportCalculator(query_manager, settings, results) @@ -2358,114 +2414,119 @@ def make_story(i): if WRITE_TEST_OUTPUTS: calculator.write() + def test_with_large_dataset_static_and_dynamic_teams(fields, settings, results): today = date.today() # build a large and partially randomised data set to forecast on - field_lookup = {v['name'].lower(): v['id'] for v in fields} + field_lookup = {v["name"].lower(): v["id"] for v in fields} def compare_value(i, clause): - key, val = [s.strip() for s in clause.split('=')] + key, val = [s.strip() for s in clause.split("=")] ival = getattr(i.fields, field_lookup.get(key.lower(), key), None) - ival = getattr(ival, 'value', ival) + ival = getattr(ival, "value", ival) return val.strip('"') == ival def simple_ql(i, jql): - clauses = [c.strip() for c in jql.split(' AND ') if "=" in c] + clauses = [c.strip() for c in jql.split(" AND ") if "=" in c] return all([compare_value(i, c) for c in clauses]) - - settings = extend_dict(settings, { - 'quantiles': [0.75, 0.85, 0.95], - 'progress_report': 'progress-mixed-teams.html', - 'progress_report_title': 'Acme Corp Websites', - 'progress_report_teams': [ - { - 'name': 'Red', - 'min_throughput': random.randint(3, 5), - 'max_throughput': random.randint(5, 7), - 'throughput_samples': None, - 'throughput_samples_window': None, - 'wip': 1, - }, - ], - 'progress_report_outcomes': [], - 'progress_report_outcome_query': 'issuetype=outcome', - 'progress_report_outcome_deadline_field': 'Deadline', - }) - - teams = [t['name'] for t in settings['progress_report_teams']] + ["Green", "Purple"] - - outcomes = [Issue("O-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Outcome', 'outcome'), - status=Value('In progress', 'in-progress'), - 
resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - changes=[] - ) for i in range(random.randint(2, 4))] - - epics = [Issue("E-%d" % i, - summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), - issuetype=Value('Epic', 'epic'), - status=Value('In progress', 'in-progress'), - resolution=None, - resolutiondate=None, - created="%s 00:00:00" % random_date_past(today, 30).isoformat(), - customfield_001=random.choice(teams), - customfield_201=random.choice([o.key for o in outcomes]), - customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() if random.choice((True, True, False,)) else None, - customfield_203=random.randint(15, 20), - customfield_204=random.randint(20, 25), - changes=[] - ) for i in range(random.randint(9, 12))] + + settings = extend_dict( + settings, + { + "quantiles": [0.75, 0.85, 0.95], + "progress_report": "progress-mixed-teams.html", + "progress_report_title": "Acme Corp Websites", + "progress_report_teams": [ + { + "name": "Red", + "min_throughput": random.randint(3, 5), + "max_throughput": random.randint(5, 7), + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + ], + "progress_report_outcomes": [], + "progress_report_outcome_query": "issuetype=outcome", + "progress_report_outcome_deadline_field": "Deadline", + }, + ) + + teams = [t["name"] for t in settings["progress_report_teams"]] + ["Green", "Purple"] + + outcomes = [ + Issue( + "O-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Outcome", "outcome"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_202="%s 00:00:00" % 
random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + changes=[], + ) + for i in range(random.randint(2, 4)) + ] + + epics = [ + Issue( + "E-%d" % i, + summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), + issuetype=Value("Epic", "epic"), + status=Value("In progress", "in-progress"), + resolution=None, + resolutiondate=None, + created="%s 00:00:00" % random_date_past(today, 30).isoformat(), + customfield_001=random.choice(teams), + customfield_201=random.choice([o.key for o in outcomes]), + customfield_202="%s 00:00:00" % random_date_future(today + timedelta(days=55), 65).isoformat() + if random.choice((True, True, False)) + else None, + customfield_203=random.randint(15, 20), + customfield_204=random.randint(20, 25), + changes=[], + ) + for i in range(random.randint(9, 12)) + ] def make_story(i): epic = random.choice(epics) current_status = random.choice(statuses) created = random_date_past(today, 15) - changes = [{ - 'date': created, - 'from': None, - 'to': statuses[0] - }] - - for s in statuses[1:]: - changes.append({ - 'date': random_date_future(changes[-1]['date'], 15), - 'from': changes[-1]['to'], - 'to': s, - }) - - if s == current_status: + changes = [{"date": created, "from": None, "to": statuses[0]}] + + for status in statuses[1:]: + changes.append( + {"date": random_date_future(changes[-1]["date"], 15), "from": changes[-1]["to"], "to": status} + ) + + if status == current_status: break - return Issue("S-%d" % i, + return Issue( + "S-%d" % i, summary="%s %s" % (random.choice(verbs).capitalize(), random.choice(nouns)), issuetype=Value("Story", "story"), status=Value(current_status, current_status.lower()), - resolution=Value('Done', 'done') if current_status == 'Done' else None, - resolutiondate="%s 00:00:00" % changes[-1]['date'] if current_status == 'Done' else None, + resolution=Value("Done", "done") if current_status == "Done" else None, + resolutiondate="%s 
00:00:00" % changes[-1]["date"] if current_status == "Done" else None, created="%s 00:00:00" % created.isoformat(), customfield_001=epic.fields.customfield_001, customfield_205=epic.key, - changes=[ - Change( - "%s 00:00:00" % c['date'], - [("status", c['from'], c['to'],)] - ) for c in changes[1:] - ], + changes=[Change("%s 00:00:00" % c["date"], [("status", c["from"], c["to"])]) for c in changes[1:]], ) stories = [make_story(i) for i in range(100, 300)] query_manager = QueryManager( - jira=JIRA(fields=fields, filter=simple_ql, issues=outcomes + epics + stories), - settings=settings + jira=JIRA(fields=fields, filter_=simple_ql, issues=outcomes + epics + stories), settings=settings ) calculator = ProgressReportCalculator(query_manager, settings, results) diff --git a/jira_agile_metrics/calculators/scatterplot.py b/jira_agile_metrics/calculators/scatterplot.py index 13bac7e..eccee4d 100644 --- a/jira_agile_metrics/calculators/scatterplot.py +++ b/jira_agile_metrics/calculators/scatterplot.py @@ -1,15 +1,17 @@ import logging -import pandas as pd -import matplotlib.pyplot as plt + import matplotlib.dates as mdates +import matplotlib.pyplot as plt +import pandas as pd from ..calculator import Calculator from ..utils import get_extension, set_chart_style - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class ScatterplotCalculator(Calculator): """Build scatterplot data for the cycle times: a data frame containing only those items in where values are set for `completed_timestamp` and @@ -21,101 +23,104 @@ class ScatterplotCalculator(Calculator): def run(self): cycle_data = self.get_result(CycleTimeCalculator) return calculate_scatterplot_data(cycle_data) - + def write(self): data = self.get_result() - if self.settings['scatterplot_data']: - self.write_file(data, self.settings['scatterplot_data']) + if self.settings["scatterplot_data"]: + self.write_file(data, self.settings["scatterplot_data"]) else: logger.debug("No output file 
specified for scatterplot data") - - if self.settings['scatterplot_chart']: - self.write_chart(data, self.settings['scatterplot_chart']) + + if self.settings["scatterplot_chart"]: + self.write_chart(data, self.settings["scatterplot_chart"]) else: logger.debug("No output file specified for scatterplot chart") - def write_file(self, data, output_files): + @staticmethod + def write_file(data, output_files): file_data = data.copy() - file_data['completed_date'] = file_data['completed_date'].map(pd.Timestamp.date) + file_data["completed_date"] = file_data["completed_date"].map(pd.Timestamp.date) for output_file in output_files: output_extension = get_extension(output_file) logger.info("Writing scatterplot data to %s", output_file) - if output_extension == '.json': - file_data.to_json(output_file, date_format='iso') - elif output_extension == '.xlsx': - file_data.to_excel(output_file, 'Scatter', index=False) + if output_extension == ".json": + file_data.to_json(output_file, date_format="iso") + elif output_extension == ".xlsx": + file_data.to_excel(output_file, "Scatter", index=False) else: file_data.to_csv(output_file, index=False) - + def write_chart(self, data, output_file): if len(data.index) < 2: logger.warning("Need at least 2 completed items to draw scatterplot") return - - chart_data = pd.DataFrame({ - 'completed_date': data['completed_date'].values.astype('datetime64[D]'), - 'cycle_time': data['cycle_time'] - }, index=data.index) - window = self.settings['scatterplot_window'] + chart_data = pd.DataFrame( + {"completed_date": data["completed_date"].values.astype("datetime64[D]"), "cycle_time": data["cycle_time"]}, + index=data.index, + ) + + window = self.settings["scatterplot_window"] if window: - start = chart_data['completed_date'].max().normalize() - pd.Timedelta(window, 'D') + start = chart_data["completed_date"].max().normalize() - pd.Timedelta(window, "D") chart_data = chart_data[chart_data.completed_date >= start] if len(data.index) < 2: 
logger.warning("Need at least 2 completed items to draw scatterplot") return - - quantiles = self.settings['quantiles'] - logger.debug("Showing forecast at quantiles %s", ', '.join(['%.2f' % (q * 100.0) for q in quantiles])) - + + quantiles = self.settings["quantiles"] + logger.debug("Showing forecast at quantiles %s", ", ".join(["%.2f" % (q * 100.0) for q in quantiles])) + fig, ax = plt.subplots() fig.autofmt_xdate() ax.set_xlabel("Completed date") ax.set_ylabel("Cycle time (days)") - if self.settings['scatterplot_chart_title']: - ax.set_title(self.settings['scatterplot_chart_title']) + if self.settings["scatterplot_chart_title"]: + ax.set_title(self.settings["scatterplot_chart_title"]) - ax.plot_date(x=chart_data['completed_date'], y=chart_data['cycle_time'], ms=5) - ax.xaxis.set_major_formatter(mdates.DateFormatter('%d/%m/%Y')) + ax.plot_date(x=chart_data["completed_date"], y=chart_data["cycle_time"], ms=5) + ax.xaxis.set_major_formatter(mdates.DateFormatter("%d/%m/%Y")) # Add quantiles left, right = ax.get_xlim() - for quantile, value in chart_data['cycle_time'].quantile(quantiles).iteritems(): - ax.hlines(value, left, right, linestyles='--', linewidths=1) - ax.annotate("%.0f%% (%.0f days)" % ((quantile * 100), value,), + for quantile, value in chart_data["cycle_time"].quantile(quantiles).iteritems(): + ax.hlines(value, left, right, linestyles="--", linewidths=1) + ax.annotate( + "%.0f%% (%.0f days)" % ((quantile * 100), value), xy=(left, value), xytext=(left, value + 0.5), fontsize="x-small", - ha="left" + ha="left", ) set_chart_style() # Write file logger.info("Writing scatterplot chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) + def calculate_scatterplot_data(cycle_data): columns = list(cycle_data.columns) - columns.remove('cycle_time') - columns.remove('completed_timestamp') - columns.remove('blocked_days') - columns.remove('impediments') - columns = 
['completed_timestamp', 'cycle_time', 'blocked_days'] + columns + columns.remove("cycle_time") + columns.remove("completed_timestamp") + columns.remove("blocked_days") + columns.remove("impediments") + columns = ["completed_timestamp", "cycle_time", "blocked_days"] + columns data = ( cycle_data[columns] - .dropna(subset=['cycle_time', 'completed_timestamp']) - .rename(columns={'completed_timestamp': 'completed_date'}) + .dropna(subset=["cycle_time", "completed_timestamp"]) + .rename(columns={"completed_timestamp": "completed_date"}) ) - data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]') + data["cycle_time"] = data["cycle_time"].astype("timedelta64[D]") return data diff --git a/jira_agile_metrics/calculators/scatterplot_test.py b/jira_agile_metrics/calculators/scatterplot_test.py index af7f885..084aecb 100644 --- a/jira_agile_metrics/calculators/scatterplot_test.py +++ b/jira_agile_metrics/calculators/scatterplot_test.py @@ -1,46 +1,47 @@ -import pytest from pandas import DataFrame, Timestamp +import pytest +from ..utils import extend_dict from .cycletime import CycleTimeCalculator from .scatterplot import ScatterplotCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): return extend_dict(minimal_settings, {}) -@pytest.fixture -def query_manager(minimal_query_manager): + +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(large_cycle_time_results): + +@pytest.fixture(name="results") +def fixture_results(large_cycle_time_results): return extend_dict(large_cycle_time_results, {}) + def test_empty(query_manager, settings, minimal_cycle_time_columns): - results = { - CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[]) - } + results = {CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[])} 
calculator = ScatterplotCalculator(query_manager, settings, results) data = calculator.run() assert list(data.columns) == [ - 'completed_date', - 'cycle_time', - 'blocked_days', - 'key', - 'url', - 'issue_type', - 'summary', - 'status', - 'resolution', - 'Backlog', - 'Committed', - 'Build', - 'Test', - 'Done' + "completed_date", + "cycle_time", + "blocked_days", + "key", + "url", + "issue_type", + "summary", + "status", + "resolution", + "Backlog", + "Committed", + "Build", + "Test", + "Done", ] assert len(data.index) == 0 @@ -51,32 +52,33 @@ def test_columns(query_manager, settings, results): data = calculator.run() assert list(data.columns) == [ - 'completed_date', - 'cycle_time', - 'blocked_days', - 'key', - 'url', - 'issue_type', - 'summary', - 'status', - 'resolution', - 'Backlog', - 'Committed', - 'Build', - 'Test', - 'Done' + "completed_date", + "cycle_time", + "blocked_days", + "key", + "url", + "issue_type", + "summary", + "status", + "resolution", + "Backlog", + "Committed", + "Build", + "Test", + "Done", ] + def test_calculate_scatterplot(query_manager, settings, results): calculator = ScatterplotCalculator(query_manager, settings, results) data = calculator.run() - assert data[['key', 'completed_date', 'cycle_time']].to_dict('records') == [ - {'key': 'A-13', 'completed_date': Timestamp('2018-01-07 00:00:00'), 'cycle_time': 5.0}, - {'key': 'A-14', 'completed_date': Timestamp('2018-01-07 00:00:00'), 'cycle_time': 5.0}, - {'key': 'A-15', 'completed_date': Timestamp('2018-01-08 00:00:00'), 'cycle_time': 5.0}, - {'key': 'A-16', 'completed_date': Timestamp('2018-01-08 00:00:00'), 'cycle_time': 5.0}, - {'key': 'A-17', 'completed_date': Timestamp('2018-01-09 00:00:00'), 'cycle_time': 5.0}, - {'key': 'A-18', 'completed_date': Timestamp('2018-01-09 00:00:00'), 'cycle_time': 4.0}, + assert data[["key", "completed_date", "cycle_time"]].to_dict("records") == [ + {"key": "A-13", "completed_date": Timestamp("2018-01-07 00:00:00"), "cycle_time": 5.0}, + {"key": "A-14", 
"completed_date": Timestamp("2018-01-07 00:00:00"), "cycle_time": 5.0}, + {"key": "A-15", "completed_date": Timestamp("2018-01-08 00:00:00"), "cycle_time": 5.0}, + {"key": "A-16", "completed_date": Timestamp("2018-01-08 00:00:00"), "cycle_time": 5.0}, + {"key": "A-17", "completed_date": Timestamp("2018-01-09 00:00:00"), "cycle_time": 5.0}, + {"key": "A-18", "completed_date": Timestamp("2018-01-09 00:00:00"), "cycle_time": 4.0}, ] diff --git a/jira_agile_metrics/calculators/throughput.py b/jira_agile_metrics/calculators/throughput.py index 0a8de64..aed891f 100644 --- a/jira_agile_metrics/calculators/throughput.py +++ b/jira_agile_metrics/calculators/throughput.py @@ -1,15 +1,17 @@ import logging -import pandas as pd + import matplotlib.pyplot as plt +import pandas as pd import statsmodels.formula.api as sm from ..calculator import Calculator from ..utils import get_extension, set_chart_style - from .cycletime import CycleTimeCalculator + logger = logging.getLogger(__name__) + class ThroughputCalculator(Calculator): """Build a data frame with columns `completed_timestamp` of the given frequency, and `count`, where count is the number of items @@ -18,108 +20,111 @@ class ThroughputCalculator(Calculator): def run(self): cycle_data = self.get_result(CycleTimeCalculator) - - frequency = self.settings['throughput_frequency'] - window = self.settings['throughput_window'] - + + frequency = self.settings["throughput_frequency"] + window = self.settings["throughput_window"] + logger.debug("Calculating throughput at frequency %s", frequency) return calculate_throughput(cycle_data, frequency, window) - + def write(self): data = self.get_result() - - if self.settings['throughput_data']: - self.write_file(data, self.settings['throughput_data']) + + if self.settings["throughput_data"]: + self.write_file(data, self.settings["throughput_data"]) else: logger.debug("No output file specified for throughput data") - - if self.settings['throughput_chart']: - self.write_chart(data, 
self.settings['throughput_chart']) + + if self.settings["throughput_chart"]: + self.write_chart(data, self.settings["throughput_chart"]) else: logger.debug("No output file specified for throughput chart") - def write_file(self, data, output_files): + @staticmethod + def write_file(data, output_files): for output_file in output_files: output_extension = get_extension(output_file) logger.info("Writing throughput data to %s", output_file) - if output_extension == '.json': - data.to_json(output_file, date_format='iso') - elif output_extension == '.xlsx': - data.to_excel(output_file, 'Throughput', header=True) + if output_extension == ".json": + data.to_json(output_file, date_format="iso") + elif output_extension == ".xlsx": + data.to_excel(output_file, "Throughput", header=True) else: data.to_csv(output_file, header=True) - + def write_chart(self, data, output_file): chart_data = data.copy() if len(chart_data.index) == 0: logger.warning("Cannot draw throughput chart with no completed items") return - + fig, ax = plt.subplots() - if self.settings['throughput_chart_title']: - ax.set_title(self.settings['throughput_chart_title']) + if self.settings["throughput_chart_title"]: + ax.set_title(self.settings["throughput_chart_title"]) # Calculate zero-indexed days to allow linear regression calculation day_zero = chart_data.index[0] - chart_data['day'] = (chart_data.index - day_zero).days + chart_data["day"] = (chart_data.index - day_zero).days - # Fit a linear regression (http://stackoverflow.com/questions/29960917/timeseries-fitted-values-from-trend-python) + # Fit a linear regression + # (http://stackoverflow.com/questions/29960917/timeseries-fitted-values-from-trend-python) fit = sm.ols(formula="count ~ day", data=chart_data).fit() - chart_data['fitted'] = fit.predict(chart_data) + chart_data["fitted"] = fit.predict(chart_data) # Plot ax.set_xlabel("Period starting") ax.set_ylabel("Number of items") - ax.plot(chart_data.index, chart_data['count'], marker='o') - 
plt.xticks(chart_data.index, [d.date().strftime('%d/%m/%Y') for d in chart_data.index], rotation=70, size='small') + ax.plot(chart_data.index, chart_data["count"], marker="o") + plt.xticks( + chart_data.index, [d.date().strftime("%d/%m/%Y") for d in chart_data.index], rotation=70, size="small" + ) _, top = ax.get_ylim() ax.set_ylim(0, top + 1) - for x, y in zip(chart_data.index, chart_data['count']): + for x, y in zip(chart_data.index, chart_data["count"]): if y == 0: continue - ax.annotate( - "%.0f" % y, - xy=(x.toordinal(), y + 0.2), - ha='center', - va='bottom', - fontsize="x-small", - ) + ax.annotate("%.0f" % y, xy=(x.toordinal(), y + 0.2), ha="center", va="bottom", fontsize="x-small") - ax.plot(chart_data.index, chart_data['fitted'], '--', linewidth=2) + ax.plot(chart_data.index, chart_data["fitted"], "--", linewidth=2) set_chart_style() # Write file logger.info("Writing throughput chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) + def calculate_throughput(cycle_data, frequency, window=None): if len(cycle_data.index) == 0: - return pd.DataFrame([], columns=['count'], index=[]) + return pd.DataFrame([], columns=["count"], index=[]) + + throughput = ( + cycle_data[["completed_timestamp", "key"]] + .rename(columns={"key": "count"}) + .groupby("completed_timestamp") + .count() + .resample(frequency) + .sum() + ) - throughput = cycle_data[['completed_timestamp', 'key']] \ - .rename(columns={'key': 'count'}) \ - .groupby('completed_timestamp').count() \ - .resample(frequency).sum() - # make sure we have 0 for periods with no throughput, and force to window if set window_start = throughput.index.min() window_end = throughput.index.max() - + if window: window_start = window_end - (pd.tseries.frequencies.to_offset(frequency) * (window - 1)) if window_start is pd.NaT or window_end is pd.NaT: - return pd.DataFrame([], columns=['count'], index=[]) - + return 
pd.DataFrame([], columns=["count"], index=[]) + return throughput.reindex(index=pd.date_range(start=window_start, end=window_end, freq=frequency)).fillna(0) diff --git a/jira_agile_metrics/calculators/throughput_test.py b/jira_agile_metrics/calculators/throughput_test.py index 3e25c53..79a9ef5 100644 --- a/jira_agile_metrics/calculators/throughput_test.py +++ b/jira_agile_metrics/calculators/throughput_test.py @@ -1,35 +1,33 @@ -import pytest from pandas import DataFrame +import pytest +from ..utils import extend_dict from .cycletime import CycleTimeCalculator from .throughput import ThroughputCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'throughput_frequency': 'D', - 'throughput_window': None, - }) +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict(minimal_settings, {"throughput_frequency": "D", "throughput_window": None}) + -@pytest.fixture -def query_manager(minimal_query_manager): +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager -@pytest.fixture -def results(large_cycle_time_results): + +@pytest.fixture(name="results") +def fixture_results(large_cycle_time_results): return extend_dict(large_cycle_time_results, {}) + def test_empty(query_manager, settings, minimal_cycle_time_columns): - results = { - CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[]) - } + results = {CycleTimeCalculator: DataFrame([], columns=minimal_cycle_time_columns, index=[])} calculator = ThroughputCalculator(query_manager, settings, results) data = calculator.run() - assert list(data.columns) == ['count'] + assert list(data.columns) == ["count"] assert len(data.index) == 0 @@ -38,37 +36,34 @@ def test_columns(query_manager, settings, results): data = calculator.run() - assert list(data.columns) == ['count'] + assert list(data.columns) == ["count"] + def 
test_calculate_throughput(query_manager, settings, results): calculator = ThroughputCalculator(query_manager, settings, results) data = calculator.run() - assert data.to_dict('records') == [{'count': 2}, {'count': 2}, {'count': 2}] + assert data.to_dict("records") == [{"count": 2}, {"count": 2}, {"count": 2}] + def test_calculate_throughput_with_wider_window(query_manager, settings, results): - settings = extend_dict(settings, { - 'throughput_frequency': 'D', - 'throughput_window': 5, - }) + settings = extend_dict(settings, {"throughput_frequency": "D", "throughput_window": 5}) calculator = ThroughputCalculator(query_manager, settings, results) data = calculator.run() - assert data.to_dict('records') == [{'count': 0.0}, {'count': 0.0}, {'count': 2}, {'count': 2}, {'count': 2}] + assert data.to_dict("records") == [{"count": 0.0}, {"count": 0.0}, {"count": 2}, {"count": 2}, {"count": 2}] + def test_calculate_throughput_with_narrower_window(query_manager, settings, results): - settings = extend_dict(settings, { - 'throughput_frequency': 'D', - 'throughput_window': 2, - }) + settings = extend_dict(settings, {"throughput_frequency": "D", "throughput_window": 2}) calculator = ThroughputCalculator(query_manager, settings, results) data = calculator.run() - assert data.to_dict('records') == [{'count': 2}, {'count': 2}] + assert data.to_dict("records") == [{"count": 2}, {"count": 2}] diff --git a/jira_agile_metrics/calculators/waste.py b/jira_agile_metrics/calculators/waste.py index a796861..c54cc88 100644 --- a/jira_agile_metrics/calculators/waste.py +++ b/jira_agile_metrics/calculators/waste.py @@ -1,13 +1,16 @@ import logging + import dateutil -import pandas as pd from matplotlib import pyplot as plt +import pandas as pd from ..calculator import Calculator from ..utils import set_chart_style + logger = logging.getLogger(__name__) + class WasteCalculator(Calculator): """Calculate stories withdrawn, grouped by the time of withdrawal and stage prior to withdrawal. 
@@ -19,30 +22,27 @@ class WasteCalculator(Calculator): def run(self): - query = self.settings['waste_query'] + query = self.settings["waste_query"] if not query: logger.debug("Not calculating waste chart data as no query specified") return None - cycle_names = [s['name'] for s in self.settings['cycle']] - committed_column = self.settings['committed_column'] - done_column = self.settings['done_column'] - active_columns = cycle_names[cycle_names.index(committed_column):cycle_names.index(done_column)] + cycle_names = [s["name"] for s in self.settings["cycle"]] + committed_column = self.settings["committed_column"] + done_column = self.settings["done_column"] + active_columns = cycle_names[cycle_names.index(committed_column) : cycle_names.index(done_column)] cycle_lookup = {} - for idx, cycle_step in enumerate(self.settings['cycle']): - for status in cycle_step['statuses']: - cycle_lookup[status.lower()] = dict( - index=idx, - name=cycle_step['name'], - ) - - columns = ['key', 'last_status', 'resolution', 'withdrawn_date'] + for idx, cycle_step in enumerate(self.settings["cycle"]): + for status in cycle_step["statuses"]: + cycle_lookup[status.lower()] = dict(index=idx, name=cycle_step["name"]) + + columns = ["key", "last_status", "resolution", "withdrawn_date"] series = { - 'key': {'data': [], 'dtype': 'str'}, - 'last_status': {'data': [], 'dtype': 'str'}, - 'resolution': {'data': [], 'dtype': 'str'}, - 'withdrawn_date': {'data': [], 'dtype': 'datetime64[ns]'} + "key": {"data": [], "dtype": "str"}, + "last_status": {"data": [], "dtype": "str"}, + "resolution": {"data": [], "dtype": "str"}, + "withdrawn_date": {"data": [], "dtype": "datetime64[ns]"}, } for issue in self.query_manager.find_issues(query): @@ -52,12 +52,12 @@ def run(self): continue last_status = None - status_changes = list(self.query_manager.iter_changes(issue, ['status'])) + status_changes = list(self.query_manager.iter_changes(issue, ["status"])) if len(status_changes) > 0: last_status = 
status_changes[-1].from_string if last_status is not None and last_status.lower() in cycle_lookup: - last_status = cycle_lookup.get(last_status.lower())['name'] + last_status = cycle_lookup.get(last_status.lower())["name"] else: logger.warning("Issue %s transitioned from unknown JIRA status %s", issue.key, last_status) @@ -65,14 +65,14 @@ def run(self): if last_status not in active_columns: continue - series['key']['data'].append(issue.key) - series['last_status']['data'].append(last_status) - series['resolution']['data'].append(issue.fields.resolution.name) - series['withdrawn_date']['data'].append(dateutil.parser.parse(issue.fields.resolutiondate)) + series["key"]["data"].append(issue.key) + series["last_status"]["data"].append(last_status) + series["resolution"]["data"].append(issue.fields.resolution.name) + series["withdrawn_date"]["data"].append(dateutil.parser.parse(issue.fields.resolutiondate)) data = {} - for k, v in series.items(): - data[k] = pd.Series(v['data'], dtype=v['dtype']) + for key, value in series.items(): + data[key] = pd.Series(value["data"], dtype=value["dtype"]) return pd.DataFrame(data, columns=columns) @@ -81,7 +81,7 @@ def write(self): if chart_data is None: return - output_file = self.settings['waste_chart'] + output_file = self.settings["waste_chart"] if not output_file: logger.debug("No output file specified for waste chart") return @@ -90,19 +90,20 @@ def write(self): logger.warning("Cannot draw waste chart with zero items") return - frequency = self.settings['waste_frequency'] - window = self.settings['waste_window'] + frequency = self.settings["waste_frequency"] + window = self.settings["waste_window"] - cycle_names = [s['name'] for s in self.settings['cycle']] - committed_column = self.settings['committed_column'] - done_column = self.settings['done_column'] - active_columns = cycle_names[cycle_names.index(committed_column):cycle_names.index(done_column)] + cycle_names = [s["name"] for s in self.settings["cycle"]] + 
committed_column = self.settings["committed_column"] + done_column = self.settings["done_column"] + active_columns = cycle_names[cycle_names.index(committed_column) : cycle_names.index(done_column)] - breakdown = chart_data \ - .pivot_table(index='withdrawn_date', columns='last_status', values='key', aggfunc='count') \ - .groupby(pd.Grouper(freq=frequency, closed='left', label='left')) \ - .sum() \ - .reindex(active_columns, axis=1) + breakdown = ( + chart_data.pivot_table(index="withdrawn_date", columns="last_status", values="key", aggfunc="count") + .groupby(pd.Grouper(freq=frequency, closed="left", label="left")) + .sum() + .reindex(active_columns, axis=1) + ) if window: breakdown = breakdown[-window:] @@ -115,19 +116,19 @@ def write(self): breakdown.plot.bar(ax=ax, stacked=True) - if self.settings['waste_chart_title']: - ax.set_title(self.settings['waste_chart_title']) + if self.settings["waste_chart_title"]: + ax.set_title(self.settings["waste_chart_title"]) - ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + ax.legend(loc="center left", bbox_to_anchor=(1, 0.5)) ax.set_xlabel("Month", labelpad=20) ax.set_ylabel("Number of items", labelpad=10) labels = [d.strftime("%b %y") for d in breakdown.index] - ax.set_xticklabels(labels, rotation=90, size='small') + ax.set_xticklabels(labels, rotation=90, size="small") set_chart_style() # Write file logger.info("Writing waste chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/waste_test.py b/jira_agile_metrics/calculators/waste_test.py index 26f105a..ffe5e1f 100644 --- a/jira_agile_metrics/calculators/waste_test.py +++ b/jira_agile_metrics/calculators/waste_test.py @@ -1,137 +1,144 @@ -import pytest from pandas import Timestamp +import pytest -from ..conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxFieldValue as Value, - FauxChange as Change -) - -from 
..utils import extend_dict - +from ..conftest import FauxJIRA as JIRA, FauxIssue as Issue, FauxFieldValue as Value, FauxChange as Change from ..querymanager import QueryManager +from ..utils import extend_dict from .waste import WasteCalculator -@pytest.fixture -def fields(minimal_fields): + +@pytest.fixture(name="fields") +def fixture_fields(minimal_fields): return minimal_fields + [] -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - 'waste_query': 'issueType = Story AND resolution IN (Withdrawn, Invalid)', - 'waste_window': 3, - 'waste_frequency': '2W-WED', - 'waste_chart': 'waste.png', - 'waste_chart_title': 'Waste', - }) - -@pytest.fixture -def jira(fields): - return JIRA(fields=fields, issues=[ - Issue("A-1", - summary="Withdrawn from QA", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-03 01:01:01", - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-06 02:02:02", - changes=[ - Change("2018-01-03 02:02:02", [("status", "Backlog", "Next",)]), - Change("2018-01-04 02:02:02", [("status", "Next", "Build",)]), - Change("2018-01-05 02:02:02", [("status", "Build", "QA",)]), - Change("2018-01-06 02:02:02", [("status", "QA", "Done",)]), - ], - ), - Issue("A-2", - summary="Withdrawn from Next", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-03 01:01:01", - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-07 02:02:02", - changes=[ - Change("2018-01-03 02:02:02", [("status", "Backlog", "Next",)]), - Change("2018-01-07 02:02:02", [("status", "Next", "Done",)]), - ], - ), - Issue("A-3", - summary="Withdrawn from Done", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-03 01:01:01", - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-08 02:02:02", - changes=[ - Change("2018-01-03 02:02:02", [("status", "Backlog", "Next",)]), - Change("2018-01-04 
02:02:02", [("status", "Next", "Build",)]), - Change("2018-01-05 02:02:02", [("status", "Build", "QA",)]), - Change("2018-01-06 02:02:02", [("status", "QA", "Done",)]), - Change("2018-01-08 02:02:02", [("status", "Done", "Done",)]), - ], - ), - Issue("A-4", - summary="Withdrawn from Backlog", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-03 01:01:01", - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-09 02:02:02", - changes=[ - Change("2018-01-09 02:02:02", [("status", "Backlog", "Done",)]), - ], - ), - Issue("A-5", - summary="Unresolved", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-03 01:01:01", - resolution=None, - resolutiondate=None, - changes=[ - Change("2018-01-03 02:02:02", [("status", "Backlog", "Next",)]), - Change("2018-01-04 02:02:02", [("status", "Next", "Build",)]), - Change("2018-01-05 02:02:02", [("status", "Build", "QA",)]), - Change("2018-01-06 02:02:02", [("status", "QA", "Done",)]), - ], - ), - Issue("A-6", - summary="Unmapped state", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-03 01:01:01", - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-06 02:02:02", - changes=[ - Change("2018-01-03 02:02:02", [("status", "Backlog", "Next",)]), - Change("2018-01-04 02:02:02", [("status", "Next", "Build",)]), - Change("2018-01-05 02:02:02", [("status", "Build", "foobar",)]), - Change("2018-01-06 02:02:02", [("status", "foobar", "Done",)]), - ], - ), - Issue("A-7", - summary="No changes", - issuetype=Value("Story", "story"), - status=Value("Done", "done"), - created="2018-01-06 02:02:02", - resolution=Value("Withdrawn", "withdrawn"), - resolutiondate="2018-01-06 02:02:02", - changes=[], - ), - ]) + +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict( + minimal_settings, + { + "waste_query": "issueType = Story AND resolution IN (Withdrawn, 
Invalid)", + "waste_window": 3, + "waste_frequency": "2W-WED", + "waste_chart": "waste.png", + "waste_chart_title": "Waste", + }, + ) + + +@pytest.fixture(name="jira") +def fixture_jira(fields): + return JIRA( + fields=fields, + issues=[ + Issue( + "A-1", + summary="Withdrawn from QA", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-03 01:01:01", + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-06 02:02:02", + changes=[ + Change("2018-01-03 02:02:02", [("status", "Backlog", "Next")]), + Change("2018-01-04 02:02:02", [("status", "Next", "Build")]), + Change("2018-01-05 02:02:02", [("status", "Build", "QA")]), + Change("2018-01-06 02:02:02", [("status", "QA", "Done")]), + ], + ), + Issue( + "A-2", + summary="Withdrawn from Next", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-03 01:01:01", + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-07 02:02:02", + changes=[ + Change("2018-01-03 02:02:02", [("status", "Backlog", "Next")]), + Change("2018-01-07 02:02:02", [("status", "Next", "Done")]), + ], + ), + Issue( + "A-3", + summary="Withdrawn from Done", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-03 01:01:01", + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-08 02:02:02", + changes=[ + Change("2018-01-03 02:02:02", [("status", "Backlog", "Next")]), + Change("2018-01-04 02:02:02", [("status", "Next", "Build")]), + Change("2018-01-05 02:02:02", [("status", "Build", "QA")]), + Change("2018-01-06 02:02:02", [("status", "QA", "Done")]), + Change("2018-01-08 02:02:02", [("status", "Done", "Done")]), + ], + ), + Issue( + "A-4", + summary="Withdrawn from Backlog", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-03 01:01:01", + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-09 02:02:02", + changes=[Change("2018-01-09 02:02:02", 
[("status", "Backlog", "Done")])], + ), + Issue( + "A-5", + summary="Unresolved", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-03 01:01:01", + resolution=None, + resolutiondate=None, + changes=[ + Change("2018-01-03 02:02:02", [("status", "Backlog", "Next")]), + Change("2018-01-04 02:02:02", [("status", "Next", "Build")]), + Change("2018-01-05 02:02:02", [("status", "Build", "QA")]), + Change("2018-01-06 02:02:02", [("status", "QA", "Done")]), + ], + ), + Issue( + "A-6", + summary="Unmapped state", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-03 01:01:01", + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-06 02:02:02", + changes=[ + Change("2018-01-03 02:02:02", [("status", "Backlog", "Next")]), + Change("2018-01-04 02:02:02", [("status", "Next", "Build")]), + Change("2018-01-05 02:02:02", [("status", "Build", "foobar")]), + Change("2018-01-06 02:02:02", [("status", "foobar", "Done")]), + ], + ), + Issue( + "A-7", + summary="No changes", + issuetype=Value("Story", "story"), + status=Value("Done", "done"), + created="2018-01-06 02:02:02", + resolution=Value("Withdrawn", "withdrawn"), + resolutiondate="2018-01-06 02:02:02", + changes=[], + ), + ], + ) + def test_no_query(jira, settings): query_manager = QueryManager(jira, settings) results = {} - settings = extend_dict(settings, { - 'waste_query': None - }) + settings = extend_dict(settings, {"waste_query": None}) calculator = WasteCalculator(query_manager, settings, results) data = calculator.run() assert data is None + def test_columns(jira, settings): query_manager = QueryManager(jira, settings) results = {} @@ -139,7 +146,8 @@ def test_columns(jira, settings): data = calculator.run() - assert list(data.columns) == ['key', 'last_status', 'resolution', 'withdrawn_date'] + assert list(data.columns) == ["key", "last_status", "resolution", "withdrawn_date"] + def test_empty(fields, settings): jira = 
JIRA(fields=fields, issues=[]) @@ -151,6 +159,7 @@ def test_empty(fields, settings): assert len(data.index) == 0 + def test_query(jira, settings): query_manager = QueryManager(jira, settings) results = {} @@ -158,16 +167,24 @@ def test_query(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'A-1', 'last_status': 'Test', 'resolution': 'Withdrawn', 'withdrawn_date': Timestamp('2018-01-06 02:02:02')}, - {'key': 'A-2', 'last_status': 'Committed', 'resolution': 'Withdrawn', 'withdrawn_date': Timestamp('2018-01-07 02:02:02')}, + assert data.to_dict("records") == [ + { + "key": "A-1", + "last_status": "Test", + "resolution": "Withdrawn", + "withdrawn_date": Timestamp("2018-01-06 02:02:02"), + }, + { + "key": "A-2", + "last_status": "Committed", + "resolution": "Withdrawn", + "withdrawn_date": Timestamp("2018-01-07 02:02:02"), + }, ] + def test_different_backlog_column(jira, settings): - settings = extend_dict(settings, { - 'backlog_column': 'Committed', - 'committed_column': 'Build', - }) + settings = extend_dict(settings, {"backlog_column": "Committed", "committed_column": "Build"}) query_manager = QueryManager(jira, settings) results = {} @@ -175,14 +192,18 @@ def test_different_backlog_column(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 'A-1', 'last_status': 'Test', 'resolution': 'Withdrawn', 'withdrawn_date': Timestamp('2018-01-06 02:02:02')}, + assert data.to_dict("records") == [ + { + "key": "A-1", + "last_status": "Test", + "resolution": "Withdrawn", + "withdrawn_date": Timestamp("2018-01-06 02:02:02"), + }, ] + def test_different_done_column(jira, settings): - settings = extend_dict(settings, { - 'done_column': 'Test' - }) + settings = extend_dict(settings, {"done_column": "Test"}) query_manager = QueryManager(jira, settings) results = {} @@ -190,6 +211,11 @@ def test_different_done_column(jira, settings): data = calculator.run() - assert data.to_dict('records') == [ - {'key': 
'A-2', 'last_status': 'Committed', 'resolution': 'Withdrawn', 'withdrawn_date': Timestamp('2018-01-07 02:02:02')}, + assert data.to_dict("records") == [ + { + "key": "A-2", + "last_status": "Committed", + "resolution": "Withdrawn", + "withdrawn_date": Timestamp("2018-01-07 02:02:02"), + }, ] diff --git a/jira_agile_metrics/calculators/wip.py b/jira_agile_metrics/calculators/wip.py index 3a65f6c..c6e1893 100644 --- a/jira_agile_metrics/calculators/wip.py +++ b/jira_agile_metrics/calculators/wip.py @@ -1,29 +1,29 @@ import logging + import matplotlib.pyplot as plt import pandas as pd from ..calculator import Calculator from ..utils import set_chart_style - from .cfd import CFDCalculator + logger = logging.getLogger(__name__) + class WIPChartCalculator(Calculator): - """Draw a weekly WIP chart - """ + """Draw a weekly WIP chart""" def run(self): cfd_data = self.get_result(CFDCalculator) - cycle_names = [s['name'] for s in self.settings['cycle']] - committed_column = self.settings['committed_column'] - done_column = self.settings['done_column'] + committed_column = self.settings["committed_column"] + done_column = self.settings["done_column"] - return pd.DataFrame({'wip': cfd_data[committed_column] - cfd_data[done_column]}, index=cfd_data.index) + return pd.DataFrame({"wip": cfd_data[committed_column] - cfd_data[done_column]}, index=cfd_data.index) def write(self): - output_file = self.settings['wip_chart'] + output_file = self.settings["wip_chart"] if not output_file: logger.debug("No output file specified for WIP chart") return @@ -36,15 +36,15 @@ def write(self): fig, ax = plt.subplots() - if self.settings['wip_chart_title']: - ax.set_title(self.settings['wip_chart_title']) + if self.settings["wip_chart_title"]: + ax.set_title(self.settings["wip_chart_title"]) - frequency = self.settings['wip_frequency'] + frequency = self.settings["wip_frequency"] logger.debug("Calculating WIP chart with frequency %s", frequency) - wip_data = chart_data[['wip']] + wip_data = 
chart_data[["wip"]] - window = self.settings['wip_window'] + window = self.settings["wip_window"] if window: start = wip_data.index.max().normalize() - (window * pd.tseries.frequencies.to_offset(frequency)) wip_data = wip_data[start:] @@ -53,11 +53,11 @@ def write(self): logger.warning("Need at least 2 completed items to draw scatterplot") return - groups = wip_data.groupby(pd.Grouper(freq=frequency, label='left', closed='left')) + groups = wip_data.groupby(pd.Grouper(freq=frequency, label="left", closed="left")) labels = [x[0].strftime("%d/%m/%Y") for x in groups] - groups.boxplot(subplots=False, ax=ax, showmeans=True, return_type='axes') - ax.set_xticklabels(labels, rotation=70, size='small') + groups.boxplot(subplots=False, ax=ax, showmeans=True, return_type="axes") + ax.set_xticklabels(labels, rotation=70, size="small") ax.set_xlabel("Period starting") ax.set_ylabel("WIP") @@ -66,5 +66,5 @@ def write(self): # Write file logger.info("Writing WIP chart to %s", output_file) - fig.savefig(output_file, bbox_inches='tight', dpi=300) + fig.savefig(output_file, bbox_inches="tight", dpi=300) plt.close(fig) diff --git a/jira_agile_metrics/calculators/wip_test.py b/jira_agile_metrics/calculators/wip_test.py index 7a96009..1a38be5 100644 --- a/jira_agile_metrics/calculators/wip_test.py +++ b/jira_agile_metrics/calculators/wip_test.py @@ -1,24 +1,26 @@ -import pytest import datetime + from pandas import DataFrame, Timestamp, date_range +import pytest +from ..utils import extend_dict from .cfd import CFDCalculator from .wip import WIPChartCalculator -from ..utils import extend_dict -@pytest.fixture -def settings(minimal_settings): - return extend_dict(minimal_settings, { - }) +@pytest.fixture(name="settings") +def fixture_settings(minimal_settings): + return extend_dict(minimal_settings, {}) + -@pytest.fixture -def query_manager(minimal_query_manager): +@pytest.fixture(name="query_manager") +def fixture_query_manager(minimal_query_manager): return minimal_query_manager 
-@pytest.fixture -def results(query_manager, settings, large_cycle_time_results): - + +@pytest.fixture(name="results") +def fixture_results(query_manager, settings, large_cycle_time_results): + # CFD data frame and WIP # # Backlog Committed Build Test Done @@ -33,19 +35,25 @@ def results(query_manager, settings, large_cycle_time_results): # 2018-01-09 18.0 15.0 12.0 9.0 6.0 --> 9 # - return extend_dict(large_cycle_time_results, { - CFDCalculator: CFDCalculator(query_manager, settings, large_cycle_time_results).run() - }) + return extend_dict( + large_cycle_time_results, + {CFDCalculator: CFDCalculator(query_manager, settings, large_cycle_time_results).run()}, + ) + def test_empty(query_manager, settings, minimal_cycle_time_columns): results = { - CFDCalculator: DataFrame([], columns=['Backlog', 'Committed', 'Build', 'Test', 'Done'], index=date_range(start=datetime.date(2018, 1, 1), periods=0, freq='D')) + CFDCalculator: DataFrame( + [], + columns=["Backlog", "Committed", "Build", "Test", "Done"], + index=date_range(start=datetime.date(2018, 1, 1), periods=0, freq="D"), + ) } calculator = WIPChartCalculator(query_manager, settings, results) data = calculator.run() - assert list(data.columns) == ['wip'] + assert list(data.columns) == ["wip"] assert len(data.index) == 0 @@ -53,7 +61,8 @@ def test_columns(query_manager, settings, results): calculator = WIPChartCalculator(query_manager, settings, results) data = calculator.run() - assert list(data.columns) == ['wip'] + assert list(data.columns) == ["wip"] + def test_calculate_wip(query_manager, settings, results): calculator = WIPChartCalculator(query_manager, settings, results) @@ -61,60 +70,58 @@ def test_calculate_wip(query_manager, settings, results): data = calculator.run() assert list(data.index) == [ - Timestamp('2018-01-01 00:00:00', freq='D'), - Timestamp('2018-01-02 00:00:00', freq='D'), - Timestamp('2018-01-03 00:00:00', freq='D'), - Timestamp('2018-01-04 00:00:00', freq='D'), - Timestamp('2018-01-05 
00:00:00', freq='D'), - Timestamp('2018-01-06 00:00:00', freq='D'), - Timestamp('2018-01-07 00:00:00', freq='D'), - Timestamp('2018-01-08 00:00:00', freq='D'), - Timestamp('2018-01-09 00:00:00', freq='D') + Timestamp("2018-01-01 00:00:00", freq="D"), + Timestamp("2018-01-02 00:00:00", freq="D"), + Timestamp("2018-01-03 00:00:00", freq="D"), + Timestamp("2018-01-04 00:00:00", freq="D"), + Timestamp("2018-01-05 00:00:00", freq="D"), + Timestamp("2018-01-06 00:00:00", freq="D"), + Timestamp("2018-01-07 00:00:00", freq="D"), + Timestamp("2018-01-08 00:00:00", freq="D"), + Timestamp("2018-01-09 00:00:00", freq="D"), ] - assert data.to_dict('records') == [ - {'wip': 0.0}, - {'wip': 9.0}, - {'wip': 13.0}, - {'wip': 14.0}, - {'wip': 15.0}, - {'wip': 15.0}, - {'wip': 13.0}, - {'wip': 11.0}, - {'wip': 9.0}, + assert data.to_dict("records") == [ + {"wip": 0.0}, + {"wip": 9.0}, + {"wip": 13.0}, + {"wip": 14.0}, + {"wip": 15.0}, + {"wip": 15.0}, + {"wip": 13.0}, + {"wip": 11.0}, + {"wip": 9.0}, ] + def test_calculate_wip_different_columns(query_manager, settings, results): - settings.update({ - 'committed_column': 'Build', - 'done_column': 'Test', - }) + settings.update({"committed_column": "Build", "done_column": "Test"}) calculator = WIPChartCalculator(query_manager, settings, results) data = calculator.run() assert list(data.index) == [ - Timestamp('2018-01-01 00:00:00', freq='D'), - Timestamp('2018-01-02 00:00:00', freq='D'), - Timestamp('2018-01-03 00:00:00', freq='D'), - Timestamp('2018-01-04 00:00:00', freq='D'), - Timestamp('2018-01-05 00:00:00', freq='D'), - Timestamp('2018-01-06 00:00:00', freq='D'), - Timestamp('2018-01-07 00:00:00', freq='D'), - Timestamp('2018-01-08 00:00:00', freq='D'), - Timestamp('2018-01-09 00:00:00', freq='D') + Timestamp("2018-01-01 00:00:00", freq="D"), + Timestamp("2018-01-02 00:00:00", freq="D"), + Timestamp("2018-01-03 00:00:00", freq="D"), + Timestamp("2018-01-04 00:00:00", freq="D"), + Timestamp("2018-01-05 00:00:00", freq="D"), + 
Timestamp("2018-01-06 00:00:00", freq="D"), + Timestamp("2018-01-07 00:00:00", freq="D"), + Timestamp("2018-01-08 00:00:00", freq="D"), + Timestamp("2018-01-09 00:00:00", freq="D"), ] - assert data.to_dict('records') == [ - {'wip': 0.0}, - {'wip': 0.0}, - {'wip': 8.0}, - {'wip': 7.0}, - {'wip': 3.0}, - {'wip': 4.0}, - {'wip': 4.0}, - {'wip': 3.0}, - {'wip': 3.0}, + assert data.to_dict("records") == [ + {"wip": 0.0}, + {"wip": 0.0}, + {"wip": 8.0}, + {"wip": 7.0}, + {"wip": 3.0}, + {"wip": 4.0}, + {"wip": 4.0}, + {"wip": 3.0}, + {"wip": 3.0}, ] diff --git a/jira_agile_metrics/cli.py b/jira_agile_metrics/cli.py index 5cf201f..cf958b9 100644 --- a/jira_agile_metrics/cli.py +++ b/jira_agile_metrics/cli.py @@ -1,45 +1,64 @@ -import os import argparse import getpass import logging +import os from jira import JIRA from .config import config_to_options, CALCULATORS -from .webapp.app import app as webapp -from .querymanager import QueryManager from .calculator import run_calculators +from .querymanager import QueryManager from .utils import set_chart_context +from .webapp.app import app as webapp + logger = logging.getLogger(__name__) + def configure_argument_parser(): - """Configure an ArgumentParser that manages command line options. 
- """ + """Configure an ArgumentParser that manages command line options.""" - parser = argparse.ArgumentParser(description='Extract Agile metrics data from JIRA and produce data and charts.') + parser = argparse.ArgumentParser(description="Extract Agile metrics data from JIRA and produce data and charts.") # Basic options - parser.add_argument('config', metavar='config.yml', nargs='?', help='Configuration file') - parser.add_argument('-v', dest='verbose', action='store_true', help='Verbose output') - parser.add_argument('-vv', dest='very_verbose', action='store_true', help='Even more verbose output') - parser.add_argument('-n', metavar='N', dest='max_results', type=int, help='Only fetch N most recently updated issues') - - parser.add_argument('--server', metavar='127.0.0.1:8080', help='Run as a web server instead of a command line tool, on the given host and/or port. The remaining options do not apply.') + parser.add_argument("config", metavar="config.yml", nargs="?", help="Configuration file") + parser.add_argument("-v", dest="verbose", action="store_true", help="Verbose output") + parser.add_argument("-vv", dest="very_verbose", action="store_true", help="Even more verbose output") + parser.add_argument( + "-n", metavar="N", dest="max_results", type=int, help="Only fetch N most recently updated issues" + ) + + parser.add_argument( + "--server", + metavar="127.0.0.1:8080", + help="Run as a web server instead of a command line tool, on the given host and/or port. 
" + "The remaining options do not apply.", + ) # Output directory - parser.add_argument('--output-directory', '-o', metavar='metrics', help="Write output files to this directory, rather than the current working directory.") + parser.add_argument( + "--output-directory", + "-o", + metavar="metrics", + help="Write output files to this directory, rather than the current working directory.", + ) # Connection options - parser.add_argument('--domain', metavar='https://my.jira.com', help='JIRA domain name') - parser.add_argument('--username', metavar='user', help='JIRA user name') - parser.add_argument('--password', metavar='password', help='JIRA password') - parser.add_argument('--http-proxy', metavar='https://proxy.local', help='URL to HTTP Proxy') - parser.add_argument('--https-proxy', metavar='https://proxy.local', help='URL to HTTPS Proxy') - parser.add_argument('--jira-server-version-check', type=bool, metavar='True', help='If true it will fetch JIRA server version info first to determine if some API calls are available') - + parser.add_argument("--domain", metavar="https://my.jira.com", help="JIRA domain name") + parser.add_argument("--username", metavar="user", help="JIRA user name") + parser.add_argument("--password", metavar="password", help="JIRA password") + parser.add_argument("--http-proxy", metavar="https://proxy.local", help="URL to HTTP Proxy") + parser.add_argument("--https-proxy", metavar="https://proxy.local", help="URL to HTTPS Proxy") + parser.add_argument( + "--jira-server-version-check", + type=bool, + metavar="True", + help="If true it will fetch JIRA server version info first to determine if some API calls are available", + ) + return parser + def main(): parser = configure_argument_parser() args = parser.parse_args() @@ -49,30 +68,28 @@ def main(): else: run_command_line(parser, args) + def run_server(parser, args): host = None port = args.server - - if ':' in args.server: - (host, port) = args.server.split(':') + + if ":" in args.server: + 
(host, port) = args.server.split(":") port = int(port) set_chart_context("paper") webapp.run(host=host, port=port) + def run_command_line(parser, args): if not args.config: parser.print_usage() return - + logging.basicConfig( - format='[%(asctime)s %(levelname)s] %(message)s', - datefmt='%Y-%m-%d %H:%M:%S', - level=( - logging.DEBUG if args.very_verbose else - logging.INFO if args.verbose else - logging.WARNING - ) + format="[%(asctime)s %(levelname)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + level=(logging.DEBUG if args.very_verbose else logging.INFO if args.verbose else logging.WARNING), ) # Configuration and settings (command line arguments override config file options) @@ -82,24 +99,25 @@ def run_command_line(parser, args): options = config_to_options(config.read(), cwd=os.path.dirname(os.path.abspath(args.config))) # Allow command line arguments to override options - override_options(options['connection'], args) - override_options(options['settings'], args) + override_options(options["connection"], args) + override_options(options["settings"], args) # Set charting context, which determines how charts are rendered set_chart_context("paper") # Set output directory if required if args.output_directory: - logger.info("Changing working directory to %s" % args.output_directory) + logger.info("Changing working directory to %s", args.output_directory) os.chdir(args.output_directory) # Query JIRA and run calculators - jira = get_jira_client(options['connection']) + jira = get_jira_client(options["connection"]) logger.info("Running calculators") - query_manager = QueryManager(jira, options['settings']) - run_calculators(CALCULATORS, query_manager, options['settings']) + query_manager = QueryManager(jira, options["settings"]) + run_calculators(CALCULATORS, query_manager, options["settings"]) + def override_options(options, arguments): """Update `options` dict with settings from `arguments` @@ -109,15 +127,16 @@ def override_options(options, arguments): if 
getattr(arguments, key, None) is not None: options[key] = getattr(arguments, key) + def get_jira_client(connection): - url = connection['domain'] - username = connection['username'] - password = connection['password'] - http_proxy = connection['http_proxy'] - https_proxy = connection['https_proxy'] - jira_server_version_check = connection['jira_server_version_check'] + url = connection["domain"] + username = connection["username"] + password = connection["password"] + http_proxy = connection["http_proxy"] + https_proxy = connection["https_proxy"] + jira_server_version_check = connection["jira_server_version_check"] - jira_client_options = connection['jira_client_options'] + jira_client_options = connection["jira_client_options"] logger.info("Connecting to %s", url) @@ -127,15 +146,15 @@ def get_jira_client(connection): if not password: password = getpass.getpass("Password: ") - options = {'server': url} + options = {"server": url} proxies = None if http_proxy or https_proxy: proxies = {} if http_proxy: - proxies['http'] = http_proxy + proxies["http"] = http_proxy if https_proxy: - proxies['https'] = https_proxy + proxies["https"] = https_proxy options.update(jira_client_options) diff --git a/jira_agile_metrics/cli_test.py b/jira_agile_metrics/cli_test.py index ffa157d..0241967 100644 --- a/jira_agile_metrics/cli_test.py +++ b/jira_agile_metrics/cli_test.py @@ -1,25 +1,23 @@ import json -from .cli import ( - override_options -) +from .cli import override_options -def test_override_options(): +def test_override_options(): class FauxArgs: def __init__(self, opts): self.__dict__.update(opts) - for k, v in opts.items(): - setattr(self, k, v) + for key, value in opts.items(): + setattr(self, key, value) - options = {'one': 1, 'two': 2} + options = {"one": 1, "two": 2} override_options(options, FauxArgs({})) - assert json.dumps(options) == json.dumps({'one': 1, 'two': 2}) - - options = {'one': 1, 'two': 2} - override_options(options, FauxArgs({'one': 11})) - assert 
json.dumps(options) == json.dumps({'one': 11, 'two': 2}) + assert json.dumps(options) == json.dumps({"one": 1, "two": 2}) + + options = {"one": 1, "two": 2} + override_options(options, FauxArgs({"one": 11})) + assert json.dumps(options) == json.dumps({"one": 11, "two": 2}) - options = {'one': 1, 'two': 2} - override_options(options, FauxArgs({'three': 3})) - assert json.dumps(options) == json.dumps({'one': 1, 'two': 2}) + options = {"one": 1, "two": 2} + override_options(options, FauxArgs({"three": 3})) + assert json.dumps(options) == json.dumps({"one": 1, "two": 2}) diff --git a/jira_agile_metrics/config.py b/jira_agile_metrics/config.py index 2e7cab1..a2a2856 100644 --- a/jira_agile_metrics/config.py +++ b/jira_agile_metrics/config.py @@ -1,32 +1,31 @@ -import logging -import yaml import datetime +import logging import os.path from pydicti import odicti +import yaml -from .utils import StatusTypes - -from .calculators.cycletime import CycleTimeCalculator +from .calculators.ageingwip import AgeingWIPChartCalculator +from .calculators.burnup import BurnupCalculator from .calculators.cfd import CFDCalculator -from .calculators.scatterplot import ScatterplotCalculator +from .calculators.cycletime import CycleTimeCalculator +from .calculators.debt import DebtCalculator +from .calculators.defects import DefectsCalculator +from .calculators.forecast import BurnupForecastCalculator from .calculators.histogram import HistogramCalculator +from .calculators.impediments import ImpedimentsCalculator +from .calculators.netflow import NetFlowChartCalculator from .calculators.percentiles import PercentilesCalculator +from .calculators.progressreport import ProgressReportCalculator +from .calculators.scatterplot import ScatterplotCalculator from .calculators.throughput import ThroughputCalculator -from .calculators.burnup import BurnupCalculator -from .calculators.wip import WIPChartCalculator -from .calculators.netflow import NetFlowChartCalculator -from .calculators.ageingwip 
import AgeingWIPChartCalculator -from .calculators.forecast import BurnupForecastCalculator -from .calculators.impediments import ImpedimentsCalculator -from .calculators.debt import DebtCalculator -from .calculators.defects import DefectsCalculator from .calculators.waste import WasteCalculator -from .calculators.progressreport import ProgressReportCalculator +from .calculators.wip import WIPChartCalculator + CALCULATORS = ( CycleTimeCalculator, # should come first -- others depend on results from this one - CFDCalculator, # needs to come before burn-up charts, wip charts, and net flow charts + CFDCalculator, # needs to come before burn-up charts, wip charts, and net flow charts ScatterplotCalculator, HistogramCalculator, PercentilesCalculator, @@ -45,481 +44,514 @@ logger = logging.getLogger(__name__) + class ConfigError(Exception): pass + # From http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts -def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=odicti): - class OrderedLoader(Loader): +def ordered_load(stream, loader=yaml.Loader, object_pairs_hook=odicti): + class OrderedLoader(loader): pass - def construct_mapping(loader, node): - loader.flatten_mapping(node) - return object_pairs_hook(loader.construct_pairs(node)) + def construct_mapping(loader_, node): + loader_.flatten_mapping(node) + return object_pairs_hook(loader_.construct_pairs(node)) - OrderedLoader.add_constructor( - yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - construct_mapping - ) + OrderedLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, construct_mapping) return yaml.load(stream, OrderedLoader) + def force_list(val): - return list(val) if isinstance(val, (list, tuple,)) else [val] + return list(val) if isinstance(val, (list, tuple)) else [val] + def force_int(key, value): try: return int(value) except ValueError: - raise ConfigError("Could not convert value `%s` for key `%s` to integer" % (value, 
expand_key(key),)) from None + raise ConfigError("Could not convert value `%s` for key `%s` to integer" % (value, expand_key(key))) from None + def force_float(key, value): try: return float(value) except ValueError: - raise ConfigError("Could not convert value `%s` for key `%s` to decimal" % (value, expand_key(key),)) from None + raise ConfigError("Could not convert value `%s` for key `%s` to decimal" % (value, expand_key(key))) from None + def force_date(key, value): if not isinstance(value, datetime.date): - raise ConfigError("Value `%s` for key `%s` is not a date" % (value, expand_key(key),)) + raise ConfigError("Value `%s` for key `%s` is not a date" % (value, expand_key(key))) return value + def expand_key(key): - return str(key).replace('_', ' ').lower() + return str(key).replace("_", " ").lower() + def to_progress_report_teams_list(value): - return [{ - 'name': val[expand_key('name')] if expand_key('name') in val else None, - 'wip': force_int('wip', val[expand_key('wip')]) if expand_key('wip') in val else 1, - 'min_throughput': force_int('min_throughput', val[expand_key('min_throughput')]) if expand_key('min_throughput') in val else None, - 'max_throughput': force_int('max_throughput', val[expand_key('max_throughput')]) if expand_key('max_throughput') in val else None, - 'throughput_samples': val[expand_key('throughput_samples')] if expand_key('throughput_samples') in val else None, - 'throughput_samples_window': force_int('throughput_samples_window', val[expand_key('throughput_samples_window')]) if expand_key('throughput_samples_window') in val else None, - } for val in value] + return [ + { + "name": val[expand_key("name")] if expand_key("name") in val else None, + "wip": force_int("wip", val[expand_key("wip")]) if expand_key("wip") in val else 1, + "min_throughput": force_int("min_throughput", val[expand_key("min_throughput")]) + if expand_key("min_throughput") in val + else None, + "max_throughput": force_int("max_throughput", 
val[expand_key("max_throughput")]) + if expand_key("max_throughput") in val + else None, + "throughput_samples": val[expand_key("throughput_samples")] + if expand_key("throughput_samples") in val + else None, + "throughput_samples_window": force_int( + "throughput_samples_window", val[expand_key("throughput_samples_window")] + ) + if expand_key("throughput_samples_window") in val + else None, + } + for val in value + ] + def to_progress_report_outcomes_list(value): - return [{ - 'name': val[expand_key('name')] if expand_key('name') in val else None, - 'key': val[expand_key('key')] if expand_key('key') in val else None, - 'deadline': force_date('deadline', val[expand_key('deadline')]) if expand_key('deadline') in val else None, - 'epic_query': val[expand_key('epic_query')] if expand_key('epic_query') in val else None, - } for val in value] + return [ + { + "name": val[expand_key("name")] if expand_key("name") in val else None, + "key": val[expand_key("key")] if expand_key("key") in val else None, + "deadline": force_date("deadline", val[expand_key("deadline")]) if expand_key("deadline") in val else None, + "epic_query": val[expand_key("epic_query")] if expand_key("epic_query") in val else None, + } + for val in value + ] def config_to_options(data, cwd=None, extended=False): try: config = ordered_load(data, yaml.SafeLoader) - except Exception as e: - raise ConfigError("Unable to parse YAML configuration file.") from e + except Exception as ex: + raise ConfigError("Unable to parse YAML configuration file.") from ex if config is None: raise ConfigError("Configuration file is empty") from None options = { - 'connection': { - 'domain': None, - 'username': None, - 'password': None, - 'http_proxy': None, - 'https_proxy': None, - 'jira_server_version_check': True, - 'jira_client_options': {} + "connection": { + "domain": None, + "username": None, + "password": None, + "http_proxy": None, + "https_proxy": None, + "jira_server_version_check": True, + "jira_client_options": 
{}, + }, + "settings": { + "queries": [], + "query_attribute": None, + "attributes": {}, + "known_values": {}, + "cycle": [], + "max_results": None, + "verbose": False, + "quantiles": [0.5, 0.85, 0.95], + "backlog_column": None, + "committed_column": None, + "done_column": None, + "cycle_time_data": None, + "percentiles_data": None, + "scatterplot_window": None, + "scatterplot_data": None, + "scatterplot_chart": None, + "scatterplot_chart_title": None, + "histogram_window": None, + "histogram_data": None, + "histogram_chart": None, + "histogram_chart_title": None, + "cfd_window": None, + "cfd_data": None, + "cfd_chart": None, + "cfd_chart_title": None, + "throughput_frequency": "1W-MON", + "throughput_window": None, + "throughput_data": None, + "throughput_chart": None, + "throughput_chart_title": None, + "burnup_window": None, + "burnup_chart": None, + "burnup_chart_title": None, + "burnup_forecast_window": None, + "burnup_forecast_chart": None, + "burnup_forecast_chart_title": None, + "burnup_forecast_chart_target": None, + "burnup_forecast_chart_deadline": None, + "burnup_forecast_chart_deadline_confidence": None, + "burnup_forecast_chart_trials": 100, + "burnup_forecast_chart_throughput_window": 60, + "burnup_forecast_chart_throughput_window_end": None, + "wip_frequency": "1W-MON", + "wip_window": None, + "wip_chart": None, + "wip_chart_title": None, + "ageing_wip_chart": None, + "ageing_wip_chart_title": None, + "net_flow_frequency": "1W-MON", + "net_flow_window": None, + "net_flow_chart": None, + "net_flow_chart_title": None, + "impediments_data": None, + "impediments_window": None, + "impediments_chart": None, + "impediments_chart_title": None, + "impediments_days_chart": None, + "impediments_days_chart_title": None, + "impediments_status_chart": None, + "impediments_status_chart_title": None, + "impediments_status_days_chart": None, + "impediments_status_days_chart_title": None, + "defects_query": None, + "defects_window": None, + "defects_priority_field": 
None, + "defects_priority_values": None, + "defects_type_field": None, + "defects_type_values": None, + "defects_environment_field": None, + "defects_environment_values": None, + "defects_by_priority_chart": None, + "defects_by_priority_chart_title": None, + "defects_by_type_chart": None, + "defects_by_type_chart_title": None, + "defects_by_environment_chart": None, + "defects_by_environment_chart_title": None, + "debt_query": None, + "debt_window": None, + "debt_priority_field": None, + "debt_priority_values": None, + "debt_chart": None, + "debt_chart_title": None, + "debt_age_chart": None, + "debt_age_chart_title": None, + "debt_age_chart_bins": [30, 60, 90], + "waste_query": None, + "waste_window": None, + "waste_frequency": "MS", + "waste_chart": None, + "waste_chart_title": None, + "progress_report": None, + "progress_report_title": None, + "progress_report_epic_query_template": None, + "progress_report_story_query_template": None, + "progress_report_epic_deadline_field": None, + "progress_report_epic_min_stories_field": None, + "progress_report_epic_max_stories_field": None, + "progress_report_epic_team_field": None, + "progress_report_teams": None, + "progress_report_outcomes": None, + "progress_report_outcome_query": None, + "progress_report_outcome_deadline_field": None, }, - 'settings': { - 'queries': [], - 'query_attribute': None, - 'attributes': {}, - 'known_values': {}, - 'cycle': [], - 'max_results': None, - 'verbose': False, - - 'quantiles': [0.5, 0.85, 0.95], - - 'backlog_column': None, - 'committed_column': None, - 'done_column': None, - - 'cycle_time_data': None, - 'percentiles_data': None, - - 'scatterplot_window': None, - 'scatterplot_data': None, - 'scatterplot_chart': None, - 'scatterplot_chart_title': None, - - 'histogram_window': None, - 'histogram_data': None, - 'histogram_chart': None, - 'histogram_chart_title': None, - - 'cfd_window': None, - 'cfd_data': None, - 'cfd_chart': None, - 'cfd_chart_title': None, - - 'throughput_frequency': 
'1W-MON', - 'throughput_window': None, - 'throughput_data': None, - 'throughput_chart': None, - 'throughput_chart_title': None, - - 'burnup_window': None, - 'burnup_chart': None, - 'burnup_chart_title': None, - - 'burnup_forecast_window': None, - 'burnup_forecast_chart': None, - 'burnup_forecast_chart_title': None, - 'burnup_forecast_chart_target': None, - 'burnup_forecast_chart_deadline': None, - 'burnup_forecast_chart_deadline_confidence': None, - 'burnup_forecast_chart_trials': 100, - 'burnup_forecast_chart_throughput_window': 60, - 'burnup_forecast_chart_throughput_window_end': None, - - 'wip_frequency': '1W-MON', - 'wip_window': None, - 'wip_chart': None, - 'wip_chart_title': None, - - 'ageing_wip_chart': None, - 'ageing_wip_chart_title': None, - - 'net_flow_frequency': '1W-MON', - 'net_flow_window': None, - 'net_flow_chart': None, - 'net_flow_chart_title': None, - - 'impediments_data': None, - 'impediments_window': None, - 'impediments_chart': None, - 'impediments_chart_title': None, - 'impediments_days_chart': None, - 'impediments_days_chart_title': None, - 'impediments_status_chart': None, - 'impediments_status_chart_title': None, - 'impediments_status_days_chart': None, - 'impediments_status_days_chart_title': None, - - 'defects_query': None, - 'defects_window': None, - 'defects_priority_field': None, - 'defects_priority_values': None, - 'defects_type_field': None, - 'defects_type_values': None, - 'defects_environment_field': None, - 'defects_environment_values': None, - - 'defects_by_priority_chart': None, - 'defects_by_priority_chart_title': None, - 'defects_by_type_chart': None, - 'defects_by_type_chart_title': None, - 'defects_by_environment_chart': None, - 'defects_by_environment_chart_title': None, - - 'debt_query': None, - 'debt_window': None, - 'debt_priority_field': None, - 'debt_priority_values': None, - 'debt_chart': None, - 'debt_chart_title': None, - 'debt_age_chart': None, - 'debt_age_chart_title': None, - 'debt_age_chart_bins': [30, 60, 90], 
- - 'waste_query': None, - 'waste_window': None, - 'waste_frequency': 'MS', - 'waste_chart': None, - 'waste_chart_title': None, - - 'progress_report': None, - 'progress_report_title': None, - 'progress_report_epic_query_template': None, - 'progress_report_story_query_template': None, - 'progress_report_epic_deadline_field': None, - 'progress_report_epic_min_stories_field': None, - 'progress_report_epic_max_stories_field': None, - 'progress_report_epic_team_field': None, - 'progress_report_teams': None, - 'progress_report_outcomes': None, - 'progress_report_outcome_query': None, - 'progress_report_outcome_deadline_field': None, - } } # Recursively parse an `extends` file but only if a base path is given, # otherwise we can plausible leak files in server mode. - if 'extends' in config: + if "extends" in config: if cwd is None: raise ConfigError("`extends` is not supported here.") - extends_filename = os.path.abspath(os.path.normpath(os.path.join(cwd, config['extends'].replace('/', os.path.sep)))) + extends_filename = os.path.abspath( + os.path.normpath(os.path.join(cwd, config["extends"].replace("/", os.path.sep))) + ) if not os.path.exists(extends_filename): raise ConfigError("File `%s` referenced in `extends` not found." 
% extends_filename) from None - logger.debug("Extending file %s" % extends_filename) + logger.debug("Extending file %s", extends_filename) with open(extends_filename) as extends_file: options = config_to_options(extends_file.read(), cwd=os.path.dirname(extends_filename), extended=True) # Parse and validate Connection - if 'connection' in config: + if "connection" in config: - if 'domain' in config['connection']: - options['connection']['domain'] = config['connection']['domain'] + if "domain" in config["connection"]: + options["connection"]["domain"] = config["connection"]["domain"] - if 'username' in config['connection']: - options['connection']['username'] = config['connection']['username'] + if "username" in config["connection"]: + options["connection"]["username"] = config["connection"]["username"] - if 'password' in config['connection']: - options['connection']['password'] = config['connection']['password'] + if "password" in config["connection"]: + options["connection"]["password"] = config["connection"]["password"] - if 'http proxy' in config['connection']: - options['connection']['http_proxy'] = config['connection']['http proxy'] + if "http proxy" in config["connection"]: + options["connection"]["http_proxy"] = config["connection"]["http proxy"] - if 'https proxy' in config['connection']: - options['connection']['https_proxy'] = config['connection']['https proxy'] + if "https proxy" in config["connection"]: + options["connection"]["https_proxy"] = config["connection"]["https proxy"] - if 'jira client options' in config['connection']: - options['connection']['jira_client_options'] = config['connection']['jira client options'] + if "jira client options" in config["connection"]: + options["connection"]["jira_client_options"] = config["connection"]["jira client options"] - if 'jira server version check' in config['connection']: - options['connection']['jira_server_version_check'] = config['connection']['jira server version check'] + if "jira server version 
check" in config["connection"]: + options["connection"]["jira_server_version_check"] = config["connection"]["jira server version check"] # Parse and validate output options - if 'output' in config: + if "output" in config: - if 'quantiles' in config['output']: + if "quantiles" in config["output"]: try: - options['settings']['quantiles'] = list(map(float, config['output']['quantiles'])) + options["settings"]["quantiles"] = list(map(float, config["output"]["quantiles"])) except ValueError: - raise ConfigError("Could not convert value `%s` for key `quantiles` to a list of decimals" % (config['output']['quantiles'],)) from None + raise ConfigError( + "Could not convert value `%s` for key `quantiles` to a list of decimals" + % (config["output"]["quantiles"],) + ) from None # int values for key in [ - 'scatterplot_window', - 'histogram_window', - 'wip_window', - 'net_flow_window', - 'throughput_window', - 'cfd_window', - 'burnup_window', - 'burnup_forecast_window', - 'burnup_forecast_chart_throughput_window', - 'burnup_forecast_chart_target', - 'burnup_forecast_chart_trials', - 'impediments_window', - 'defects_window', - 'debt_window', - 'waste_window', + "scatterplot_window", + "histogram_window", + "wip_window", + "net_flow_window", + "throughput_window", + "cfd_window", + "burnup_window", + "burnup_forecast_window", + "burnup_forecast_chart_throughput_window", + "burnup_forecast_chart_target", + "burnup_forecast_chart_trials", + "impediments_window", + "defects_window", + "debt_window", + "waste_window", ]: - if expand_key(key) in config['output']: - options['settings'][key] = force_int(key, config['output'][expand_key(key)]) + if expand_key(key) in config["output"]: + options["settings"][key] = force_int(key, config["output"][expand_key(key)]) # float values for key in [ - 'burnup_forecast_chart_deadline_confidence', + "burnup_forecast_chart_deadline_confidence", ]: - if expand_key(key) in config['output']: - options['settings'][key] = force_float(key, 
config['output'][expand_key(key)]) + if expand_key(key) in config["output"]: + options["settings"][key] = force_float(key, config["output"][expand_key(key)]) # date values for key in [ - 'burnup_forecast_chart_throughput_window_end', - 'burnup_forecast_chart_deadline', + "burnup_forecast_chart_throughput_window_end", + "burnup_forecast_chart_deadline", ]: - if expand_key(key) in config['output']: - options['settings'][key] = force_date(key, config['output'][expand_key(key)]) + if expand_key(key) in config["output"]: + options["settings"][key] = force_date(key, config["output"][expand_key(key)]) # file name values for key in [ - 'scatterplot_chart', - 'histogram_chart', - 'cfd_chart', - 'throughput_chart', - 'burnup_chart', - 'burnup_forecast_chart', - 'wip_chart', - 'ageing_wip_chart', - 'net_flow_chart', - 'impediments_chart', - 'impediments_days_chart', - 'impediments_status_chart', - 'impediments_status_days_chart', - 'defects_by_priority_chart', - 'defects_by_type_chart', - 'defects_by_environment_chart', - 'debt_chart', - 'debt_age_chart', - 'waste_chart', - 'progress_report', + "scatterplot_chart", + "histogram_chart", + "cfd_chart", + "throughput_chart", + "burnup_chart", + "burnup_forecast_chart", + "wip_chart", + "ageing_wip_chart", + "net_flow_chart", + "impediments_chart", + "impediments_days_chart", + "impediments_status_chart", + "impediments_status_days_chart", + "defects_by_priority_chart", + "defects_by_type_chart", + "defects_by_environment_chart", + "debt_chart", + "debt_age_chart", + "waste_chart", + "progress_report", ]: - if expand_key(key) in config['output']: - options['settings'][key] = os.path.basename(config['output'][expand_key(key)]) + if expand_key(key) in config["output"]: + options["settings"][key] = os.path.basename(config["output"][expand_key(key)]) # file name list values for key in [ - 'cycle_time_data', - 'cfd_data', - 'scatterplot_data', - 'histogram_data', - 'throughput_data', - 'percentiles_data', - - 'impediments_data', + 
"cycle_time_data", + "cfd_data", + "scatterplot_data", + "histogram_data", + "throughput_data", + "percentiles_data", + "impediments_data", ]: - if expand_key(key) in config['output']: - options['settings'][key] = list(map(os.path.basename, force_list(config['output'][expand_key(key)]))) + if expand_key(key) in config["output"]: + options["settings"][key] = list(map(os.path.basename, force_list(config["output"][expand_key(key)]))) # list values for key in [ - 'defects_priority_values', - 'defects_type_values', - 'defects_environment_values', - 'debt_priority_values', - 'debt_age_chart_bins', + "defects_priority_values", + "defects_type_values", + "defects_environment_values", + "debt_priority_values", + "debt_age_chart_bins", ]: - if expand_key(key) in config['output']: - options['settings'][key] = force_list(config['output'][expand_key(key)]) + if expand_key(key) in config["output"]: + options["settings"][key] = force_list(config["output"][expand_key(key)]) # string values that copy straight over for key in [ - 'backlog_column', - 'committed_column', - 'done_column', - 'throughput_frequency', - 'scatterplot_chart_title', - 'histogram_chart_title', - 'cfd_chart_title', - 'throughput_chart_title', - 'burnup_chart_title', - 'burnup_forecast_chart_title', - 'wip_chart_title', - 'wip_frequency', - 'ageing_wip_chart_title', - 'net_flow_chart_title', - 'net_flow_frequency', - 'impediments_chart_title', - 'impediments_days_chart_title', - 'impediments_status_chart_title', - 'impediments_status_days_chart_title', - 'defects_query', - 'defects_by_priority_chart_title', - 'defects_priority_field', - 'defects_by_type_chart_title', - 'defects_type_field', - 'defects_by_environment_chart_title', - 'defects_environment_field', - 'debt_query', - 'debt_priority_field', - 'debt_chart_title', - 'debt_age_chart_title', - 'waste_query', - 'waste_frequency', - 'waste_chart_title', - 'progress_report_title', - 'progress_report_epic_query_template', - 
'progress_report_story_query_template', - 'progress_report_epic_deadline_field', - 'progress_report_epic_min_stories_field', - 'progress_report_epic_max_stories_field', - 'progress_report_epic_team_field', - 'progress_report_outcome_query', - 'progress_report_outcome_deadline_field', + "backlog_column", + "committed_column", + "done_column", + "throughput_frequency", + "scatterplot_chart_title", + "histogram_chart_title", + "cfd_chart_title", + "throughput_chart_title", + "burnup_chart_title", + "burnup_forecast_chart_title", + "wip_chart_title", + "wip_frequency", + "ageing_wip_chart_title", + "net_flow_chart_title", + "net_flow_frequency", + "impediments_chart_title", + "impediments_days_chart_title", + "impediments_status_chart_title", + "impediments_status_days_chart_title", + "defects_query", + "defects_by_priority_chart_title", + "defects_priority_field", + "defects_by_type_chart_title", + "defects_type_field", + "defects_by_environment_chart_title", + "defects_environment_field", + "debt_query", + "debt_priority_field", + "debt_chart_title", + "debt_age_chart_title", + "waste_query", + "waste_frequency", + "waste_chart_title", + "progress_report_title", + "progress_report_epic_query_template", + "progress_report_story_query_template", + "progress_report_epic_deadline_field", + "progress_report_epic_min_stories_field", + "progress_report_epic_max_stories_field", + "progress_report_epic_team_field", + "progress_report_outcome_query", + "progress_report_outcome_deadline_field", ]: - if expand_key(key) in config['output']: - options['settings'][key] = config['output'][expand_key(key)] + if expand_key(key) in config["output"]: + options["settings"][key] = config["output"][expand_key(key)] # Special objects for progress reports - if expand_key('progress_report_teams') in config['output']: - options['settings']['progress_report_teams'] = to_progress_report_teams_list(config['output'][expand_key('progress_report_teams')]) - if expand_key('progress_report_outcomes') 
in config['output']: - options['settings']['progress_report_outcomes'] = to_progress_report_outcomes_list(config['output'][expand_key('progress_report_outcomes')]) + if expand_key("progress_report_teams") in config["output"]: + options["settings"]["progress_report_teams"] = to_progress_report_teams_list( + config["output"][expand_key("progress_report_teams")] + ) + if expand_key("progress_report_outcomes") in config["output"]: + options["settings"]["progress_report_outcomes"] = to_progress_report_outcomes_list( + config["output"][expand_key("progress_report_outcomes")] + ) # Parse Queries and/or a single Query - if 'queries' in config: - options['settings']['query_attribute'] = config['queries'].get('attribute', None) - options['settings']['queries'] = [{ - 'value': q.get('value', None), - 'jql': q.get('jql', None), - } for q in config['queries']['criteria']] + if "queries" in config: + options["settings"]["query_attribute"] = config["queries"].get("attribute", None) + options["settings"]["queries"] = [ + {"value": q.get("value", None), "jql": q.get("jql", None)} for q in config["queries"]["criteria"] + ] - if 'query' in config: - options['settings']['queries'] = [{ - 'value': None, - 'jql': config['query'], - }] + if "query" in config: + options["settings"]["queries"] = [{"value": None, "jql": config["query"]}] - if not extended and len(options['settings']['queries']) == 0: + if not extended and len(options["settings"]["queries"]) == 0: logger.warning("No `Query` value or `Queries` section found. Many calculators rely on one of these.") # Parse Workflow. Assume first status is backlog and last status is complete. 
- if 'workflow' in config: - if len(config['workflow'].keys()) < 3: + if "workflow" in config: + if len(config["workflow"].keys()) < 3: raise ConfigError("`Workflow` section must contain at least three statuses") column_names = [] - for name, statuses in config['workflow'].items(): + for name, statuses in config["workflow"].items(): statuses = force_list(statuses) - options['settings']['cycle'].append({ - "name": name, - "statuses": statuses - }) + options["settings"]["cycle"].append({"name": name, "statuses": statuses}) column_names.append(name) - if options['settings']['backlog_column'] is None: - if options['settings']['committed_column'] is None: - options['settings']['backlog_column'] = column_names[0] - logger.info("`Backlog column` automatically set to `%s`", options['settings']['backlog_column']) - options['settings']['committed_column'] = column_names[1] - logger.info("`Committed column` automatically set to `%s`", options['settings']['committed_column']) + if options["settings"]["backlog_column"] is None: + if options["settings"]["committed_column"] is None: + options["settings"]["backlog_column"] = column_names[0] + logger.info("`Backlog column` automatically set to `%s`", options["settings"]["backlog_column"]) + options["settings"]["committed_column"] = column_names[1] + logger.info("`Committed column` automatically set to `%s`", options["settings"]["committed_column"]) else: - if options['settings']['committed_column'] not in column_names: - raise ConfigError("`Committed column` (%s) must exist in `Workflow`: %s" % (options['settings']['committed_column'], column_names)) - elif column_names.index(options['settings']['committed_column']) > 0: - options['settings']['backlog_column'] = column_names[column_names.index(options['settings']['committed_column'])-1] - logger.info("`Backlog column` automatically set to `%s`", options['settings']['backlog_column']) + if options["settings"]["committed_column"] not in column_names: + raise ConfigError( + 
"`Committed column` (%s) must exist in `Workflow`: %s" + % (options["settings"]["committed_column"], column_names) + ) + if column_names.index(options["settings"]["committed_column"]) > 0: + options["settings"]["backlog_column"] = column_names[ + column_names.index(options["settings"]["committed_column"]) - 1 + ] + logger.info("`Backlog column` automatically set to `%s`", options["settings"]["backlog_column"]) else: - raise ConfigError("There must be at least 1 column before `Committed column` (%s) in `Workflow`: %s" % (options['settings']['committed_column'], column_names)) + raise ConfigError( + "There must be at least 1 column before `Committed column` (%s) in `Workflow`: %s" + % (options["settings"]["committed_column"], column_names) + ) else: - if options['settings']['backlog_column'] not in column_names: - raise ConfigError("`Backlog column` (%s) must exist in `Workflow`: %s" % (options['settings']['backlog_column'], column_names)) - elif column_names.index(options['settings']['backlog_column']) < (len(column_names)-2): - options['settings']['committed_column'] = column_names[column_names.index(options['settings']['backlog_column'])+1] - logger.info("`Committed column` automatically set to `%s`", options['settings']['committed_column']) + if options["settings"]["backlog_column"] not in column_names: + raise ConfigError( + "`Backlog column` (%s) must exist in `Workflow`: %s" + % (options["settings"]["backlog_column"], column_names) + ) + if column_names.index(options["settings"]["backlog_column"]) < (len(column_names) - 2): + options["settings"]["committed_column"] = column_names[ + column_names.index(options["settings"]["backlog_column"]) + 1 + ] + logger.info("`Committed column` automatically set to `%s`", options["settings"]["committed_column"]) else: - raise ConfigError("There must be at least 2 columns after `Backlog column` (%s) in `Workflow`: %s" % (options['settings']['committed_column'], column_names)) - - if options['settings']['done_column'] is 
None: - options['settings']['done_column'] = column_names[-1] - logger.info("`Done column` automatically set to `%s`", options['settings']['done_column']) - elif options['settings']['done_column'] not in column_names: - raise ConfigError("`Done column` (%s) must exist in `Workflow`: %s" % (options['settings']['done_column'], column_names)) + raise ConfigError( + "There must be at least 2 columns after `Backlog column` (%s) in `Workflow`: %s" + % (options["settings"]["committed_column"], column_names) + ) + + if options["settings"]["done_column"] is None: + options["settings"]["done_column"] = column_names[-1] + logger.info("`Done column` automatically set to `%s`", options["settings"]["done_column"]) + elif options["settings"]["done_column"] not in column_names: + raise ConfigError( + "`Done column` (%s) must exist in `Workflow`: %s" % (options["settings"]["done_column"], column_names) + ) # backlog column must come before committed column - if not (column_names.index(options['settings']['backlog_column'])+1) == column_names.index(options['settings']['committed_column']): - raise ConfigError("`Backlog column` (%s) must come immediately before `Committed column` (%s) in `Workflow`" % (options['settings']['backlog_column'], options['settings']['committed_column'])) + if not (column_names.index(options["settings"]["backlog_column"]) + 1) == column_names.index( + options["settings"]["committed_column"] + ): + raise ConfigError( + "`Backlog column` (%s) must come immediately before `Committed column` (%s) in `Workflow`" + % (options["settings"]["backlog_column"], options["settings"]["committed_column"]) + ) # committed column must come before done column - if not column_names.index(options['settings']['committed_column']) < column_names.index(options['settings']['done_column']): - raise ConfigError("`Committed column` (%s) must come before `Done column` (%s) in `Workflow`: %s" % (options['settings']['committed_column'], options['settings']['done_column'], column_names)) 
+ if not column_names.index(options["settings"]["committed_column"]) < column_names.index( + options["settings"]["done_column"] + ): + raise ConfigError( + "`Committed column` (%s) must come before `Done column` (%s) in `Workflow`: %s" + % (options["settings"]["committed_column"], options["settings"]["done_column"], column_names) + ) # Make sure we have workflow (but only if this file is not being extended by another) - if not extended and len(options['settings']['cycle']) == 0: + if not extended and len(options["settings"]["cycle"]) == 0: raise ConfigError("`Workflow` section not found") # Parse attributes (fields) - merge from extended file if needed - if 'attributes' in config: - options['settings']['attributes'].update(dict(config['attributes'])) + if "attributes" in config: + options["settings"]["attributes"].update(dict(config["attributes"])) - if 'known values' in config: - for name, values in config['known values'].items(): - options['settings']['known_values'][name] = force_list(values) + if "known values" in config: + for name, values in config["known values"].items(): + options["settings"]["known_values"][name] = force_list(values) return options diff --git a/jira_agile_metrics/config_test.py b/jira_agile_metrics/config_test.py index 876476d..76de710 100644 --- a/jira_agile_metrics/config_test.py +++ b/jira_agile_metrics/config_test.py @@ -1,13 +1,9 @@ import datetime -import tempfile import os.path +import tempfile + +from .config import force_list, expand_key, config_to_options, ConfigError -from .config import ( - force_list, - expand_key, - config_to_options, - ConfigError -) def test_force_list(): assert force_list(None) == [None] @@ -15,15 +11,18 @@ def test_force_list(): assert force_list(("foo",)) == ["foo"] assert force_list(["foo"]) == ["foo"] + def test_expand_key(): assert expand_key("foo") == "foo" assert expand_key("foo_bar") == "foo bar" assert expand_key("FOO") == "foo" assert expand_key("FOO_bar") == "foo bar" + def 
test_config_to_options_minimal(): - options = config_to_options("""\ + options = config_to_options( + """\ Connection: Domain: https://foo.com @@ -33,19 +32,21 @@ def test_config_to_options_minimal(): Backlog: Backlog In progress: Build Done: Done -""") +""" + ) - assert options['connection']['domain'] == 'https://foo.com' - assert options['settings']['queries'][0] == {'value': None, 'jql': '(filter=123)'} + assert options["connection"]["domain"] == "https://foo.com" + assert options["settings"]["queries"][0] == {"value": None, "jql": "(filter=123)"} - assert options['settings']['backlog_column'] == 'Backlog' - assert options['settings']['committed_column'] == 'In progress' - assert options['settings']['done_column'] == 'Done' + assert options["settings"]["backlog_column"] == "Backlog" + assert options["settings"]["committed_column"] == "In progress" + assert options["settings"]["done_column"] == "Done" def test_config_to_options_maximal(): - options = config_to_options("""\ + options = config_to_options( + """\ Connection: Domain: https://foo.com Username: user1 @@ -223,160 +224,155 @@ def test_config_to_options_maximal(): Epic query: project = ABS and type = Feature Progress report outcome deadline field: Due date Progress report outcome query: project = ABC AND type = Outcome AND resolution IS EMPTY -""") - - assert options['connection'] == { - 'domain': 'https://foo.com', - 'jira_client_options': {}, - 'password': 'apassword', - 'username': 'user1', - 'http_proxy': 'https://proxy1.local', - 'https_proxy': 'https://proxy2.local', - 'jira_server_version_check': True +""" + ) + + assert options["connection"] == { + "domain": "https://foo.com", + "jira_client_options": {}, + "password": "apassword", + "username": "user1", + "http_proxy": "https://proxy1.local", + "https_proxy": "https://proxy2.local", + "jira_server_version_check": True, } - assert options['settings'] == { - 'cycle': [ - {'name': 'Backlog', 'statuses': ['Backlog']}, - {'name': 'Committed', 
'statuses': ['Next']}, - {'name': 'Build', 'statuses': ['Build']}, - {'name': 'Test', 'statuses': ['Code review', 'QA']}, - {'name': 'Done', 'statuses': ['Done']} + assert options["settings"] == { + "cycle": [ + {"name": "Backlog", "statuses": ["Backlog"]}, + {"name": "Committed", "statuses": ["Next"]}, + {"name": "Build", "statuses": ["Build"]}, + {"name": "Test", "statuses": ["Code review", "QA"]}, + {"name": "Done", "statuses": ["Done"]}, ], - - 'attributes': {'Release': 'Fix version/s', 'Team': 'Team'}, - 'known_values': {'Release': ['R01', 'R02', 'R03']}, - 'max_results': None, - 'verbose': False, - - 'queries': [{'jql': '(filter=123)', 'value': 'Team 1'}, - {'jql': '(filter=124)', 'value': 'Team 2'}], - 'query_attribute': 'Team', - - 'backlog_column': 'Backlog', - 'committed_column': 'Committed', - 'done_column': 'Done', - - 'quantiles': [0.1, 0.2], - - 'cycle_time_data': ['cycletime.csv'], - - 'ageing_wip_chart': 'ageing-wip.png', - 'ageing_wip_chart_title': 'Ageing WIP', - - 'burnup_window': 30, - 'burnup_chart': 'burnup.png', - 'burnup_chart_title': 'Burn-up', - - 'burnup_forecast_window': 30, - 'burnup_forecast_chart': 'burnup-forecast.png', - 'burnup_forecast_chart_deadline': datetime.date(2018, 6, 1), - 'burnup_forecast_chart_deadline_confidence': 0.85, - 'burnup_forecast_chart_target': 100, - 'burnup_forecast_chart_throughput_window': 30, - 'burnup_forecast_chart_throughput_window_end': datetime.date(2018, 3, 1), - 'burnup_forecast_chart_title': 'Burn-up forecast', - 'burnup_forecast_chart_trials': 50, - - 'cfd_window': 30, - 'cfd_chart': 'cfd.png', - 'cfd_chart_title': 'Cumulative Flow Diagram', - 'cfd_data': ['cfd.csv'], - - 'histogram_window': 30, - 'histogram_chart': 'histogram.png', - 'histogram_chart_title': 'Cycle time histogram', - 'histogram_data': ['histogram.csv'], - - 'net_flow_frequency': '5D', - 'net_flow_window': 3, - 'net_flow_chart': 'net-flow.png', - 'net_flow_chart_title': 'Net flow', - - 'percentiles_data': ['percentiles.csv'], - - 
'scatterplot_window': 30, - 'scatterplot_chart': 'scatterplot.png', - 'scatterplot_chart_title': 'Cycle time scatter plot', - 'scatterplot_data': ['scatterplot.csv'], - - 'throughput_frequency': '1D', - 'throughput_window': 3, - 'throughput_chart': 'throughput.png', - 'throughput_chart_title': 'Throughput trend', - 'throughput_data': ['throughput.csv'], - - 'wip_frequency': '3D', - 'wip_window': 3, - 'wip_chart': 'wip.png', - 'wip_chart_title': 'Work in Progress', - - 'impediments_data': ['impediments.csv'], - 'impediments_window': 3, - 'impediments_chart': 'impediments.png', - 'impediments_chart_title': 'Impediments', - 'impediments_days_chart': 'impediments-days.png', - 'impediments_days_chart_title': 'Total impeded days', - 'impediments_status_chart': 'impediments-status.png', - 'impediments_status_chart_title': 'Impediments by status', - 'impediments_status_days_chart': 'impediments-status-days.png', - 'impediments_status_days_chart_title': 'Total impeded days by status', - - 'defects_query': 'issueType = Bug', - 'defects_window': 3, - 'defects_priority_field': 'Priority', - 'defects_priority_values': ['Low', 'Medium', 'High'], - 'defects_type_field': 'Type', - 'defects_type_values': ['Config', 'Data', 'Code'], - 'defects_environment_field': 'Environment', - 'defects_environment_values': ['SIT', 'UAT', 'PROD'], - - 'defects_by_priority_chart': 'defects-by-priority.png', - 'defects_by_priority_chart_title': 'Defects by priority', - 'defects_by_type_chart': 'defects-by-type.png', - 'defects_by_type_chart_title': 'Defects by type', - 'defects_by_environment_chart': 'defects-by-environment.png', - 'defects_by_environment_chart_title': 'Defects by environment', - - 'debt_query': 'issueType = "Tech debt"', - 'debt_window': 3, - 'debt_priority_field': 'Priority', - 'debt_priority_values': ['Low', 'Medium', 'High'], - 'debt_chart': 'tech-debt.png', - 'debt_chart_title': 'Technical debt', - 'debt_age_chart': 'tech-debt-age.png', - 'debt_age_chart_title': 'Technical debt 
age', - 'debt_age_chart_bins': [10, 20, 30], - - 'waste_query': 'issueType = Story AND resolution IN (Withdrawn, Invalid)', - 'waste_window': 3, - 'waste_frequency': '2W-WED', - 'waste_chart': 'waste.png', - 'waste_chart_title': 'Waste', - - 'progress_report': 'progress.html', - 'progress_report_title': 'Test progress report', - 'progress_report_epic_query_template': 'project = ABC AND type = Epic AND Outcome = {outcome}', - 'progress_report_story_query_template': 'project = ABC AND type = Story AND "Epic link" = {epic}', - 'progress_report_epic_deadline_field': 'Due date', - 'progress_report_epic_min_stories_field': 'Min stories', - 'progress_report_epic_max_stories_field': 'Max stories', - 'progress_report_epic_team_field': 'Team', - 'progress_report_teams': [ - {'name': 'Team one', 'max_throughput': 10, 'min_throughput': 5, 'throughput_samples': None, 'throughput_samples_window': None, 'wip': 1}, - {'name': 'Team two', 'max_throughput': None, 'min_throughput': None, 'throughput_samples': 'project = ABC AND type = Story AND team = "Team two" AND resolution = "Done"', 'wip': 2, 'throughput_samples_window': 6} + "attributes": {"Release": "Fix version/s", "Team": "Team"}, + "known_values": {"Release": ["R01", "R02", "R03"]}, + "max_results": None, + "verbose": False, + "queries": [{"jql": "(filter=123)", "value": "Team 1"}, {"jql": "(filter=124)", "value": "Team 2"}], + "query_attribute": "Team", + "backlog_column": "Backlog", + "committed_column": "Committed", + "done_column": "Done", + "quantiles": [0.1, 0.2], + "cycle_time_data": ["cycletime.csv"], + "ageing_wip_chart": "ageing-wip.png", + "ageing_wip_chart_title": "Ageing WIP", + "burnup_window": 30, + "burnup_chart": "burnup.png", + "burnup_chart_title": "Burn-up", + "burnup_forecast_window": 30, + "burnup_forecast_chart": "burnup-forecast.png", + "burnup_forecast_chart_deadline": datetime.date(2018, 6, 1), + "burnup_forecast_chart_deadline_confidence": 0.85, + "burnup_forecast_chart_target": 100, + 
"burnup_forecast_chart_throughput_window": 30, + "burnup_forecast_chart_throughput_window_end": datetime.date(2018, 3, 1), + "burnup_forecast_chart_title": "Burn-up forecast", + "burnup_forecast_chart_trials": 50, + "cfd_window": 30, + "cfd_chart": "cfd.png", + "cfd_chart_title": "Cumulative Flow Diagram", + "cfd_data": ["cfd.csv"], + "histogram_window": 30, + "histogram_chart": "histogram.png", + "histogram_chart_title": "Cycle time histogram", + "histogram_data": ["histogram.csv"], + "net_flow_frequency": "5D", + "net_flow_window": 3, + "net_flow_chart": "net-flow.png", + "net_flow_chart_title": "Net flow", + "percentiles_data": ["percentiles.csv"], + "scatterplot_window": 30, + "scatterplot_chart": "scatterplot.png", + "scatterplot_chart_title": "Cycle time scatter plot", + "scatterplot_data": ["scatterplot.csv"], + "throughput_frequency": "1D", + "throughput_window": 3, + "throughput_chart": "throughput.png", + "throughput_chart_title": "Throughput trend", + "throughput_data": ["throughput.csv"], + "wip_frequency": "3D", + "wip_window": 3, + "wip_chart": "wip.png", + "wip_chart_title": "Work in Progress", + "impediments_data": ["impediments.csv"], + "impediments_window": 3, + "impediments_chart": "impediments.png", + "impediments_chart_title": "Impediments", + "impediments_days_chart": "impediments-days.png", + "impediments_days_chart_title": "Total impeded days", + "impediments_status_chart": "impediments-status.png", + "impediments_status_chart_title": "Impediments by status", + "impediments_status_days_chart": "impediments-status-days.png", + "impediments_status_days_chart_title": "Total impeded days by status", + "defects_query": "issueType = Bug", + "defects_window": 3, + "defects_priority_field": "Priority", + "defects_priority_values": ["Low", "Medium", "High"], + "defects_type_field": "Type", + "defects_type_values": ["Config", "Data", "Code"], + "defects_environment_field": "Environment", + "defects_environment_values": ["SIT", "UAT", "PROD"], + 
"defects_by_priority_chart": "defects-by-priority.png", + "defects_by_priority_chart_title": "Defects by priority", + "defects_by_type_chart": "defects-by-type.png", + "defects_by_type_chart_title": "Defects by type", + "defects_by_environment_chart": "defects-by-environment.png", + "defects_by_environment_chart_title": "Defects by environment", + "debt_query": 'issueType = "Tech debt"', + "debt_window": 3, + "debt_priority_field": "Priority", + "debt_priority_values": ["Low", "Medium", "High"], + "debt_chart": "tech-debt.png", + "debt_chart_title": "Technical debt", + "debt_age_chart": "tech-debt-age.png", + "debt_age_chart_title": "Technical debt age", + "debt_age_chart_bins": [10, 20, 30], + "waste_query": "issueType = Story AND resolution IN (Withdrawn, Invalid)", + "waste_window": 3, + "waste_frequency": "2W-WED", + "waste_chart": "waste.png", + "waste_chart_title": "Waste", + "progress_report": "progress.html", + "progress_report_title": "Test progress report", + "progress_report_epic_query_template": "project = ABC AND type = Epic AND Outcome = {outcome}", + "progress_report_story_query_template": 'project = ABC AND type = Story AND "Epic link" = {epic}', + "progress_report_epic_deadline_field": "Due date", + "progress_report_epic_min_stories_field": "Min stories", + "progress_report_epic_max_stories_field": "Max stories", + "progress_report_epic_team_field": "Team", + "progress_report_teams": [ + { + "name": "Team one", + "max_throughput": 10, + "min_throughput": 5, + "throughput_samples": None, + "throughput_samples_window": None, + "wip": 1, + }, + { + "name": "Team two", + "max_throughput": None, + "min_throughput": None, + "throughput_samples": 'project = ABC AND type = Story AND team = "Team two" AND resolution = "Done"', + "wip": 2, + "throughput_samples_window": 6, + }, ], - 'progress_report_outcomes': [ - {'key': 'O1', 'name': 'Outcome one', 'deadline': datetime.date(2019, 6, 1), 'epic_query': None}, - {'key': None, 'name': 'Outcome two', 
'deadline': None, 'epic_query': "project = ABS and type = Feature"} + "progress_report_outcomes": [ + {"key": "O1", "name": "Outcome one", "deadline": datetime.date(2019, 6, 1), "epic_query": None}, + {"key": None, "name": "Outcome two", "deadline": None, "epic_query": "project = ABS and type = Feature"}, ], - 'progress_report_outcome_deadline_field': 'Due date', - 'progress_report_outcome_query': 'project = ABC AND type = Outcome AND resolution IS EMPTY', + "progress_report_outcome_deadline_field": "Due date", + "progress_report_outcome_query": "project = ABC AND type = Outcome AND resolution IS EMPTY", } + def test_config_to_options_strips_directories(): - options = config_to_options("""\ + options = config_to_options( + """\ Connection: Domain: https://foo.com @@ -403,29 +399,31 @@ def test_config_to_options_strips_directories(): WIP chart: tmp/wip.png Ageing WIP chart: tmp/ageing-wip.png Net flow chart: tmp/net-flow.png -""") - - assert options['settings']['cycle_time_data'] == ['cycletime.csv'] - assert options['settings']['ageing_wip_chart'] == 'ageing-wip.png' - assert options['settings']['burnup_chart'] == 'burnup.png' - assert options['settings']['burnup_forecast_chart'] == 'burnup-forecast.png' - assert options['settings']['cfd_chart'] == 'cfd.png' - assert options['settings']['histogram_chart'] == 'histogram.png' - assert options['settings']['histogram_data'] == ['histogram.csv'] - assert options['settings']['net_flow_chart'] == 'net-flow.png' - assert options['settings']['percentiles_data'] == ['percentiles.csv'] - assert options['settings']['scatterplot_chart'] == 'scatterplot.png' - assert options['settings']['scatterplot_data'] == ['scatterplot.csv'] - assert options['settings']['throughput_chart'] == 'throughput.png' - assert options['settings']['throughput_data'] == ['throughput.csv'] - assert options['settings']['wip_chart'] == 'wip.png' +""" + ) + + assert options["settings"]["cycle_time_data"] == ["cycletime.csv"] + assert 
options["settings"]["ageing_wip_chart"] == "ageing-wip.png" + assert options["settings"]["burnup_chart"] == "burnup.png" + assert options["settings"]["burnup_forecast_chart"] == "burnup-forecast.png" + assert options["settings"]["cfd_chart"] == "cfd.png" + assert options["settings"]["histogram_chart"] == "histogram.png" + assert options["settings"]["histogram_data"] == ["histogram.csv"] + assert options["settings"]["net_flow_chart"] == "net-flow.png" + assert options["settings"]["percentiles_data"] == ["percentiles.csv"] + assert options["settings"]["scatterplot_chart"] == "scatterplot.png" + assert options["settings"]["scatterplot_data"] == ["scatterplot.csv"] + assert options["settings"]["throughput_chart"] == "throughput.png" + assert options["settings"]["throughput_data"] == ["throughput.csv"] + assert options["settings"]["wip_chart"] == "wip.png" def test_config_to_options_extends(): try: - with tempfile.NamedTemporaryFile(delete=False) as fp: + with tempfile.NamedTemporaryFile(delete=False) as config_file: # Base file - fp.write(b"""\ + config_file.write( + b"""\ Connection: Domain: https://foo.com @@ -446,13 +444,15 @@ def test_config_to_options_extends(): Backlog column: Backlog Committed column: In progress Done column: Done -""") +""" + ) - fp.seek(0) + config_file.seek(0) # Extend the file - options = config_to_options(""" + options = config_to_options( + """ Extends: %s Connection: @@ -471,37 +471,42 @@ def test_config_to_options_extends(): - 0.7 Cycle time data: cycletime.csv -""" % fp.name, cwd=os.path.abspath(fp.name)) +""" + % config_file.name, + cwd=os.path.abspath(config_file.name), + ) finally: - os.remove(fp.name) + os.remove(config_file.name) # overridden - assert options['connection']['domain'] == 'https://bar.com' + assert options["connection"]["domain"] == "https://bar.com" # from extended base - assert options['settings']['backlog_column'] == 'Backlog' - assert options['settings']['committed_column'] == 'In progress' - assert 
options['settings']['done_column'] == 'Done' + assert options["settings"]["backlog_column"] == "Backlog" + assert options["settings"]["committed_column"] == "In progress" + assert options["settings"]["done_column"] == "Done" # from extending file - assert options['settings']['cycle_time_data'] == ['cycletime.csv'] + assert options["settings"]["cycle_time_data"] == ["cycletime.csv"] # overridden - assert options['settings']['quantiles'] == [0.5, 0.7] + assert options["settings"]["quantiles"] == [0.5, 0.7] # merged - assert options['settings']['attributes'] == { - 'Release': 'Release number', - 'Priority': 'Severity', - 'Team': 'Assigned team' + assert options["settings"]["attributes"] == { + "Release": "Release number", + "Priority": "Severity", + "Team": "Assigned team", } + def test_config_to_options_extends_blocked_if_no_explicit_working_directory(): - with tempfile.NamedTemporaryFile() as fp: + with tempfile.NamedTemporaryFile() as config_file: # Base file - fp.write(b"""\ + config_file.write( + b"""\ Connection: Domain: https://foo.com @@ -518,14 +523,16 @@ def test_config_to_options_extends_blocked_if_no_explicit_working_directory(): Backlog column: Backlog Committed column: Committed Done column: Done -""") +""" + ) - fp.seek(0) + config_file.seek(0) # Extend the file try: - config_to_options(""" + config_to_options( + """ Extends: %s Connection: @@ -540,16 +547,21 @@ def test_config_to_options_extends_blocked_if_no_explicit_working_directory(): - 0.7 Cycle time data: cycletime.csv -""" % fp.name, cwd=None) +""" + % config_file.name, + cwd=None, + ) except ConfigError: assert True else: assert False + def test_config_to_options_jira_server_bypass(): - options = config_to_options("""\ + options = config_to_options( + """\ Connection: Domain: https://foo.com JIRA server version check: False @@ -560,8 +572,8 @@ def test_config_to_options_jira_server_bypass(): Backlog: Backlog In progress: Build Done: Done -""") - - assert options['connection']['domain'] == 
'https://foo.com' - assert options['connection']['jira_server_version_check'] == False +""" + ) + assert options["connection"]["domain"] == "https://foo.com" + assert not options["connection"]["jira_server_version_check"] diff --git a/jira_agile_metrics/conftest.py b/jira_agile_metrics/conftest.py index 169ef9a..b5613ba 100644 --- a/jira_agile_metrics/conftest.py +++ b/jira_agile_metrics/conftest.py @@ -1,292 +1,429 @@ -import pytest from pandas import DataFrame, Timestamp, NaT +import pytest +from .calculators.cfd import CFDCalculator +from .calculators.cycletime import CycleTimeCalculator from .querymanager import QueryManager from .utils import extend_dict -from .calculators.cycletime import CycleTimeCalculator -from .calculators.cfd import CFDCalculator # Fake a portion of the JIRA API -class FauxFieldValue(object): - """A complex field value, with a name and a typed value - """ + +class FauxFieldValue: + """A complex field value, with a name and a typed value""" + def __init__(self, name, value): self.name = name self.value = value -class FauxFields(object): - """Container for `issue.fields` - """ + +class FauxFields: + """Container for `issue.fields`""" + def __init__(self, fields): self.__dict__.update(fields) -class FauxChangeItem(object): - """An item in a changelog change - """ + +class FauxChangeItem: + """An item in a changelog change""" + def __init__(self, field, fromString, toString): self.field = field self.from_ = self.fromString = fromString self.to = self.toString = toString -class FauxChange(object): - """A change in a changelog. Contains a list of change items. - """ + +class FauxChange: + """A change in a changelog. Contains a list of change items.""" + def __init__(self, created, items): self.created = created self.items = [FauxChangeItem(*i) for i in items] -class FauxChangelog(object): - """A changelog. Contains a list of changes in `histories`. - """ + +class FauxChangelog: + """A changelog. 
Contains a list of changes in `histories`.""" + def __init__(self, changes): self.histories = changes -class FauxIssue(object): - """An issue, with a key, change log, and set of fields - """ + +class FauxIssue: + """An issue, with a key, change log, and set of fields""" + def __init__(self, key, changes, **fields): self.key = key self.fields = FauxFields(fields) self.changelog = FauxChangelog(changes) -class FauxJIRA(object): + +class FauxJIRA: """JIRA interface. Initialised with a set of issues, which will be returned by `search_issues()`. """ - def __init__(self, fields, issues, options={'server': 'https://example.org'}, filter=None): + def __init__(self, fields, issues, options=None, filter_=None): + if options is None: + options = {"server": "https://example.org"} self._options = options self._fields = fields # [{ id, name }] self._issues = issues - self._filter = filter + self._filter = filter_ + + def client_info(self): + """Get the server this client is connected to.""" + return self._options["server"] def fields(self): return self._fields + def issues(self): + return self._issues + def search_issues(self, jql, *args, **kwargs): return self._issues if self._filter is None else [i for i in self._issues if self._filter(i, jql)] + # Fixtures -@pytest.fixture -def minimal_settings(): + +@pytest.fixture(name="minimal_settings") +def fixture_minimal_settings(): """The smallest `settings` required to build a query manager and cycle time calculation. 
""" return { - 'attributes': {}, - 'known_values': { - 'Release': ['R1', 'R3'] - }, - 'max_results': None, - 'verbose': False, - 'cycle': [ - {'name': 'Backlog', 'statuses': ['Backlog']}, - {'name': 'Committed', 'statuses': ['Next']}, - {'name': 'Build', 'statuses': ['Build']}, - {'name': 'Test', 'statuses': ['Code review', 'QA']}, - {'name': 'Done', 'statuses': ['Done']} + "attributes": {}, + "known_values": {"Release": ["R1", "R3"]}, + "max_results": None, + "verbose": False, + "cycle": [ + {"name": "Backlog", "statuses": ["Backlog"]}, + {"name": "Committed", "statuses": ["Next"]}, + {"name": "Build", "statuses": ["Build"]}, + {"name": "Test", "statuses": ["Code review", "QA"]}, + {"name": "Done", "statuses": ["Done"]}, ], - 'query_attribute': None, - 'queries': [{'jql': '(filter=123)', 'value': None}], - - 'backlog_column': 'Backlog', - 'committed_column': 'Committed', - 'done_column': 'Done', + "query_attribute": None, + "queries": [{"jql": "(filter=123)", "value": None}], + "backlog_column": "Backlog", + "committed_column": "Committed", + "done_column": "Done", } -@pytest.fixture -def custom_settings(minimal_settings): - """A `settings` dict that uses custom fields and attributes. - """ - return extend_dict(minimal_settings, { - 'attributes': { - 'Release': 'Releases', - 'Team': 'Team', - 'Estimate': 'Size' +@pytest.fixture(name="custom_settings") +def fixture_custom_settings(minimal_settings): + """A `settings` dict that uses custom fields and attributes.""" + return extend_dict( + minimal_settings, + { + "attributes": {"Release": "Releases", "Team": "Team", "Estimate": "Size"}, + "known_values": {"Release": ["R1", "R3"]}, }, - 'known_values': { - 'Release': ['R1', 'R3'] - }, - }) + ) + # Fields + corresponding columns -@pytest.fixture -def minimal_fields(): - """A `fields` list for all basic fields, but no custom fields. 
- """ + +@pytest.fixture(name="minimal_fields") +def fixture_minimal_fields(): + """A `fields` list for all basic fields, but no custom fields.""" return [ - {'id': 'summary', 'name': 'Summary'}, - {'id': 'issuetype', 'name': 'Issue type'}, - {'id': 'status', 'name': 'Status'}, - {'id': 'resolution', 'name': 'Resolution'}, - {'id': 'created', 'name': 'Created date'}, - {'id': 'customfield_100', 'name': 'Flagged'}, + {"id": "summary", "name": "Summary"}, + {"id": "issuetype", "name": "Issue type"}, + {"id": "status", "name": "Status"}, + {"id": "resolution", "name": "Resolution"}, + {"id": "created", "name": "Created date"}, + {"id": "customfield_100", "name": "Flagged"}, ] -@pytest.fixture -def custom_fields(minimal_fields): - """A `fields` list with the three custom fields used by `custom_settings` - """ + +@pytest.fixture(name="custom_fields") +def fixture_custom_fields(minimal_fields): + """A `fields` list with the three custom fields used by `custom_settings`""" return minimal_fields + [ - {'id': 'customfield_001', 'name': 'Team'}, - {'id': 'customfield_002', 'name': 'Size'}, - {'id': 'customfield_003', 'name': 'Releases'}, + {"id": "customfield_001", "name": "Team"}, + {"id": "customfield_002", "name": "Size"}, + {"id": "customfield_003", "name": "Releases"}, ] -@pytest.fixture -def minimal_cycle_time_columns(): + +@pytest.fixture(name="minimal_cycle_time_columns") +def fixture_minimal_cycle_time_columns(): """A columns list for the results of CycleTimeCalculator without any custom fields. 
""" return [ - 'key', 'url', 'issue_type', 'summary', 'status', 'resolution', - 'cycle_time', 'completed_timestamp', 'blocked_days', 'impediments', - 'Backlog', 'Committed', 'Build', 'Test', 'Done' + "key", + "url", + "issue_type", + "summary", + "status", + "resolution", + "cycle_time", + "completed_timestamp", + "blocked_days", + "impediments", + "Backlog", + "Committed", + "Build", + "Test", + "Done", ] -@pytest.fixture -def custom_cycle_time_columns(minimal_fields): + +@pytest.fixture(name="custom_cycle_time_columns") +def fixture_custom_cycle_time_columns(minimal_fields): """A columns list for the results of CycleTimeCalculator with the three custom fields from `custom_settings`. """ return [ - 'key', 'url', 'issue_type', 'summary', 'status', 'resolution', - 'Estimate', 'Release', 'Team', - 'cycle_time', 'completed_timestamp', 'blocked_days', 'impediments', - 'Backlog', 'Committed', 'Build', 'Test', 'Done' + "key", + "url", + "issue_type", + "summary", + "status", + "resolution", + "Estimate", + "Release", + "Team", + "cycle_time", + "completed_timestamp", + "blocked_days", + "impediments", + "Backlog", + "Committed", + "Build", + "Test", + "Done", ] -@pytest.fixture -def cfd_columns(): - """A columns list for the results of the CFDCalculator. 
- """ - return [ - 'Backlog', - 'Committed', - 'Build', - 'Test', - 'Done' - ] + +@pytest.fixture(name="cfd_columns") +def fixture_cfd_columns(): + """A columns list for the results of the CFDCalculator.""" + return ["Backlog", "Committed", "Build", "Test", "Done"] + # Query manager -@pytest.fixture -def minimal_query_manager(minimal_fields, minimal_settings): - """A minimal query manager (no custom fields) - """ + +@pytest.fixture(name="minimal_query_manager") +def fixture_minimal_query_manager(minimal_fields, minimal_settings): + """A minimal query manager (no custom fields)""" jira = FauxJIRA(fields=minimal_fields, issues=[]) return QueryManager(jira, minimal_settings) -@pytest.fixture -def custom_query_manager(custom_fields, custom_settings): - """A query manager capable of returning values for custom fields - """ + +@pytest.fixture(name="custom_query_manager") +def fixture_custom_query_manager(custom_fields, custom_settings): + """A query manager capable of returning values for custom fields""" jira = FauxJIRA(fields=custom_fields, issues=[]) return QueryManager(jira, custom_settings) # Results object with rich cycle time data + def _issues(issues): - return [{ - 'key': 'A-%d' % (idx + 1), - 'url': 'https://example.org/browse/A-%d' % (idx + 1), - 'issue_type': 'Story', - 'summary': 'Generated issue A-%d' % (idx + 1), - 'status': ( - "Done" if i['Done'] is not NaT else - "Test" if i['Test'] is not NaT else - "Build" if i['Build'] is not NaT else - "Committed" if i['Committed'] is not NaT else - "Backlog" - ), - 'resoluton': "Done" if i['Done'] is not NaT else None, - 'completed_timestamp': i['Done'] if i['Done'] is not NaT else None, - 'cycle_time': (i['Done'] - i['Committed']) if (i['Done'] is not NaT and i['Committed'] is not NaT) else None, - 'blocked_days': i.get('blocked_days', 0), - 'impediments': i.get('impediments', []), - 'Backlog': i['Backlog'], - 'Committed': i['Committed'], - 'Build': i['Build'], - 'Test': i['Test'], - 'Done': i['Done'] - } for idx, 
i in enumerate(issues)] + return [ + { + "key": "A-%d" % (idx + 1), + "url": "https://example.org/browse/A-%d" % (idx + 1), + "issue_type": "Story", + "summary": "Generated issue A-%d" % (idx + 1), + "status": ( + "Done" + if i["Done"] is not NaT + else "Test" + if i["Test"] is not NaT + else "Build" + if i["Build"] is not NaT + else "Committed" + if i["Committed"] is not NaT + else "Backlog" + ), + "resoluton": "Done" if i["Done"] is not NaT else None, + "completed_timestamp": i["Done"] if i["Done"] is not NaT else None, + "cycle_time": (i["Done"] - i["Committed"]) + if (i["Done"] is not NaT and i["Committed"] is not NaT) + else None, + "blocked_days": i.get("blocked_days", 0), + "impediments": i.get("impediments", []), + "Backlog": i["Backlog"], + "Committed": i["Committed"], + "Build": i["Build"], + "Test": i["Test"], + "Done": i["Done"], + } + for idx, i in enumerate(issues) + ] + def _ts(datestring, timestring="00:00:00", freq=None): - return Timestamp('%s %s' % (datestring, timestring,), freq=freq) + return Timestamp("%s %s" % (datestring, timestring), freq=freq) -@pytest.fixture -def minimal_cycle_time_results(minimal_cycle_time_columns): - """A results dict mimicing a minimal result from the CycleTimeCalculator. 
- """ + +@pytest.fixture(name="minimal_cycle_time_results") +def fixture_minimal_cycle_time_results(minimal_cycle_time_columns): + """A results dict mimicing a minimal result from the CycleTimeCalculator.""" return { - CycleTimeCalculator: DataFrame(_issues([ - dict(Backlog=_ts('2018-01-01'), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-02'), Committed=_ts('2018-01-03'), Build=NaT, Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-03'), Committed=_ts('2018-01-03'), Build=_ts('2018-01-04'), Test=_ts('2018-01-05'), Done=_ts('2018-01-06')), - dict(Backlog=_ts('2018-01-04'), Committed=_ts('2018-01-04'), Build=NaT, Test=NaT, Done=NaT), - ]), columns=minimal_cycle_time_columns) + CycleTimeCalculator: DataFrame( + _issues( + [ + dict(Backlog=_ts("2018-01-01"), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), + dict(Backlog=_ts("2018-01-02"), Committed=_ts("2018-01-03"), Build=NaT, Test=NaT, Done=NaT), + dict( + Backlog=_ts("2018-01-03"), + Committed=_ts("2018-01-03"), + Build=_ts("2018-01-04"), + Test=_ts("2018-01-05"), + Done=_ts("2018-01-06"), + ), + dict(Backlog=_ts("2018-01-04"), Committed=_ts("2018-01-04"), Build=NaT, Test=NaT, Done=NaT), + ] + ), + columns=minimal_cycle_time_columns, + ) } -@pytest.fixture -def large_cycle_time_results(minimal_cycle_time_columns): - """A results dict mimicing a larger result from the CycleTimeCalculator. 
- """ + +@pytest.fixture(name="large_cycle_time_results") +def fixture_large_cycle_time_results(minimal_cycle_time_columns): + """A results dict mimicing a larger result from the CycleTimeCalculator.""" return { - CycleTimeCalculator: DataFrame(_issues([ - # three issues in the backlog - dict(Backlog=_ts('2018-01-01'), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-02'), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-03'), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), - - # three issues started - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=NaT, Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-03'), Build=NaT, Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-03'), Build=NaT, Test=NaT, Done=NaT), - - # three issues in build - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-03'), Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-04'), Test=NaT, Done=NaT), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-04'), Test=NaT, Done=NaT), - - # three issues in test - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-03'), Test=_ts('2018-01-04'), Done=NaT), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-03'), Test=_ts('2018-01-05'), Done=NaT), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-03'), Test=_ts('2018-01-05'), Done=NaT), - - # six issues done, with different cycle times - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-03'), Test=_ts('2018-01-04'), Done=_ts('2018-01-07')), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-02'), Build=_ts('2018-01-03'), Test=_ts('2018-01-05'), Done=_ts('2018-01-07')), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-03'), 
Build=_ts('2018-01-03'), Test=_ts('2018-01-05'), Done=_ts('2018-01-08')), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-03'), Build=_ts('2018-01-03'), Test=_ts('2018-01-04'), Done=_ts('2018-01-08')), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-04'), Build=_ts('2018-01-05'), Test=_ts('2018-01-05'), Done=_ts('2018-01-09')), - dict(Backlog=_ts('2018-01-01'), Committed=_ts('2018-01-05'), Build=_ts('2018-01-06'), Test=_ts('2018-01-08'), Done=_ts('2018-01-09')), - ]), columns=minimal_cycle_time_columns) + CycleTimeCalculator: DataFrame( + _issues( + [ + # three issues in the backlog + dict(Backlog=_ts("2018-01-01"), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), + dict(Backlog=_ts("2018-01-02"), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), + dict(Backlog=_ts("2018-01-03"), Committed=NaT, Build=NaT, Test=NaT, Done=NaT), + # three issues started + dict(Backlog=_ts("2018-01-01"), Committed=_ts("2018-01-02"), Build=NaT, Test=NaT, Done=NaT), + dict(Backlog=_ts("2018-01-01"), Committed=_ts("2018-01-03"), Build=NaT, Test=NaT, Done=NaT), + dict(Backlog=_ts("2018-01-01"), Committed=_ts("2018-01-03"), Build=NaT, Test=NaT, Done=NaT), + # three issues in build + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-03"), + Test=NaT, + Done=NaT, + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-04"), + Test=NaT, + Done=NaT, + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-04"), + Test=NaT, + Done=NaT, + ), + # three issues in test + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-04"), + Done=NaT, + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-05"), + Done=NaT, + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-05"), + Done=NaT, + ), + 
# six issues done, with different cycle times + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-04"), + Done=_ts("2018-01-07"), + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-02"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-05"), + Done=_ts("2018-01-07"), + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-03"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-05"), + Done=_ts("2018-01-08"), + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-03"), + Build=_ts("2018-01-03"), + Test=_ts("2018-01-04"), + Done=_ts("2018-01-08"), + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-04"), + Build=_ts("2018-01-05"), + Test=_ts("2018-01-05"), + Done=_ts("2018-01-09"), + ), + dict( + Backlog=_ts("2018-01-01"), + Committed=_ts("2018-01-05"), + Build=_ts("2018-01-06"), + Test=_ts("2018-01-08"), + Done=_ts("2018-01-09"), + ), + ] + ), + columns=minimal_cycle_time_columns, + ) } -@pytest.fixture -def minimal_cfd_results(minimal_cycle_time_results, cfd_columns): - """A results dict mimicing a minimal result from the CycleTimeCalculator. 
- """ - return extend_dict(minimal_cycle_time_results, { - CFDCalculator: DataFrame([ - {'Backlog': 1.0, 'Committed': 0.0, 'Build': 0.0, 'Test': 0.0, 'Done': 0.0}, - {'Backlog': 2.0, 'Committed': 0.0, 'Build': 0.0, 'Test': 0.0, 'Done': 0.0}, - {'Backlog': 3.0, 'Committed': 2.0, 'Build': 0.0, 'Test': 0.0, 'Done': 0.0}, - {'Backlog': 4.0, 'Committed': 3.0, 'Build': 1.0, 'Test': 0.0, 'Done': 0.0}, - {'Backlog': 4.0, 'Committed': 3.0, 'Build': 1.0, 'Test': 1.0, 'Done': 0.0}, - {'Backlog': 4.0, 'Committed': 3.0, 'Build': 1.0, 'Test': 1.0, 'Done': 1.0}, - ], columns=cfd_columns, index=[ - _ts('2018-01-01', '00:00:00', freq='D'), - _ts('2018-01-02', '00:00:00', freq='D'), - _ts('2018-01-03', '00:00:00', freq='D'), - _ts('2018-01-04', '00:00:00', freq='D'), - _ts('2018-01-05', '00:00:00', freq='D'), - _ts('2018-01-06', '00:00:00', freq='D') - ]) - }) + +@pytest.fixture(name="minimal_cfd_results") +def fixture_minimal_cfd_results(minimal_cycle_time_results, cfd_columns): + """A results dict mimicing a minimal result from the CycleTimeCalculator.""" + return extend_dict( + minimal_cycle_time_results, + { + CFDCalculator: DataFrame( + [ + {"Backlog": 1.0, "Committed": 0.0, "Build": 0.0, "Test": 0.0, "Done": 0.0}, + {"Backlog": 2.0, "Committed": 0.0, "Build": 0.0, "Test": 0.0, "Done": 0.0}, + {"Backlog": 3.0, "Committed": 2.0, "Build": 0.0, "Test": 0.0, "Done": 0.0}, + {"Backlog": 4.0, "Committed": 3.0, "Build": 1.0, "Test": 0.0, "Done": 0.0}, + {"Backlog": 4.0, "Committed": 3.0, "Build": 1.0, "Test": 1.0, "Done": 0.0}, + {"Backlog": 4.0, "Committed": 3.0, "Build": 1.0, "Test": 1.0, "Done": 1.0}, + ], + columns=cfd_columns, + index=[ + _ts("2018-01-01", "00:00:00", freq="D"), + _ts("2018-01-02", "00:00:00", freq="D"), + _ts("2018-01-03", "00:00:00", freq="D"), + _ts("2018-01-04", "00:00:00", freq="D"), + _ts("2018-01-05", "00:00:00", freq="D"), + _ts("2018-01-06", "00:00:00", freq="D"), + ], + ) + }, + ) diff --git a/jira_agile_metrics/querymanager.py 
b/jira_agile_metrics/querymanager.py index bec4d3c..be4daa5 100644 --- a/jira_agile_metrics/querymanager.py +++ b/jira_agile_metrics/querymanager.py @@ -1,16 +1,18 @@ -import json import itertools +import json import logging + import dateutil.parser import dateutil.tz from .config import ConfigError + logger = logging.getLogger(__name__) -class IssueSnapshot(object): - """A snapshot of the key fields of an issue at a point in its change history - """ + +class IssueSnapshot: + """A snapshot of the key fields of an issue at a point in its change history""" def __init__(self, change, key, date, from_string, to_string): self.change = change @@ -20,28 +22,30 @@ def __init__(self, change, key, date, from_string, to_string): self.to_string = to_string def __eq__(self, other): - return all(( - self.change == other.change, - self.key == other.key, - self.date.isoformat() == other.date.isoformat(), - self.from_string == other.from_string, - self.to_string == other.to_string - )) + return all( + ( + self.change == other.change, + self.key == other.key, + self.date.isoformat() == other.date.isoformat(), + self.from_string == other.from_string, + self.to_string == other.to_string, + ) + ) def __repr__(self): return "" % ( - self.change, self.key, self.date.isoformat(), self.from_string, self.to_string, + self.change, + self.key, + self.date.isoformat(), + self.from_string, + self.to_string, ) -class QueryManager(object): - """Manage and execute queries - """ - settings = dict( - attributes={}, - known_values={}, - max_results=False, - ) +class QueryManager: + """Manage and execute queries""" + + settings = dict(attributes={}, known_values={}, max_results=False) def __init__(self, jira, settings): self.jira = jira @@ -56,25 +60,29 @@ def __init__(self, jira, settings): self.jira_fields = self.jira.fields() if len(self.jira_fields) == 0: - raise ConfigError("No field data retrieved from JIRA. 
This likely means a problem with the JIRA API.") from None + raise ConfigError( + "No field data retrieved from JIRA. This likely means a problem with the JIRA API." + ) from None - self.jira_fields_to_names = {field['id']: field['name'] for field in self.jira_fields} + self.jira_fields_to_names = {field["id"]: field["name"] for field in self.jira_fields} field_id = None - for name, field in self.settings['attributes'].items(): + for name, field in self.settings["attributes"].items(): field_id = self.field_name_to_id(field) self.attributes_to_fields[name] = field_id self.fields_to_attributes[field_id] = name def field_name_to_id(self, name): try: - return next((f['id'] for f in self.jira_fields if f['name'].lower() == name.lower())) + return next((f["id"] for f in self.jira_fields if f["name"].lower() == name.lower())) except StopIteration: # XXX: we are having problems with this falsely claiming fields don't exist logger.debug("Failed to look up %s in JIRA fields: %s", name, json.dumps(self.jira_fields)) - raise ConfigError("JIRA field with name `%s` does not exist (did you try to use the field id instead?)" % name) from None + raise ConfigError( + "JIRA field with name `%s` does not exist (did you try to use the field id instead?)" % name + ) from None def resolve_attribute_value(self, issue, attribute_name): """Given an attribute name (i.e. one named in the config file and @@ -94,28 +102,30 @@ def resolve_field_value(self, issue, field_id): try: field_value = getattr(issue.fields, field_id) except AttributeError: - field_name = self.jira_fields_to_names.get(field_id, 'Unknown name') - logger.debug("Could not get field value for field {}. Probably this is a wrong workflow field mapping".format(field_name)) + field_name = self.jira_fields_to_names.get(field_id, "Unknown name") + logger.debug( + "Could not get field value for field %s. 
Probably this is a wrong workflow field mapping", field_name + ) field_value = None if field_value is None: return None - value = getattr(field_value, 'value', field_value) + value = getattr(field_value, "value", field_value) if isinstance(value, (list, tuple)): if len(value) == 0: value = None else: - values = [getattr(v, 'name', v) for v in value] + values = [getattr(v, "name", v) for v in value] # is this a `Known Values` attribute? attribute_name = self.fields_to_attributes.get(field_id, None) - if attribute_name not in self.settings['known_values']: + if attribute_name not in self.settings["known_values"]: value = values[0] else: try: - value = next(filter(lambda v: v in values, self.settings['known_values'][attribute_name])) + value = next(filter(lambda v: v in values, self.settings["known_values"][attribute_name])) except StopIteration: value = None @@ -136,11 +146,19 @@ def iter_changes(self, issue, fields): for field in fields: initial_value = self.resolve_field_value(issue, self.field_name_to_id(field)) try: - initial_value = next(filter( - lambda h: h.field == field, - itertools.chain.from_iterable([c.items for c in sorted( - issue.changelog.histories, key=lambda c: dateutil.parser.parse(c.created))]) - )).fromString + initial_value = next( + filter( + lambda h: h.field == field, + itertools.chain.from_iterable( + [ + c.items + for c in sorted( + issue.changelog.histories, key=lambda c: dateutil.parser.parse(c.created) + ) + ] + ), + ) + ).fromString except StopIteration: pass @@ -162,17 +180,17 @@ def iter_changes(self, issue, fields): key=issue.key, date=change_date, from_string=item.fromString, - to_string=item.toString + to_string=item.toString, ) # Basic queries - def find_issues(self, jql, expand='changelog'): + def find_issues(self, jql, expand="changelog"): """Return a list of issues with changelog metadata for the given JQL. 
""" - max_results = self.settings['max_results'] + max_results = self.settings["max_results"] logger.info("Fetching issues with query `%s`", jql) if max_results: diff --git a/jira_agile_metrics/querymanager_test.py b/jira_agile_metrics/querymanager_test.py index 3e80bba..3886ad3 100644 --- a/jira_agile_metrics/querymanager_test.py +++ b/jira_agile_metrics/querymanager_test.py @@ -1,81 +1,114 @@ -import pytest import datetime -from .conftest import ( - FauxJIRA as JIRA, - FauxIssue as Issue, - FauxChange as Change, - FauxFieldValue as Value -) +import pytest +from .conftest import FauxJIRA as JIRA, FauxIssue as Issue, FauxChange as Change, FauxFieldValue as Value from .querymanager import QueryManager, IssueSnapshot from .utils import extend_dict -@pytest.fixture -def jira(custom_fields): - return JIRA(fields=custom_fields, issues=[ - Issue("A-1", - summary="Issue A-1", - issuetype=Value("Story", "story"), - status=Value("Backlotg", "backlog"), - resolution=None, - created="2018-01-01 01:01:01", - customfield_001="Team 1", - customfield_002=Value(None, 30), - customfield_003=Value(None, ["R2", "R3", "R4"]), - changes=[ - # the changes are not in chrnological order, the first change is intentionally the third - # status change. 
This is intended to test that we manage get the correct first status change as - # the transition from Backlog to Next - Change("2018-01-03 01:01:01", [("resolution", None, "Closed",), ("status", "Next", "Done",)]), - Change("2018-01-02 01:01:01", [("status", "Backlog", "Next",)]), - Change("2018-01-02 01:01:01", [("Team", "Team 2", "Team 1",)]), - Change("2018-01-04 01:01:01", [("resolution", "Closed", None,), ("status", "Done", "QA",)]), - ], - ) - ]) - -@pytest.fixture -def settings(custom_settings): + +@pytest.fixture(name="jira") +def fixture_jira(custom_fields): + return JIRA( + fields=custom_fields, + issues=[ + Issue( + "A-1", + summary="Issue A-1", + issuetype=Value("Story", "story"), + status=Value("Backlotg", "backlog"), + resolution=None, + created="2018-01-01 01:01:01", + customfield_001="Team 1", + customfield_002=Value(None, 30), + customfield_003=Value(None, ["R2", "R3", "R4"]), + changes=[ + # the changes are not in chronological order, the first change is intentionally the third + # status change. 
This is intended to test that we manage to get the correct first status change as + # the transition from Backlog to Next + Change("2018-01-03 01:01:01", [("resolution", None, "Closed"), ("status", "Next", "Done")]), + Change("2018-01-02 01:01:01", [("status", "Backlog", "Next")]), + Change("2018-01-02 01:01:01", [("Team", "Team 2", "Team 1")]), + Change("2018-01-04 01:01:01", [("resolution", "Closed", None), ("status", "Done", "QA")]), + ], + ) + ], + ) + + +@pytest.fixture(name="settings") +def fixture_settings(custom_settings): + return extend_dict(custom_settings, {}) + + def test_search(jira, settings): - qm = QueryManager(jira, settings) - assert qm.attributes_to_fields == { - 'Team': 'customfield_001', - 'Estimate': 'customfield_002', - 'Release': 'customfield_003', + query_manager = QueryManager(jira, settings) + assert query_manager.attributes_to_fields == { + "Team": "customfield_001", + "Estimate": "customfield_002", + "Release": "customfield_003", } - issues = qm.find_issues("(filter=123)") - assert issues == jira._issues + issues = query_manager.find_issues("(filter=123)") + assert issues == jira.issues() + def test_resolve_attribute_value(jira, settings): - qm = QueryManager(jira, settings) - issues = qm.find_issues("(filter=123)") + query_manager = QueryManager(jira, settings) + issues = query_manager.find_issues("(filter=123)") + + assert query_manager.resolve_attribute_value(issues[0], "Team") == "Team 1" + assert query_manager.resolve_attribute_value(issues[0], "Estimate") == 30 + assert query_manager.resolve_attribute_value(issues[0], "Release") == "R3" # due to known_value - assert qm.resolve_attribute_value(issues[0], "Team") == "Team 1" - assert qm.resolve_attribute_value(issues[0], "Estimate") == 30 - assert qm.resolve_attribute_value(issues[0], "Release") == "R3" # due to known_value def test_resolve_field_value(jira, settings): - qm = QueryManager(jira, 
settings) + issues = query_manager.find_issues("(filter=123)") + + assert query_manager.resolve_field_value(issues[0], "customfield_001") == "Team 1" + assert query_manager.resolve_field_value(issues[0], "customfield_002") == 30 + assert query_manager.resolve_field_value(issues[0], "customfield_003") == "R3" # due to known_value - assert qm.resolve_field_value(issues[0], "customfield_001") == "Team 1" - assert qm.resolve_field_value(issues[0], "customfield_002") == 30 - assert qm.resolve_field_value(issues[0], "customfield_003") == "R3" # due to known_value def test_iter_changes(jira, settings): - qm = QueryManager(jira, settings) - issues = qm.find_issues("(filter=123)") - changes = list(qm.iter_changes(issues[0], ['status', 'Team'])) + query_manager = QueryManager(jira, settings) + issues = query_manager.find_issues("(filter=123)") + changes = list(query_manager.iter_changes(issues[0], ["status", "Team"])) assert changes == [ - IssueSnapshot(change="status", key="A-1", date=datetime.datetime(2018, 1, 1, 1, 1, 1), from_string=None, to_string="Backlog"), - IssueSnapshot(change="Team", key="A-1", date=datetime.datetime(2018, 1, 1, 1, 1, 1), from_string=None, to_string="Team 2"), - IssueSnapshot(change="status", key="A-1", date=datetime.datetime(2018, 1, 2, 1, 1, 1), from_string="Backlog", to_string="Next"), - IssueSnapshot(change="Team", key="A-1", date=datetime.datetime(2018, 1, 2, 1, 1, 1), from_string="Team 2", to_string="Team 1"), - IssueSnapshot(change="status", key="A-1", date=datetime.datetime(2018, 1, 3, 1, 1, 1), from_string="Next", to_string="Done"), - IssueSnapshot(change="status", key="A-1", date=datetime.datetime(2018, 1, 4, 1, 1, 1), from_string="Done", to_string="QA") + IssueSnapshot( + change="status", + key="A-1", + date=datetime.datetime(2018, 1, 1, 1, 1, 1), + from_string=None, + to_string="Backlog", + ), + IssueSnapshot( + change="Team", key="A-1", date=datetime.datetime(2018, 1, 1, 1, 1, 1), from_string=None, to_string="Team 2" + ), + 
IssueSnapshot( + change="status", + key="A-1", + date=datetime.datetime(2018, 1, 2, 1, 1, 1), + from_string="Backlog", + to_string="Next", + ), + IssueSnapshot( + change="Team", + key="A-1", + date=datetime.datetime(2018, 1, 2, 1, 1, 1), + from_string="Team 2", + to_string="Team 1", + ), + IssueSnapshot( + change="status", + key="A-1", + date=datetime.datetime(2018, 1, 3, 1, 1, 1), + from_string="Next", + to_string="Done", + ), + IssueSnapshot( + change="status", key="A-1", date=datetime.datetime(2018, 1, 4, 1, 1, 1), from_string="Done", to_string="QA" + ), ] diff --git a/jira_agile_metrics/utils.py b/jira_agile_metrics/utils.py index e3392a7..954477a 100644 --- a/jira_agile_metrics/utils.py +++ b/jira_agile_metrics/utils.py @@ -5,15 +5,18 @@ import pandas as pd import seaborn as sns + class StatusTypes: - backlog = 'backlog' - accepted = 'accepted' - complete = 'complete' + backlog = "backlog" + accepted = "accepted" + complete = "complete" + + +def extend_dict(source_dict, dict_to_add): + extended_dict = source_dict.copy() + extended_dict.update(dict_to_add) + return extended_dict -def extend_dict(d, e): - r = d.copy() - r.update(e) - return r def to_json_string(value): if isinstance(value, pd.Timestamp): @@ -26,21 +29,26 @@ def to_json_string(value): except TypeError: return value + def get_extension(filename): return os.path.splitext(filename)[1].lower() -def to_days_since_epoch(d): - return (d - datetime.date(1970, 1, 1)).days + +def to_days_since_epoch(date): + return (date - datetime.date(1970, 1, 1)).days + def set_chart_context(context): sns.set_context(context) + def set_chart_style(style="whitegrid", despine=True): sns.set_style(style) if despine: sns.despine() -def breakdown_by_month(df, start_column, end_column, key_column, value_column, output_columns=None, aggfunc='count'): + +def breakdown_by_month(df, start_column, end_column, key_column, value_column, output_columns=None, aggfunc="count"): """If `df` is a DataFrame of items that are valid/active 
between the timestamps stored in `start_column` and `end_column`, and where each item is uniquely identified by `key_column` and has a categorical value in @@ -49,34 +57,31 @@ def breakdown_by_month(df, start_column, end_column, key_column, value_column, o (and order) the value columns, pass a list of valid values as `output_columns`. """ - def build_df(t): - start_date = getattr(t, start_column) - end_date = getattr(t, end_column) - key = getattr(t, key_column) - value = getattr(t, value_column) + def build_df(row): + start_date = getattr(row, start_column) + end_date = getattr(row, end_column) + key = getattr(row, key_column) + value = getattr(row, value_column) if end_date is pd.NaT: end_date = pd.Timestamp.today() - first_month = start_date.normalize().to_period('M').to_timestamp('D', 'S') - last_month = end_date.normalize().to_period('M').to_timestamp('D', 'S') + first_month = start_date.normalize().to_period("M").to_timestamp("D", "S") + last_month = end_date.normalize().to_period("M").to_timestamp("D", "S") - index = pd.date_range(first_month, last_month, freq='MS') + index = pd.date_range(first_month, last_month, freq="MS") - return pd.DataFrame( - index=index, - data=[[key]], - columns=[value] - ) + return pd.DataFrame(index=index, data=[[key]], columns=[value]) - breakdown = pd.concat([build_df(t) for t in df.itertuples()], sort=True).resample('MS').agg(aggfunc) + breakdown = pd.concat([build_df(row) for row in df.itertuples()], sort=True).resample("MS").agg(aggfunc) if output_columns: breakdown = breakdown[[s for s in output_columns if s in breakdown.columns]] return breakdown -def breakdown_by_month_sum_days(df, start_column, end_column, value_column, output_columns=None, aggfunc='sum'): + +def breakdown_by_month_sum_days(df, start_column, end_column, value_column, output_columns=None, aggfunc="sum"): """If `df` is a DataFrame of items that are valid/active between the timestamps stored in `start_column` and `end_column`, and where each has a 
categorical value in `value_column`, return a new DataFrame summing the @@ -85,33 +90,43 @@ def breakdown_by_month_sum_days(df, start_column, end_column, value_column, outp valid values as `output_columns`. """ - def build_df(t): - start_date = getattr(t, start_column) - end_date = getattr(t, end_column) - value = getattr(t, value_column) + def build_df(row): + start_date = getattr(row, start_column) + end_date = getattr(row, end_column) + value = getattr(row, value_column) if end_date is pd.NaT: end_date = pd.Timestamp.today() - days_range = pd.date_range(start_date, end_date, freq='D') - first_month = start_date.normalize().to_period('M').to_timestamp('D', 'S') - last_month = end_date.normalize().to_period('M').to_timestamp('D', 'S') + days_range = pd.date_range(start_date, end_date, freq="D") + first_month = start_date.normalize().to_period("M").to_timestamp("D", "S") + last_month = end_date.normalize().to_period("M").to_timestamp("D", "S") - index = pd.date_range(first_month, last_month, freq='MS') + index = pd.date_range(first_month, last_month, freq="MS") return pd.DataFrame( index=index, - data=[[len(pd.date_range(month_start, month_start + pd.tseries.offsets.MonthEnd(1), freq='D').intersection(days_range))] for month_start in index], - columns=[value] + data=[ + [ + len( + pd.date_range(month_start, month_start + pd.tseries.offsets.MonthEnd(1), freq="D").intersection( + days_range + ) + ) + ] + for month_start in index + ], + columns=[value], ) - breakdown = pd.concat([build_df(t) for t in df.itertuples()], sort=True).resample('MS').agg(aggfunc) + breakdown = pd.concat([build_df(row) for row in df.itertuples()], sort=True).resample("MS").agg(aggfunc) if output_columns: breakdown = breakdown[[s for s in output_columns if s in breakdown.columns]] return breakdown + def to_bin(value, edges): """Pass a list of numbers in `edges` and return which of them `value` falls between. If < the first item, return (0, ). 
If > last item, return @@ -119,8 +134,8 @@ def to_bin(value, edges): """ previous = 0 - for v in edges: - if previous <= value <= v: - return (previous, v) - previous = v + for edge in edges: + if previous <= value <= edge: + return (previous, edge) + previous = edge return (previous, None) diff --git a/jira_agile_metrics/utils_test.py b/jira_agile_metrics/utils_test.py index 289a1cf..aa27959 100644 --- a/jira_agile_metrics/utils_test.py +++ b/jira_agile_metrics/utils_test.py @@ -1,4 +1,5 @@ import datetime + import numpy as np import pandas as pd @@ -9,11 +10,13 @@ extend_dict, breakdown_by_month, breakdown_by_month_sum_days, - to_bin + to_bin, ) + def test_extend_dict(): - assert extend_dict({'one': 1}, {'two': 2}) == {'one': 1, 'two': 2} + assert extend_dict({"one": 1}, {"two": 2}) == {"one": 1, "two": 2} + def test_get_extension(): assert get_extension("foo.csv") == ".csv" @@ -22,6 +25,7 @@ def test_get_extension(): assert get_extension("foo") == "" assert get_extension("foo.CSV") == ".csv" + def test_to_json_string(): assert to_json_string(1) == "1" assert to_json_string("foo") == "foo" @@ -30,52 +34,61 @@ def test_to_json_string(): assert to_json_string(pd.NaT) == "" assert to_json_string(pd.Timestamp(2018, 2, 1)) == "2018-02-01" + def test_to_days_since_epoch(): assert to_days_since_epoch(datetime.date(1970, 1, 1)) == 0 assert to_days_since_epoch(datetime.date(1970, 1, 15)) == 14 + def test_breakdown_by_month(): - df = pd.DataFrame([ - {'key': 'ABC-1', 'priority': 'high', 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-2', 'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, - {'key': 'ABC-3', 'priority': 'high', 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-4', 'priority': 'med', 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-5', 'priority': 'high', 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 
2, 20)}, - {'key': 'ABC-6', 'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': pd.Timestamp(2018, 3, 20)} - ], columns=['key', 'priority', 'start', 'end']) - - breakdown = breakdown_by_month(df, 'start', 'end', 'key', 'priority', ['low', 'med', 'high']) - assert list(breakdown.columns) == ['med', 'high'] - + df = pd.DataFrame( + [ + {"key": "ABC-1", "priority": "high", "start": pd.Timestamp(2018, 1, 1), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-2", "priority": "med", "start": pd.Timestamp(2018, 1, 2), "end": pd.Timestamp(2018, 1, 20)}, + {"key": "ABC-3", "priority": "high", "start": pd.Timestamp(2018, 2, 3), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-4", "priority": "med", "start": pd.Timestamp(2018, 1, 4), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-5", "priority": "high", "start": pd.Timestamp(2018, 2, 5), "end": pd.Timestamp(2018, 2, 20)}, + {"key": "ABC-6", "priority": "med", "start": pd.Timestamp(2018, 3, 6), "end": pd.Timestamp(2018, 3, 20)}, + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month(df, "start", "end", "key", "priority", ["low", "med", "high"]) + assert list(breakdown.columns) == ["med", "high"] + assert list(breakdown.index) == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), pd.Timestamp(2018, 3, 1), ] - assert breakdown.to_dict('records') == [ - {'high': 1, 'med': 2}, - {'high': 3, 'med': 1}, - {'high': 2, 'med': 2}, + assert breakdown.to_dict("records") == [ + {"high": 1, "med": 2}, + {"high": 3, "med": 1}, + {"high": 2, "med": 2}, ] -def test_breakdown_by_month_open_ended(): - df = pd.DataFrame([ - {'key': 'ABC-1', 'priority': 'high', 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-2', 'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, - {'key': 'ABC-3', 'priority': 'high', 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-4', 'priority': 'med', 'start': 
pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-5', 'priority': 'high', 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, - {'key': 'ABC-6', 'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': None} - ], columns=['key', 'priority', 'start', 'end']) +def test_breakdown_by_month_open_ended(): - breakdown = breakdown_by_month(df, 'start', 'end', 'key', 'priority', ['low', 'med', 'high']) - assert list(breakdown.columns) == ['med', 'high'] + df = pd.DataFrame( + [ + {"key": "ABC-1", "priority": "high", "start": pd.Timestamp(2018, 1, 1), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-2", "priority": "med", "start": pd.Timestamp(2018, 1, 2), "end": pd.Timestamp(2018, 1, 20)}, + {"key": "ABC-3", "priority": "high", "start": pd.Timestamp(2018, 2, 3), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-4", "priority": "med", "start": pd.Timestamp(2018, 1, 4), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-5", "priority": "high", "start": pd.Timestamp(2018, 2, 5), "end": pd.Timestamp(2018, 2, 20)}, + {"key": "ABC-6", "priority": "med", "start": pd.Timestamp(2018, 3, 6), "end": None}, + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month(df, "start", "end", "key", "priority", ["low", "med", "high"]) + assert list(breakdown.columns) == ["med", "high"] # Note: We will get columns until the current month; assume this test is # run from June onwards ;) - + assert list(breakdown.index)[:5] == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), @@ -83,129 +96,217 @@ def test_breakdown_by_month_open_ended(): pd.Timestamp(2018, 4, 1), pd.Timestamp(2018, 5, 1), ] - assert breakdown.to_dict('records')[:5] == [ - {'high': 1, 'med': 2}, - {'high': 3, 'med': 1}, - {'high': 2, 'med': 2}, - {'high': 0, 'med': 1}, - {'high': 0, 'med': 1}, + assert breakdown.to_dict("records")[:5] == [ + {"high": 1, "med": 2}, + {"high": 3, "med": 1}, + {"high": 2, "med": 2}, + {"high": 0, "med": 1}, + {"high": 
0, "med": 1}, ] + def test_breakdown_by_month_no_column_spec(): - df = pd.DataFrame([ - {'key': 'ABC-1', 'priority': 'high', 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-2', 'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, - {'key': 'ABC-3', 'priority': 'high', 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-4', 'priority': 'med', 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-5', 'priority': 'high', 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, - {'key': 'ABC-6', 'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': pd.Timestamp(2018, 3, 20)} - ], columns=['key', 'priority', 'start', 'end']) - - breakdown = breakdown_by_month(df, 'start', 'end', 'key', 'priority') - assert list(breakdown.columns) == ['high', 'med'] # alphabetical - + df = pd.DataFrame( + [ + {"key": "ABC-1", "priority": "high", "start": pd.Timestamp(2018, 1, 1), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-2", "priority": "med", "start": pd.Timestamp(2018, 1, 2), "end": pd.Timestamp(2018, 1, 20)}, + {"key": "ABC-3", "priority": "high", "start": pd.Timestamp(2018, 2, 3), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-4", "priority": "med", "start": pd.Timestamp(2018, 1, 4), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-5", "priority": "high", "start": pd.Timestamp(2018, 2, 5), "end": pd.Timestamp(2018, 2, 20)}, + {"key": "ABC-6", "priority": "med", "start": pd.Timestamp(2018, 3, 6), "end": pd.Timestamp(2018, 3, 20)}, + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month(df, "start", "end", "key", "priority") + assert list(breakdown.columns) == ["high", "med"] # alphabetical + assert list(breakdown.index) == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), pd.Timestamp(2018, 3, 1), ] - assert breakdown.to_dict('records') == [ - {'high': 1, 'med': 2}, - {'high': 3, 
'med': 1}, - {'high': 2, 'med': 2}, + assert breakdown.to_dict("records") == [ + {"high": 1, "med": 2}, + {"high": 3, "med": 1}, + {"high": 2, "med": 2}, ] -def test_breakdown_by_month_none_values(): - df = pd.DataFrame([ - {'key': 'ABC-1', 'priority': None, 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-2', 'priority': None, 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, - {'key': 'ABC-3', 'priority': None, 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-4', 'priority': None, 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, - {'key': 'ABC-5', 'priority': None, 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, - {'key': 'ABC-6', 'priority': None, 'start': pd.Timestamp(2018, 3, 6), 'end': pd.Timestamp(2018, 3, 20)} - ], columns=['key', 'priority', 'start', 'end']) +def test_breakdown_by_month_none_values(): - breakdown = breakdown_by_month(df, 'start', 'end', 'key', 'priority') + df = pd.DataFrame( + [ + {"key": "ABC-1", "priority": None, "start": pd.Timestamp(2018, 1, 1), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-2", "priority": None, "start": pd.Timestamp(2018, 1, 2), "end": pd.Timestamp(2018, 1, 20)}, + {"key": "ABC-3", "priority": None, "start": pd.Timestamp(2018, 2, 3), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-4", "priority": None, "start": pd.Timestamp(2018, 1, 4), "end": pd.Timestamp(2018, 3, 20)}, + {"key": "ABC-5", "priority": None, "start": pd.Timestamp(2018, 2, 5), "end": pd.Timestamp(2018, 2, 20)}, + {"key": "ABC-6", "priority": None, "start": pd.Timestamp(2018, 3, 6), "end": pd.Timestamp(2018, 3, 20)}, + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month(df, "start", "end", "key", "priority") assert list(breakdown.columns) == [None] - + assert list(breakdown.index) == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), pd.Timestamp(2018, 3, 1), ] - assert 
breakdown.to_dict('records') == [{None: 3}, {None: 4}, {None: 4}] + assert breakdown.to_dict("records") == [{None: 3}, {None: 4}, {None: 4}] + def test_breakdown_by_month_sum_days(): - df = pd.DataFrame([ - {'priority': 'high', 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 31 Feb: 28 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, # Jan: 19 Feb: 0 Mar: 0 - {'priority': 'high', 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 0 Feb: 26 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 28 Feb: 28 Mar: 20 - {'priority': 'high', 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, # Jan: 0 Feb: 16 Mar: 0 - {'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': pd.Timestamp(2018, 3, 20)} # Jan: 0 Feb: 0 Mar: 15 - ], columns=['key', 'priority', 'start', 'end']) - - breakdown = breakdown_by_month_sum_days(df, 'start', 'end', 'priority', ['low', 'med', 'high']) - assert list(breakdown.columns) == ['med', 'high'] - + df = pd.DataFrame( + [ + { + "priority": "high", + "start": pd.Timestamp(2018, 1, 1), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 31 Feb: 28 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 2), + "end": pd.Timestamp(2018, 1, 20), + }, # Jan: 19 Feb: 0 Mar: 0 + { + "priority": "high", + "start": pd.Timestamp(2018, 2, 3), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 26 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 4), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 28 Feb: 28 Mar: 20 + { + "priority": "high", + "start": pd.Timestamp(2018, 2, 5), + "end": pd.Timestamp(2018, 2, 20), + }, # Jan: 0 Feb: 16 Mar: 0 + { + "priority": "med", + "start": pd.Timestamp(2018, 3, 6), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 0 Mar: 15 + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = 
breakdown_by_month_sum_days(df, "start", "end", "priority", ["low", "med", "high"]) + assert list(breakdown.columns) == ["med", "high"] + assert list(breakdown.index) == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), pd.Timestamp(2018, 3, 1), ] - assert breakdown.to_dict('records') == [ - {'high': 31.0, 'med': 47.0}, - {'high': 70.0, 'med': 28.0}, - {'high': 40.0, 'med': 35.0} + assert breakdown.to_dict("records") == [ + {"high": 31.0, "med": 47.0}, + {"high": 70.0, "med": 28.0}, + {"high": 40.0, "med": 35.0}, ] + def test_breakdown_by_month_sum_days_no_column_spec(): - df = pd.DataFrame([ - {'priority': 'high', 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 31 Feb: 28 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, # Jan: 19 Feb: 0 Mar: 0 - {'priority': 'high', 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 0 Feb: 26 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 28 Feb: 28 Mar: 20 - {'priority': 'high', 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, # Jan: 0 Feb: 16 Mar: 0 - {'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': pd.Timestamp(2018, 3, 20)} # Jan: 0 Feb: 0 Mar: 15 - ], columns=['key', 'priority', 'start', 'end']) - - breakdown = breakdown_by_month_sum_days(df, 'start', 'end', 'priority') - assert list(breakdown.columns) == ['high', 'med'] # alphabetical - + df = pd.DataFrame( + [ + { + "priority": "high", + "start": pd.Timestamp(2018, 1, 1), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 31 Feb: 28 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 2), + "end": pd.Timestamp(2018, 1, 20), + }, # Jan: 19 Feb: 0 Mar: 0 + { + "priority": "high", + "start": pd.Timestamp(2018, 2, 3), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 26 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 4), + "end": 
pd.Timestamp(2018, 3, 20), + }, # Jan: 28 Feb: 28 Mar: 20 + { + "priority": "high", + "start": pd.Timestamp(2018, 2, 5), + "end": pd.Timestamp(2018, 2, 20), + }, # Jan: 0 Feb: 16 Mar: 0 + { + "priority": "med", + "start": pd.Timestamp(2018, 3, 6), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 0 Mar: 15 + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month_sum_days(df, "start", "end", "priority") + assert list(breakdown.columns) == ["high", "med"] # alphabetical + assert list(breakdown.index) == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), pd.Timestamp(2018, 3, 1), ] - assert breakdown.to_dict('records') == [ - {'high': 31.0, 'med': 47.0}, - {'high': 70.0, 'med': 28.0}, - {'high': 40.0, 'med': 35.0} + assert breakdown.to_dict("records") == [ + {"high": 31.0, "med": 47.0}, + {"high": 70.0, "med": 28.0}, + {"high": 40.0, "med": 35.0}, ] -def test_breakdown_by_month_sum_day_open_ended(): - df = pd.DataFrame([ - {'priority': 'high', 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 31 Feb: 28 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, # Jan: 19 Feb: 0 Mar: 0 - {'priority': 'high', 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 0 Feb: 26 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 28 Feb: 28 Mar: 20 - {'priority': 'high', 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, # Jan: 0 Feb: 16 Mar: 0 - {'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': None} # Jan: 0 Feb: 0 Mar: 26 - ], columns=['key', 'priority', 'start', 'end']) +def test_breakdown_by_month_sum_day_open_ended(): - breakdown = breakdown_by_month_sum_days(df, 'start', 'end', 'priority', ['low', 'med', 'high']) - assert list(breakdown.columns) == ['med', 'high'] + df = pd.DataFrame( + [ + { + "priority": "high", + "start": pd.Timestamp(2018, 1, 1), + 
"end": pd.Timestamp(2018, 3, 20), + }, # Jan: 31 Feb: 28 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 2), + "end": pd.Timestamp(2018, 1, 20), + }, # Jan: 19 Feb: 0 Mar: 0 + { + "priority": "high", + "start": pd.Timestamp(2018, 2, 3), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 26 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 4), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 28 Feb: 28 Mar: 20 + { + "priority": "high", + "start": pd.Timestamp(2018, 2, 5), + "end": pd.Timestamp(2018, 2, 20), + }, # Jan: 0 Feb: 16 Mar: 0 + {"priority": "med", "start": pd.Timestamp(2018, 3, 6), "end": None}, # Jan: 0 Feb: 0 Mar: 26 + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month_sum_days(df, "start", "end", "priority", ["low", "med", "high"]) + assert list(breakdown.columns) == ["med", "high"] # Note: We will get columns until the current month; assume this test is # run from June onwards ;) - + assert list(breakdown.index)[:5] == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), @@ -213,40 +314,69 @@ def test_breakdown_by_month_sum_day_open_ended(): pd.Timestamp(2018, 4, 1), pd.Timestamp(2018, 5, 1), ] - assert breakdown.to_dict('records')[:5] == [ - {'high': 31.0, 'med': 47.0}, - {'high': 70.0, 'med': 28.0}, - {'high': 40.0, 'med': 46.0}, - {'high': 0, 'med': 30.0}, - {'high': 0, 'med': 31.0}, + assert breakdown.to_dict("records")[:5] == [ + {"high": 31.0, "med": 47.0}, + {"high": 70.0, "med": 28.0}, + {"high": 40.0, "med": 46.0}, + {"high": 0, "med": 30.0}, + {"high": 0, "med": 31.0}, ] + def test_breakdown_by_month_sum_days_none_values(): - df = pd.DataFrame([ - {'priority': None, 'start': pd.Timestamp(2018, 1, 1), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 31 Feb: 28 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 2), 'end': pd.Timestamp(2018, 1, 20)}, # Jan: 19 Feb: 0 Mar: 0 - {'priority': None, 'start': pd.Timestamp(2018, 2, 3), 'end': pd.Timestamp(2018, 3, 20)}, # 
Jan: 0 Feb: 26 Mar: 20 - {'priority': 'med', 'start': pd.Timestamp(2018, 1, 4), 'end': pd.Timestamp(2018, 3, 20)}, # Jan: 28 Feb: 28 Mar: 20 - {'priority': None, 'start': pd.Timestamp(2018, 2, 5), 'end': pd.Timestamp(2018, 2, 20)}, # Jan: 0 Feb: 16 Mar: 0 - {'priority': 'med', 'start': pd.Timestamp(2018, 3, 6), 'end': pd.Timestamp(2018, 3, 20)} # Jan: 0 Feb: 0 Mar: 15 - ], columns=['key', 'priority', 'start', 'end']) - - breakdown = breakdown_by_month_sum_days(df, 'start', 'end', 'priority') - assert list(breakdown.columns) == [None, 'med'] - + df = pd.DataFrame( + [ + { + "priority": None, + "start": pd.Timestamp(2018, 1, 1), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 31 Feb: 28 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 2), + "end": pd.Timestamp(2018, 1, 20), + }, # Jan: 19 Feb: 0 Mar: 0 + { + "priority": None, + "start": pd.Timestamp(2018, 2, 3), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 26 Mar: 20 + { + "priority": "med", + "start": pd.Timestamp(2018, 1, 4), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 28 Feb: 28 Mar: 20 + { + "priority": None, + "start": pd.Timestamp(2018, 2, 5), + "end": pd.Timestamp(2018, 2, 20), + }, # Jan: 0 Feb: 16 Mar: 0 + { + "priority": "med", + "start": pd.Timestamp(2018, 3, 6), + "end": pd.Timestamp(2018, 3, 20), + }, # Jan: 0 Feb: 0 Mar: 15 + ], + columns=["key", "priority", "start", "end"], + ) + + breakdown = breakdown_by_month_sum_days(df, "start", "end", "priority") + assert list(breakdown.columns) == [None, "med"] + assert list(breakdown.index) == [ pd.Timestamp(2018, 1, 1), pd.Timestamp(2018, 2, 1), pd.Timestamp(2018, 3, 1), ] - assert breakdown.to_dict('records') == [ - {None: 31.0, 'med': 47.0}, - {None: 70.0, 'med': 28.0}, - {None: 40.0, 'med': 35.0} + assert breakdown.to_dict("records") == [ + {None: 31.0, "med": 47.0}, + {None: 70.0, "med": 28.0}, + {None: 40.0, "med": 35.0}, ] + def test_to_bin(): assert to_bin(0, [10, 20, 30]) == (0, 10) @@ -257,5 +387,5 @@ def test_to_bin(): 
assert to_bin(20, [10, 20, 30]) == (10, 20) assert to_bin(30, [10, 20, 30]) == (20, 30) - + assert to_bin(31, [10, 20, 30]) == (30, None) diff --git a/jira_agile_metrics/webapp/app.py b/jira_agile_metrics/webapp/app.py index e67850d..6818c90 100644 --- a/jira_agile_metrics/webapp/app.py +++ b/jira_agile_metrics/webapp/app.py @@ -1,83 +1,81 @@ -import logging +import base64 import contextlib import io +import logging import os import os.path import shutil import tempfile -import base64 import zipfile -import jinja2 from flask import Flask, render_template, request -from jira import JIRA +import jinja2 +from jira import JIRA, JIRAError +from ..calculator import run_calculators from ..config import config_to_options, CALCULATORS, ConfigError from ..querymanager import QueryManager -from ..calculator import run_calculators + template_folder = os.path.join(os.path.dirname(__file__), "templates") static_folder = os.path.join(os.path.dirname(__file__), "static") -app = Flask('jira-agile-metrics', - template_folder=template_folder, - static_folder=static_folder -) +app = Flask("jira-agile-metrics", template_folder=template_folder, static_folder=static_folder) -app.jinja_loader = jinja2.PackageLoader('jira_agile_metrics.webapp', 'templates') +app.jinja_loader = jinja2.PackageLoader("jira_agile_metrics.webapp", "templates") logger = logging.getLogger(__name__) + @app.route("/") def index(): - return render_template('index.html', max_results=request.args.get('max_results', "")) + return render_template("index.html", max_results=request.args.get("max_results", "")) + -@app.route("/run", methods=['POST']) +@app.route("/run", methods=["POST"]) def run(): - config = request.files['config'] - + config = request.files["config"] + data = "" has_error = False log_buffer = io.StringIO() with capture_log(log_buffer, logging.DEBUG, "%(levelname)s: %(message)s"): - + # We swallow exceptions here because we want to show them in the output # log on the result page. 
try: options = config_to_options(config.read()) - override_options(options['connection'], request.form) - + override_options(options["connection"], request.form) + # We allow a `max_results` query string parameter for faster debugging - if request.form.get('max_results'): + if request.form.get("max_results"): try: - options['settings']['max_results'] = int(request.form.get('max_results')) + options["settings"]["max_results"] = int(request.form.get("max_results")) except ValueError: - options['settings']['max_results'] = None - - jira = get_jira_client(options['connection']) - query_manager = QueryManager(jira, options['settings']) - zip_data = get_archive(CALCULATORS, query_manager, options['settings']) - data = base64.b64encode(zip_data).decode('ascii') - except Exception as e: - logger.error("%s", e) + options["settings"]["max_results"] = None + + jira = get_jira_client(options["connection"]) + query_manager = QueryManager(jira, options["settings"]) + zip_data = get_archive(CALCULATORS, query_manager, options["settings"]) + data = base64.b64encode(zip_data).decode("ascii") + except Exception as ex: + logger.error("%s", ex) has_error = True - return render_template('results.html', - data=data, - has_error=has_error, - log=log_buffer.getvalue() - ) + return render_template("results.html", data=data, has_error=has_error, log=log_buffer.getvalue()) + # Helpers + @contextlib.contextmanager def capture_log(buffer, level, formatter=None): """Temporarily write log output to the StringIO `buffer` with log level threshold `level`, before returning logging to normal. """ root_logger = logging.getLogger() - + old_level = root_logger.getEffectiveLevel() root_logger.setLevel(level) @@ -97,6 +95,7 @@ def capture_log(buffer, level, formatter=None): handler.flush() buffer.flush() + def override_options(options, form): """Override options from the configuration files with form data where applicable. 
@@ -105,33 +104,35 @@ def override_options(options, form): if key in form and form[key] != "": options[key] = form[key] + def get_jira_client(connection): - """Create a JIRA client with the given connection options - """ + """Create a JIRA client with the given connection options""" - url = connection['domain'] - username = connection['username'] - password = connection['password'] - jira_client_options = connection['jira_client_options'] - jira_server_version_check = connection['jira_server_version_check'] + url = connection["domain"] + username = connection["username"] + password = connection["password"] + jira_client_options = connection["jira_client_options"] + jira_server_version_check = connection["jira_server_version_check"] - jira_options = {'server': url} + jira_options = {"server": url} jira_options.update(jira_client_options) try: return JIRA(jira_options, basic_auth=(username, password), get_server_info=jira_server_version_check) - except Exception as e: - if e.status_code == 401: - raise ConfigError("JIRA authentication failed. Check URL and credentials, and ensure the account is not locked.") from None - else: - raise + except JIRAError as ex: + if ex.status_code == 401: + raise ConfigError( + "JIRA authentication failed. Check URL and credentials, and ensure the account is not locked." + ) from None + raise + def get_archive(calculators, query_manager, settings): """Run all calculators and write outputs to a temporary directory. Create a zip archive of all the files written, and return it as a bytes array. Remove the temporary directory on completion. 
""" - zip_data = b'' + zip_data = b"" cwd = os.getcwd() temp_path = tempfile.mkdtemp() @@ -140,16 +141,16 @@ def get_archive(calculators, query_manager, settings): os.chdir(temp_path) run_calculators(calculators, query_manager, settings) - with zipfile.ZipFile('metrics.zip', 'w', zipfile.ZIP_STORED) as z: - for root, dirs, files in os.walk(temp_path): + with zipfile.ZipFile("metrics.zip", "w", zipfile.ZIP_STORED) as file: + for root, _, files in os.walk(temp_path): for file_name in files: - if file_name != 'metrics.zip': - z.write(os.path.join(root, file_name), os.path.join('metrics', file_name)) - with open('metrics.zip', 'rb') as metrics_zip: + if file_name != "metrics.zip": + file.write(os.path.join(root, file_name), os.path.join("metrics", file_name)) + with open("metrics.zip", "rb") as metrics_zip: zip_data = metrics_zip.read() finally: os.chdir(cwd) shutil.rmtree(temp_path) - + return zip_data diff --git a/pyproject.toml b/pyproject.toml index 5715654..4321be7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,5 +1,5 @@ [tool.black] -line-length = 79 +line-length = 120 include = '\.pyi?$' exclude = ''' /( @@ -15,4 +15,7 @@ exclude = ''' | dist | venv )/ -''' \ No newline at end of file +''' + +[tool.pylint.'MESSAGES CONTROL'] +max-line-length = 120 \ No newline at end of file diff --git a/setup.py b/setup.py index 23fc004..8545a72 100644 --- a/setup.py +++ b/setup.py @@ -1,39 +1,37 @@ -from setuptools import setup, find_packages from codecs import open from os import path +from setuptools import setup, find_packages + + here = path.abspath(path.dirname(__file__)) # Get the long description from the README file -with open(path.join(here, 'README.md'), encoding='utf-8') as f: +with open(path.join(here, "README.md"), encoding="utf-8") as f: long_description = f.read() -with open(path.join(here, 'requirements.txt')) as f: +with open(path.join(here, "requirements.txt")) as f: install_requires = f.readlines() setup( - name='jira-agile-metrics', - 
version='0.25', - description='Agile metrics and summary data extracted from JIRA', + name="jira-agile-metrics", + version="0.25", + description="Agile metrics and summary data extracted from JIRA", long_description=long_description, long_description_content_type="text/markdown", - author='Martin Aspeli', - author_email='optilude@gmail.com', - url='https://github.com/optilude/jira-agile-metrics', - license='MIT', - keywords='agile jira analytics metrics', - packages=find_packages(exclude=['contrib', 'docs', 'tests*']), + author="Martin Aspeli", + author_email="optilude@gmail.com", + url="https://github.com/optilude/jira-agile-metrics", + license="MIT", + keywords="agile jira analytics metrics", + packages=find_packages(exclude=["contrib", "docs", "tests*"]), install_requires=install_requires, - setup_requires=['pytest-runner'], - tests_require=['pytest'], + setup_requires=["pytest-runner"], + tests_require=["pytest"], include_package_data=True, package_data={ - 'jira_agile_metrics.webapp': ['templates/*.*', 'static/*.*'], - 'jira_agile_metrics.calculators': ['*.html'], - }, - entry_points={ - 'console_scripts': [ - 'jira-agile-metrics=jira_agile_metrics.cli:main', - ], + "jira_agile_metrics.webapp": ["templates/*.*", "static/*.*"], + "jira_agile_metrics.calculators": ["*.html"], }, + entry_points={"console_scripts": ["jira-agile-metrics=jira_agile_metrics.cli:main"]}, )