diff --git a/CHANGELOG.md b/CHANGELOG.md index 42b9757..f0720fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,27 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## UNRELEASED +### Added + - Docker buildx recipes and scripts + - `uv` support in Docker builds, improving reproducibility and build speed + - log-normal distribution statistics + - latency quantiles (p95, p98, p99) + - Lilliefors goodness-of-fit test against both normal and log-normal distributions + - data points can now be recorded with the new `-d` option and extracted to a spreadsheet using the new `datapoints2xlsx` utility + - support for RSA-PSS signature testing + - support for `C_FindObjects()` testing + +### Changed + - measurements now use the high-resolution and steady clocks of the C++ standard library, improving timing precision in all environments (including VMs) + - units are now attached to measurements, enabling automatic conversion and improving readability + - graph improvements: latency percentiles now have a scale distinct from the average, with coloured curves, axes, and horizontal grid lines + - flow: when key generation fails, the corresponding test cases are removed from the run and the application no longer terminates + - the `C_SeedRandom()` test case now uses random numbers for the seed + +### Fixed + - removed a compilation warning under Clang (using `std::abs` instead of `abs`) + - updated Python dependencies to fix reported vulnerabilities ## 3.15.1 - 2025-11-26 ### Fixed diff --git a/P11PERFTEST_VERSION b/P11PERFTEST_VERSION index c3df54c..b9abac4 100644 --- a/P11PERFTEST_VERSION +++ b/P11PERFTEST_VERSION @@ -1 +1 @@ -3.15.1 +3.16.0-BETA \ No newline at end of file diff --git a/buildx/Dockerfile.alpine321 b/buildx/Dockerfile.alpine321 index f691510..75ed16d 100644 --- a/buildx/Dockerfile.alpine321 +++ b/buildx/Dockerfile.alpine321 @@ -144,7 +144,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -157,7 +158,7 @@ RUN make distclean \ && make install-strip DESTDIR=/apk_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /apk_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /apk_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /apk_build/usr/share/doc/p11perftest \ diff --git a/buildx/Dockerfile.amzn2023 b/buildx/Dockerfile.amzn2023 index 0842daa..17f96e4 100644 --- a/buildx/Dockerfile.amzn2023 +++ b/buildx/Dockerfile.amzn2023 @@ -134,7 +134,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -147,7 
+148,7 @@ RUN make distclean \ && make install-strip DESTDIR=/rpm_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /rpm_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /rpm_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /rpm_build/usr/share/doc/p11perftest \ diff --git a/buildx/Dockerfile.deb12 b/buildx/Dockerfile.deb12 index 08eadaf..0005ca8 100644 --- a/buildx/Dockerfile.deb12 +++ b/buildx/Dockerfile.deb12 @@ -46,7 +46,6 @@ RUN apt-get update && apt-get install -y \ libboost-dev \ libboost-program-options-dev \ libboost-system-dev \ - libboost-timer-dev \ libssl-dev \ && rm -rf /var/lib/apt/lists/* @@ -129,7 +128,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -142,7 +142,7 @@ RUN make distclean \ && make install-strip DESTDIR=/deb_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /deb_build/usr/share/doc/p11perftest \ @@ -174,7 +174,7 @@ RUN . /meta/env \ && echo "Section: misc" >> DEBIAN/control \ && echo "Priority: optional" >> DEBIAN/control \ && echo "Architecture: $PKG_ARCH" >> DEBIAN/control \ - && echo "Depends: libc6,libboost-chrono1.74.0,libboost-timer1.74.0,libboost-program-options1.74.0" >> DEBIAN/control \ + && echo "Depends: libc6,libboost-program-options1.74.0" >> DEBIAN/control \ && echo "Maintainer: $PKG_MAINTAINER" >> DEBIAN/control \ && echo "Description: A utility for testing PKCS#11 implementations" >> DEBIAN/control diff --git a/buildx/Dockerfile.deb13 b/buildx/Dockerfile.deb13 index 95df471..e31cb60 100644 --- a/buildx/Dockerfile.deb13 +++ b/buildx/Dockerfile.deb13 @@ -46,7 +46,6 @@ RUN apt-get update && apt-get install -y \ libboost-dev \ libboost-program-options-dev \ libboost-system-dev \ - libboost-timer-dev \ libssl-dev \ && rm -rf /var/lib/apt/lists/* @@ -129,7 +128,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -142,7 +142,7 @@ RUN make distclean \ && make install-strip DESTDIR=/deb_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /deb_build/usr/share/doc/p11perftest \ @@ -174,7 +174,7 @@ RUN . 
/meta/env \ && echo "Section: misc" >> DEBIAN/control \ && echo "Priority: optional" >> DEBIAN/control \ && echo "Architecture: $PKG_ARCH" >> DEBIAN/control \ - && echo "Depends: libc6,libboost-chrono1.83.0,libboost-timer1.83.0,libboost-program-options1.83.0" >> DEBIAN/control \ + && echo "Depends: libc6,libboost-program-options1.83.0" >> DEBIAN/control \ && echo "Maintainer: $PKG_MAINTAINER" >> DEBIAN/control \ && echo "Description: A utility for testing PKCS#11 implementations" >> DEBIAN/control diff --git a/buildx/Dockerfile.ol8 b/buildx/Dockerfile.ol8 index a715c40..e575a38 100644 --- a/buildx/Dockerfile.ol8 +++ b/buildx/Dockerfile.ol8 @@ -137,7 +137,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -150,7 +151,7 @@ RUN make distclean \ && make install-strip DESTDIR=/rpm_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /rpm_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /rpm_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /rpm_build/usr/share/doc/p11perftest \ diff --git a/buildx/Dockerfile.ol9 b/buildx/Dockerfile.ol9 index a671281..6ebdf38 100644 --- a/buildx/Dockerfile.ol9 +++ b/buildx/Dockerfile.ol9 @@ -138,7 +138,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -151,7 +152,7 @@ RUN make distclean \ && make install-strip DESTDIR=/rpm_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /rpm_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /rpm_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /rpm_build/usr/share/doc/p11perftest \ diff --git a/buildx/Dockerfile.ubuntu2204 b/buildx/Dockerfile.ubuntu2204 index eb34dc7..64649eb 100644 --- a/buildx/Dockerfile.ubuntu2204 +++ b/buildx/Dockerfile.ubuntu2204 @@ -46,7 +46,6 @@ RUN apt-get update && apt-get install -y \ libboost-dev \ libboost-program-options-dev \ libboost-system-dev \ - libboost-timer-dev \ libssl-dev \ && rm -rf /var/lib/apt/lists/* @@ -130,7 +129,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -143,7 +143,7 @@ RUN make distclean \ && make install-strip DESTDIR=/deb_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /deb_build/usr/bin 
dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /deb_build/usr/share/doc/p11perftest \ @@ -175,7 +175,7 @@ RUN . /meta/env \ && echo "Section: misc" >> DEBIAN/control \ && echo "Priority: optional" >> DEBIAN/control \ && echo "Architecture: $PKG_ARCH" >> DEBIAN/control \ - && echo "Depends: libc6,libboost-chrono1.74.0,libboost-timer1.74.0,libboost-program-options1.74.0" >> DEBIAN/control \ + && echo "Depends: libc6,libboost-program-options1.74.0" >> DEBIAN/control \ && echo "Maintainer: $PKG_MAINTAINER" >> DEBIAN/control \ && echo "Description: A utility for testing PKCS#11 implementations" >> DEBIAN/control diff --git a/buildx/Dockerfile.ubuntu2404 b/buildx/Dockerfile.ubuntu2404 index 052ea1a..2237f6d 100644 --- a/buildx/Dockerfile.ubuntu2404 +++ b/buildx/Dockerfile.ubuntu2404 @@ -47,7 +47,6 @@ RUN apt-get update && apt-get install -y \ libboost-dev \ libboost-program-options-dev \ libboost-system-dev \ - libboost-timer-dev \ libssl-dev \ && rm -rf /var/lib/apt/lists/* @@ -133,7 +132,8 @@ RUN uv venv RUN uv pip install -r requirements.txt \ && uv run pyinstaller scripts/json2xlsx.spec \ && uv run pyinstaller scripts/gengraphs.spec \ - && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs + && uv run pyinstaller scripts/datapoints2xlsx.spec \ + && install -s -m 755 -t /tar_build/usr/local/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /tar_build/usr/local/share/doc/p11perftest \ @@ -146,7 +146,7 @@ RUN make distclean \ && make install-strip DESTDIR=/deb_build # Build the python executables using pyinstaller -RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs +RUN install -s -m 755 -t /deb_build/usr/bin dist/json2xlsx dist/gengraphs dist/datapoints2xlsx # Install documentation RUN mkdir -p /deb_build/usr/share/doc/p11perftest \ @@ -178,7 +178,7 @@ RUN . /meta/env \ && echo "Section: misc" >> DEBIAN/control \ && echo "Priority: optional" >> DEBIAN/control \ && echo "Architecture: $PKG_ARCH" >> DEBIAN/control \ - && echo "Depends: libc6,libboost-chrono1.83.0,libboost-timer1.83.0,libboost-program-options1.83.0" >> DEBIAN/control \ + && echo "Depends: libc6,libboost-program-options1.83.0" >> DEBIAN/control \ && echo "Maintainer: $PKG_MAINTAINER" >> DEBIAN/control \ && echo "Description: A utility for testing PKCS#11 implementations" >> DEBIAN/control diff --git a/configure.ac b/configure.ac index 41942ea..ba7843b 100644 --- a/configure.ac +++ b/configure.ac @@ -63,10 +63,8 @@ AC_CONFIG_HEADERS([config.h]) dnl Check for libraries, headers, data etc here. 
AC_SEARCH_LIBS([dlopen], [dl dld], [], [AC_MSG_FAILURE([can't find dynamic linker lib])]) AX_PTHREAD(,[AC_MSG_ERROR[pthread is required to compile this project]]) -AX_BOOST_BASE([1.62],, [AC_MSG_ERROR([p11perftest needs Boost, but it was not found in your system])]) +AX_BOOST_BASE([1.66],, [AC_MSG_ERROR([p11perftest needs Boost, but it was not found in your system])]) AX_BOOST_PROGRAM_OPTIONS() -AX_BOOST_TIMER() -AX_BOOST_CHRONO() PKG_CHECK_MODULES([BOTAN], [ botan-2 > 2.17.0 ]) PKG_CHECK_MODULES([LIBCRYPTO], [ libcrypto > 1 ]) diff --git a/scripts/Makefile.am b/scripts/Makefile.am index ad8f866..e50ca4d 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -16,6 +16,7 @@ ACLOCAL_AMFLAGS = -I m4 -bin_SCRIPTS = createkeys.sh generatekeys.py json2xlsx.py +bin_SCRIPTS = createkeys.sh generatekeys.py json2xlsx.py datapoints2xlsx.py -EXTRA_DIST = createkeys.sh generatekeys.py json2xlsx.py +EXTRA_DIST = createkeys.sh generatekeys.py json2xlsx.py datapoints2xlsx.py \ + generatekeys.spec json2xlsx.spec datapoints2xlsx.spec diff --git a/scripts/datapoints2xlsx.py b/scripts/datapoints2xlsx.py new file mode 100755 index 0000000..c63496b --- /dev/null +++ b/scripts/datapoints2xlsx.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 + +# +# Copyright (c) 2025 Mastercard +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# datapoints2xlsx.py will extract datapoints from JSON files and create an Excel spreadsheet. +# Each column contains the datapoints for a specific test case/key/vector combination. +# Columns are grouped with a wide title row. +# + +import json +import xlsxwriter +import sys +import argparse + + +def retrieve_datapoints(listofjsons): + """ + Generator that yields tuples of (filename, threadgroup, testcase, key, vectorname, datapoints) + for each test case that contains datapoints; threadgroup is None for inputs without thread grouping.
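+ + Illustrative input shapes (inferred from the parsing below; names and values are made up): + thread-grouped: {"4 thread-s": {"testcase": {"key": {"vector": {"datapoints": [0.42, 0.45]}}}}} + flat legacy: {"testcase": {"key": {"datapoints": [0.42, 0.45]}}}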
+ """ + for f in listofjsons: + try: + testcases = json.loads(f.read()) + + # Handle format with thread grouping ('* thread-s' keys) + if list(testcases.keys())[0].endswith('thread-s'): + for threadgroupname, threadgroup in testcases.items(): + for testcase, keys in threadgroup.items(): + for key, vectors in keys.items(): + for vectorname, vector in vectors.items(): + if isinstance(vector, dict) and 'datapoints' in vector and isinstance(vector['datapoints'], list): + yield f.name, threadgroupname, testcase, key, vectorname, vector['datapoints'] + # Legacy format without thread grouping + else: + for testcase, keys in testcases.items(): + for key, vectors in keys.items(): + # Check if this level contains datapoints directly (flat structure) + if isinstance(vectors, dict) and 'datapoints' in vectors and isinstance(vectors['datapoints'], list): + yield f.name, None, testcase, key, key, vectors['datapoints'] + # Otherwise, iterate through vector names + elif isinstance(vectors, dict): + for vectorname, vector in vectors.items(): + if isinstance(vector, dict) and 'datapoints' in vector and isinstance(vector['datapoints'], list): + yield f.name, None, testcase, key, vectorname, vector['datapoints'] + + except Exception as e: + print(f"*** got an error while processing {f.name}: \"{e}\", skipping that file") + + +class DatapointsConverter: + def __init__(self, toxlsx): + self.toxlsx = toxlsx + self.col = 0 + self.columns = [] + + def __enter__(self): + self.workbook = xlsxwriter.Workbook(self.toxlsx, options={'nan_inf_to_errors': True}) + self.worksheet = self.workbook.add_worksheet('latency (ms)') + + # Format for the header row + self.header_format = self.workbook.add_format({ + 'bold': True, + 'bg_color': '#D7E4BC', + 'border': 1 + }) + + return self + + def __exit__(self, type, value, traceback): + self.workbook.close() + + def add_datapoints_column(self, filename, threadgroup, testcase, key, vectorname, datapoints): + """ + Add a column of datapoints to the spreadsheet. + Row 0: Column header with test identification + Row 1+: Individual datapoint values + """ + # Create the column header + if threadgroup: + column_header = f"{filename} | {threadgroup} | {testcase} | {key} | {vectorname}" + else: + column_header = f"{filename} | {testcase} | {key} | {vectorname}" + + # Write the column header in row 0 + self.worksheet.write(0, self.col, column_header, self.header_format) + + # Write the datapoints starting from row 1 + for idx, datapoint in enumerate(datapoints): + self.worksheet.write_number(1 + idx, self.col, float(datapoint)) + + # Store column info for potential table creation + self.columns.append({ + 'header': column_header, + 'count': len(datapoints) + }) + + # Move to next column + self.col += 1 + + def finalize(self): + """ + Adjust column widths for better readability. + """ + # Set a reasonable width for all columns + for col_idx in range(self.col): + self.worksheet.set_column(col_idx, col_idx, 20) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description='Extract datapoints from p11perftest JSON files to Excel spreadsheet format', + epilog='Each column contains the datapoints for one test case. ' + 'The first row shows the grouped title with test identification.' 
+ ) + parser.add_argument('input', metavar='JSONFILE', + help='Path to JSON input file(s) containing datapoints', + nargs='+', + type=argparse.FileType('r')) + parser.add_argument('output', metavar='XLSXFILE', + help='Path to Excel XLSX spreadsheet file') + args = parser.parse_args() + + numcols = 0 + total_datapoints = 0 + + with DatapointsConverter(args.output) as converter: + for datapoint_info in retrieve_datapoints(args.input): + filename, threadgroup, testcase, key, vectorname, datapoints = datapoint_info + converter.add_datapoints_column(filename, threadgroup, testcase, key, vectorname, datapoints) + numcols += 1 + total_datapoints += len(datapoints) + + converter.finalize() + + if numcols == 0: + print(f"Warning: No datapoints found in input files. The output file may be empty.") + else: + print(f"Extracted {total_datapoints} datapoints from {numcols} test case(s) to {args.output}") diff --git a/scripts/datapoints2xlsx.spec b/scripts/datapoints2xlsx.spec new file mode 100644 index 0000000..2232613 --- /dev/null +++ b/scripts/datapoints2xlsx.spec @@ -0,0 +1,59 @@ +# -*- mode: python ; coding: utf-8 -*- + +# +# Copyright (c) 2025 Mastercard +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +block_cipher = None + + +a = Analysis( + ['datapoints2xlsx.py'], + pathex=[], + binaries=[], + datas=[], + hiddenimports=[], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False, +) +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) + +exe = EXE( + pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + [], + name='datapoints2xlsx', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + upx_exclude=[], + runtime_tmpdir=None, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) diff --git a/scripts/gengraphs.py b/scripts/gengraphs.py index de2c794..09c2d96 100755 --- a/scripts/gengraphs.py +++ b/scripts/gengraphs.py @@ -18,13 +18,42 @@ # import argparse +import signal +import sys +import os +import multiprocessing as mp import pandas as pd import matplotlib matplotlib.use('Agg') # use a non-interactive backend import matplotlib.pyplot as plt +from matplotlib.ticker import MaxNLocator import numpy as np from scipy.optimize import curve_fit +# Global variables for multiprocess-safe file tracking +manager = mp.Manager() +current_files = manager.list() +files_lock = manager.Lock() +graphs_created = manager.Value('i', 0) # Shared counter for created graphs + + +def signal_handler(_sig, _frame): + """Handle CTRL-C gracefully by cleaning up incomplete files""" + print('\n\nInterrupted! 
Cleaning up incomplete files...', file=sys.stderr) + with files_lock: + files_to_remove = list(current_files) + created_count = graphs_created.value + for filepath in files_to_remove: + if os.path.exists(filepath): + try: + os.remove(filepath) + print(f'Removed incomplete file: {filepath}', file=sys.stderr) + except OSError as e: + print(f'Failed to remove {filepath}: {e}', file=sys.stderr) + print(f'\nTotal graphs created before interruption: {created_count}', file=sys.stderr) + print('Exiting.', file=sys.stderr) + sys.exit(0) + def splithalf(string): """split a sentence in two halves""" @@ -39,27 +68,30 @@ def splithalf(string): def format_title1(s1, s2): + """Format title for vector size.""" if str(s2)[0] == '8': - return f"{s1} on an {s2} Bytes Vector".format(s1, s2) + return f"{s1} on an {s2} Bytes Vector" else: - return f"{s1} on a {s2} Bytes Vector".format(s1, s2) + return f"{s1} on a {s2} Bytes Vector" def format_title2(s1, s2): + """Format title for threads.""" if s2 == 1: - return f"{s1} on {s2} Thread".format(s1, s2) + return f"{s1} on {s2} Thread" else: - return f"{s1} on {s2} Threads".format(s1, s2) + return f"{s1} on {s2} Threads" -def create_dataframe(xls, sheetname): - """create a dataframe from an excel file; are we interested in throughput or transactions?""" +def create_dataframe(xls, sheetname, xvar): + """create a dataframe from an excel file; are we interested in throughput or transactions?""" df = pd.read_excel(xls, sheet_name=sheetname) - df.sort_values(by=[xvar]) + df = df.sort_values(by=[xvar]) # sort_values returns a new frame, so the result must be kept return df -def determine_measure(testcase): +def determine_measure(testcase, col3name): + """determine if we are measuring TPS or throughput based on test case name""" if "signature" in testcase.lower() or "hmac" in testcase.lower(): # for signature and HMAC algos, we are interested only in knowing the TPS measure = 'tps' @@ -73,8 +105,8 @@ return measure, unit, col2, col3 -def create_graph_frame(df, testcase, item): - measure, unit, col2, col3 = determine_measure(testcase) +def create_graph_frame(df, testcase, item, xvar, graph_parameter, col3name, p95, p98, p99): + measure, unit, col2, col3 = determine_measure(testcase, col3name) frame = df.loc[(df['test case'] == testcase) & (df[graph_parameter] == item), [xvar, 'latency average value', col2]] frame['tp_upper'] = frame[col2] + df[f'{measure} global error'] @@ -89,44 +121,303 @@ frame['tp_xvar_lower'] = frame[col3] - df[f'{measure} global error'] / frame[xvar] frame['tp_xvar_lower'] = frame['tp_xvar_lower'].map(lambda x: max(x, 0)) - if args.p95 or args.p98 or args.p99: + percentiles_available = True + if p95 or p98 or p99: try: frame['p95'] = df['latency p95 value'] frame['p98'] = df['latency p98 value'] frame['p99'] = df['latency p99 value'] except KeyError: print("\n\nPercentiles not present in the spreadsheet, ignoring the percentiles flag.\n\n") - args.p95, args.p98, args.p99 = False, False, False + percentiles_available = False - return frame, measure, unit, col2, col3 + return frame, measure, unit, col2, col3, percentiles_available -def comparison_labels(xlsfp, xlsfp2): - if not args.comparison: - xlsfp.label = '', '' - if args.labels is not None: - print('Not in comparison mode, ignoring labels. Did you forget to specify -c flag?') +def _create_single_graph_worker(df1, df2, testcase, item, params): + """Global worker function for multiprocessing. + + This needs to be a module-level function (not a method) so it can be pickled. 
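+ + Note (assumption): the shared progress trackers (current_files, files_lock, graphs_created) are multiprocessing.Manager proxies created at module level above, so worker processes are expected to inherit them (e.g. with the fork start method on Linux).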
+ """ + print(f"Drawing graph for {testcase} and {params['graph_parameter']} {item}...", end='') + frame1, measure, unit, col2, col3, p_avail = create_graph_frame( + df1, testcase, item, params['xvar'], params['graph_parameter'], + params['col3name'], params['p95'], params['p98'], params['p99']) + frame2 = None + if params['comparison']: + frame2, measure2, _, _, _, _ = create_graph_frame( + df2, testcase, item, params['xvar'], params['graph_parameter'], + params['col3name'], params['p95'], params['p98'], params['p99']) + + fig, (ax, ax2) = plt.subplots(2, figsize=(16, 16), height_ratios=(3, 1)) + + ax.plot(frame1[params['xvar']], frame1[f'{measure} global value'], marker='v', color='tab:blue') + if not params['no_error_region']: + ax.plot(frame1[params['xvar']], frame1['tp_upper'], color='tab:blue', alpha=0.4) + ax.plot(frame1[params['xvar']], frame1['tp_lower'], color='tab:blue', alpha=0.4) + ax.fill_between(frame1[params['xvar']], frame1['tp_upper'], frame1['tp_lower'], facecolor='tab:blue', + alpha=0.4) + + if params['comparison']: + ax.plot(frame2[params['xvar']], frame2[f'{measure} global value'], marker='^', color='tab:blue', + linestyle='--') + if not params['no_error_region']: + ax.plot(frame2[params['xvar']], frame2['tp_upper'], color='tab:blue', alpha=0.4, linestyle='--') + ax.plot(frame2[params['xvar']], frame2['tp_lower'], color='tab:blue', alpha=0.4, linestyle='--') + ax.fill_between(frame2[params['xvar']], frame2['tp_upper'], frame2['tp_lower'], + facecolor='tab:blue', alpha=0.4, linestyle='--') + + title = params['format_title'](testcase, item) + if params['comparison']: + title += f': {params["xlsfp_label"][0]} vs {params["xlsfp2_label"][0]}' + title = "{}\n{}".format(*splithalf(title)) + ax.set_title(title) + ax.set_xlabel(params['xlabel']) + ax.set_ylabel(f'Throughput ({unit})', color='tab:blue') + ax.tick_params(axis='y', labelcolor='tab:blue') + ax.xaxis.set_major_locator(MaxNLocator(integer=True)) + ax.grid('on', which='both', axis='x') + ax.grid('on', which='major', axis='y', linestyle='-', color='tab:blue', alpha=0.3) + + ax1 = ax.twinx() # add second plot to the same axes, sharing x-axis + ax1.plot(np.nan, marker='v', label=f'{measure}, global {params["xlsfp_label"][1]}', + color='tab:blue') # Make an agent in ax + if params['comparison']: + ax1.plot(np.nan, marker='^', label=f'{measure}, global {params["xlsfp2_label"][1]}', color='tab:blue', + linestyle='--') # Make an agent in ax + + ax1.plot(frame1[params['xvar']], frame1['latency average value'], label=f'latency average {params["xlsfp_label"][1]}', + color='black', marker='p') + + # Add horizontal dash-dot grid lines for latency average axis + ax1.grid(True, which='major', axis='y', linestyle='-.', color='black', alpha=0.3, zorder=2) + + + # Create a third y-axis for percentiles if they are enabled + if params['p95'] or params['p98'] or params['p99']: + ax_percentiles = ax.twinx() + # Offset the third axis to the right + ax_percentiles.spines['right'].set_position(('axes', 1.1)) + + if params['p95']: + ax_percentiles.plot(frame1[params['xvar']], frame1['p95'], color='plum', alpha=1.0, label=f'latency p95 {params["xlsfp_label"][1]}', marker='1', zorder=5) + ax_percentiles.fill_between(frame1[params['xvar']], frame1['p95'], facecolor='palegreen', alpha=0.2, zorder=1) + if params['p98']: + ax_percentiles.plot(frame1[params['xvar']], frame1['p98'], color='mediumorchid', alpha=1.0, label=f'latency p98 {params["xlsfp_label"][1]}', marker='2', zorder=5) + ax_percentiles.fill_between(frame1[params['xvar']], 
frame1['p98'], facecolor='lightgreen', alpha=0.2, zorder=1) + if params['p99']: + ax_percentiles.plot(frame1[params['xvar']], frame1['p99'], color='darkorchid', alpha=1.0, label=f'latency p99 {params["xlsfp_label"][1]}', marker='3', zorder=5) + ax_percentiles.fill_between(frame1[params['xvar']], frame1['p99'], facecolor='limegreen', alpha=0.2, zorder=1) + + ax_percentiles.set_ylabel('Latency Percentiles (ms)', color='darkviolet') + ax_percentiles.tick_params(axis='y', labelcolor='darkviolet') + + # Add horizontal dashed grid lines for percentile axis + ax_percentiles.grid(True, which='major', axis='y', linestyle='--', color='darkviolet', alpha=0.3, zorder=2) + + if not params['no_error_region']: + ax1.plot(np.nan, label=f'{measure} error', color='tab:blue', alpha=0.4) # Make an agent in ax + ax1.plot(frame1[params['xvar']], frame1['latency_upper'], label='latency error region', color='grey', + alpha=0.4) + ax1.plot(frame1[params['xvar']], frame1['latency_lower'], color='grey', alpha=0.4) + ax1.fill_between(frame1[params['xvar']], frame1['latency_upper'], frame1['latency_lower'], + facecolor='grey', alpha=0.4) + + if params['comparison']: + ax1.plot(frame2[params['xvar']], frame2['latency average value'], label=f'latency {params["xlsfp2_label"][1]}', + color='black', marker='*', linestyle='--') + if not params['no_error_region']: + ax1.plot(frame2[params['xvar']], frame2['latency_upper'], color='grey', alpha=0.4, linestyle='--') + ax1.plot(frame2[params['xvar']], frame2['latency_lower'], color='grey', alpha=0.4, linestyle='--') + ax1.fill_between(frame2[params['xvar']], frame2['latency_upper'], frame2['latency_lower'], + facecolor='grey', alpha=0.4, linestyle='--') + + ax1.set_ylabel('Latency Average (ms)') + + # Set x-axis and y-axis limits to remove margins + ax.set_xlim(left=frame1[params['xvar']].min(), right=frame1[params['xvar']].max()) + ax.set_ylim(bottom=0) + ax1.set_ylim(bottom=0) + if params['p95'] or params['p98'] or params['p99']: + ax_percentiles.set_ylim(bottom=0) + + # Merge legends from all axes and attach to the topmost axis + handles1, labels1 = ax1.get_legend_handles_labels() + if params['p95'] or params['p98'] or params['p99']: + handles_percentiles, labels_percentiles = ax_percentiles.get_legend_handles_labels() + handles1 += handles_percentiles + labels1 += labels_percentiles + # Attach legend to ax_percentiles (the topmost axis) instead of ax1 + legend = ax_percentiles.legend(handles1, labels1, loc='lower right', fancybox=False, framealpha=1.0, facecolor='white') else: - if args.labels is None: - xlsfp.label = 'data set 1', '(data set 1)' - xlsfp2.label = 'data set 2', '(data set 2)' - else: - xlsfp.label = args.labels[0], f'({args.labels[0]})' - xlsfp2.label = args.labels[1], f'({args.labels[1]})' - + legend = ax1.legend(handles1, labels1, loc='lower right', fancybox=False, framealpha=1.0, facecolor='white') + legend.set_zorder(100) + + # second subplot with tp per item + if params['indvar'] == 'threads': + label = f'{measure}/{params["xvar"]} {params["xlsfp_label"][1]}' + if params['indvar'] == 'size': + label = 'transactions' + + ax2.plot(frame1[params['xvar']], frame1[params['ycomparison'].format(measure)], marker='+', + label=label, color='tab:red') + + if not params['no_error_region']: + if params['indvar'] == 'threads': + label = f'{measure}/{params["xvar"]} error region' + if params['indvar'] == 'size': + label = 'transactions error region' + ax2.plot(frame1[params['xvar']], frame1['tp_xvar_upper'], color='tab:red', + label=label, alpha=0.4) + 
ax2.plot(frame1[params['xvar']], frame1['tp_xvar_lower'], color='tab:red', alpha=0.4) + ax2.fill_between(frame1[params['xvar']], frame1['tp_xvar_upper'], frame1['tp_xvar_lower'], + facecolor='tab:red', alpha=0.4) + if params['comparison']: + ax2.plot(frame2[params['xvar']], frame2[params['ycomparison'].format(measure)], marker='x', + label=f'{measure}/{params["xvar"]} {params["xlsfp2_label"][1]}', + color='tab:red', linestyle='--') + if not params['no_error_region']: + ax2.plot(frame2[params['xvar']], frame2['tp_xvar_upper'], color='tab:red', alpha=0.4, linestyle='--') + ax2.plot(frame2[params['xvar']], frame2['tp_xvar_lower'], color='tab:red', alpha=0.4, linestyle='--') + ax2.fill_between(frame2[params['xvar']], frame2['tp_xvar_upper'], frame2['tp_xvar_lower'], + facecolor='tab:red', alpha=0.4) + + ax2.set_xlabel(params['xlabel']) + if params['indvar'] == 'threads': + ax2.set_ylabel(f'Throughput ({unit})') + if params['indvar'] == 'size': + ax2.set_ylabel('Transactions/s') + ax2.xaxis.set_major_locator(MaxNLocator(integer=True)) + ax2.set_xlim(left=frame1[params['xvar']].min(), right=frame1[params['xvar']].max()) + ax2.set_ylim(bottom=0) + ax2.grid('on', which='both', axis='x') + ax2.grid('on', which='major', axis='y') + ax2.legend(loc='upper right', fancybox=False, framealpha=1.0, facecolor='white') + + # add some regression lines + def rline_throughput(): + def throughput_model(z, a, b): + return a * z / (z + b) + + popt, pcov = curve_fit(throughput_model, frame1['vector size'], + frame1[f'{measure} global value'] / 100000) + x_tp = np.linspace(16, 2048, 1000) + y_tp = throughput_model(x_tp, *popt) + df_throughput_model = pd.DataFrame({'vector size': x_tp, 'model values': y_tp * 100000}) + ax.plot(df_throughput_model['vector size'], df_throughput_model['model values'], marker=',', + color='tab:green', linestyle='--') + ax1.plot(np.nan, linestyle='--', color='tab:green', + label=r"""Throughput model: $y=\frac{{{}x}}{{x+{}}}$""".format(int(popt[0] * 100000), + int(popt[1]))) + + def rline_latency(): + def latency_model(z, a, b): + return a + z * b + + popt1, pcov1 = curve_fit(latency_model, frame1['vector size'], frame1['latency average value']) + x_lt = np.linspace(16, 2048, 100) + y_lt = latency_model(x_lt, *popt1) + df_latency_model = pd.DataFrame({'vector size': x_lt, 'model values': y_lt}) + a, b = '{0:.3f}'.format(popt1[0]), '{0:.3f}'.format(popt1[1]) + ax1.plot(df_latency_model['vector size'], df_latency_model['model values'], marker=',', + color='orange', linestyle='dashdot', label=r'Latency model: $y={}+{}x$'.format(a, b)) + ax1.legend(loc='lower right') + + if params['reglines']: + rline_throughput() + rline_latency() + + plt.tight_layout() + filename = testcase.lower().replace(' ', '_') + + # Track files being created with lock protection + files_being_saved = [] + + if 'svg' in params['output_format'] or 'all' in params['output_format']: + svg_file = os.path.join(params['output_dir'], f'{filename}-{params["fnsub"]}{item}.svg') + files_being_saved.append(svg_file) + if 'png' in params['output_format'] or 'all' in params['output_format']: + png_file = os.path.join(params['output_dir'], f'{filename}-{params["fnsub"]}{item}.png') + files_being_saved.append(png_file) + + # Add files to tracking list with lock + with files_lock: + current_files.extend(files_being_saved) + + # Save files + for file_path in files_being_saved: + if file_path.endswith('.svg'): + plt.savefig(file_path, format='svg', orientation='landscape') + elif file_path.endswith('.png'): + plt.savefig(file_path, 
format='png', orientation='landscape') + + # Remove files from tracking list after successful save + with files_lock: + for f in files_being_saved: + if f in current_files: + current_files.remove(f) + # Increment counter of successfully created graphs + graphs_created.value += 1 + + plt.cla() + plt.close(fig) + print('OK', flush=True) -def generate_graphs(xlsfp, sheetname, xlsfp2): - comparison_labels(xlsfp, xlsfp2) - xls_tuple = xlsfp, xlsfp - if args.comparison: - xls_tuple = xlsfp, xlsfp2 - with xls_tuple[0], xls_tuple[1]: +class GraphGenerator: + """Class to encapsulate graph generation logic and parameters.""" + + def __init__(self, xlsfp, sheetname, xlsfp2, xvar, graph_parameter, xlabel, ycomparison, fnsub, col3name, format_title, + comparison, labels, no_error_region, p95, p98, p99, output_format, indvar, reglines, output_dir='.'): + self.xlsfp = xlsfp + self.sheetname = sheetname + self.xlsfp2 = xlsfp2 + self.xvar = xvar + self.graph_parameter = graph_parameter + self.xlabel = xlabel + self.ycomparison = ycomparison + self.fnsub = fnsub + self.col3name = col3name + self.format_title = format_title + self.comparison = comparison + self.labels = labels + self.no_error_region = no_error_region + self.p95 = p95 + self.p98 = p98 + self.p99 = p99 + self.output_format = output_format + self.indvar = indvar + self.reglines = reglines + self.output_dir = output_dir + + self._setup_comparison_labels() + + def _setup_comparison_labels(self): + """Setup labels for comparison mode.""" + if not self.comparison: + self.xlsfp_label = ('', '') + self.xlsfp2_label = None + if self.labels is not None: + print('Not in comparison mode, ignoring labels. Did you forget to specify -c flag?') + else: + if self.labels is None: + self.xlsfp_label = ('data set 1', '(data set 1)') + self.xlsfp2_label = ('data set 2', '(data set 2)') + else: + self.xlsfp_label = (self.labels[0], f'({self.labels[0]})') + self.xlsfp2_label = (self.labels[1], f'({self.labels[1]})') + + def generate(self, num_processes=None): + """Generate all graphs using multiprocessing. + + Args: + num_processes: Number of parallel processes to use. If None, uses CPU count. + """ # read from spreadsheet directly - df1 = create_dataframe(xlsfp, sheetname) - - if args.comparison: - df2 = create_dataframe(xlsfp2, 'Sheet1') + df1 = create_dataframe(self.xlsfp, self.sheetname, self.xvar) + df2 = None + if self.comparison: + df2 = create_dataframe(self.xlsfp2, 'Sheet1', self.xvar) ### could reintroduce this logic below. removed for now... 
# if not (measure1 == measure2) and (df1[graph_parameter].unique() == df2[graph_parameter].unique()): # raise AssertionError('Please compare similar things.') @@ -134,181 +425,91 @@ def generate_graphs(xlsfp, sheetname, xlsfp2): # else: # measure = measure1 + # Build list of all tasks (testcase, item) pairs + tasks = [] for testcase in df1["test case"].unique(): - for item in sorted(df1.loc[(df1['test case'] == testcase)][graph_parameter].unique()): - print(f"Drawing graph for {testcase} and {graph_parameter} {item}...", end='') - frame1, measure, unit, col2, col3 = create_graph_frame(df1, testcase, item) - if args.comparison: - frame2, measure2, _, _, _ = create_graph_frame(df2, testcase, item) - - fig, (ax, ax2) = plt.subplots(2, figsize=(16, 16), height_ratios=(3, 1)) - - ax.plot(frame1[xvar], frame1[f'{measure} global value'], marker='v', color='tab:blue') - if not args.no_error_region: - ax.plot(frame1[xvar], frame1['tp_upper'], color='tab:blue', alpha=0.4) - ax.plot(frame1[xvar], frame1['tp_lower'], color='tab:blue', alpha=0.4) - ax.fill_between(frame1[xvar], frame1['tp_upper'], frame1['tp_lower'], facecolor='tab:blue', - alpha=0.4) - - if args.comparison: - ax.plot(frame2[xvar], frame2[f'{measure} global value'], marker='^', color='tab:blue', - linestyle='--') - if not args.no_error_region: - ax.plot(frame2[xvar], frame2['tp_upper'], color='tab:blue', alpha=0.4, linestyle='--') - ax.plot(frame2[xvar], frame2['tp_lower'], color='tab:blue', alpha=0.4, linestyle='--') - ax.fill_between(frame2[xvar], frame2['tp_upper'], frame2['tp_lower'], - facecolor='tab:blue', alpha=0.4, linestyle='--') - - title = format_title(testcase, item) - if args.comparison: - title += f': {xlsfp.label[0]} vs {xlsfp2.label[0]}' - title = "{}\n{}".format(*splithalf(title)) - ax.set_title(title) - ax.set_xlabel(xlabel) - ax.set_ylabel(f'Throughput ({unit})') - ax.grid('on', which='both', axis='x') - ax.grid('on', which='major', axis='y') - - ax1 = ax.twinx() # add second plot to the same axes, sharing x-axis - ax1.plot(np.nan, marker='v', label=f'{measure}, global {xlsfp.label[1]}', - color='tab:blue') # Make an agent in ax - if args.comparison: - ax1.plot(np.nan, marker='^', label=f'{measure}, global {xlsfp2.label[1]}', color='tab:blue', - linestyle='--') # Make an agent in ax - - ax1.plot(frame1[xvar], frame1['latency average value'], label=f'latency average {xlsfp.label[1]}', - color='black', marker='p') - - - if args.p95: - ax1.plot(frame1[xvar], frame1['p95'], color='green', alpha=1.0, label=f'latency p95 {xlsfp.label[1]}', marker='1') - ax1.fill_between(frame1[xvar], frame1['p95'], facecolor='grey', alpha=0.2) - if args.p98: - ax1.plot(frame1[xvar], frame1['p98'], color='red', alpha=1.0, label=f'latency p98 {xlsfp.label[1]}', marker='2') - ax1.fill_between(frame1[xvar], frame1['p98'], facecolor='grey', alpha=0.2) - if args.p99: - ax1.plot(frame1[xvar], frame1['p99'], color='blue', alpha=1.0, label=f'latency p99 {xlsfp.label[1]}', marker='3') - ax1.fill_between(frame1[xvar], frame1['p99'], facecolor='grey', alpha=0.2) - - if not args.no_error_region: - ax1.plot(np.nan, label=f'{measure} error', color='tab:blue', alpha=0.4) # Make an agent in ax - ax1.plot(frame1[xvar], frame1['latency_upper'], label='latency error region', color='grey', - alpha=0.4) - ax1.plot(frame1[xvar], frame1['latency_lower'], color='grey', alpha=0.4) - ax1.fill_between(frame1[xvar], frame1['latency_upper'], frame1['latency_lower'], - facecolor='grey', alpha=0.4) - - if args.comparison: - ax1.plot(frame2[xvar], frame2['latency average 
value'], label=f'latency {xlsfp2.label[1]}', - color='black', marker='*', linestyle='--') - if not args.no_error_region: - ax1.plot(frame2[xvar], frame2['latency_upper'], color='grey', alpha=0.4, linestyle='--') - ax1.plot(frame2[xvar], frame2['latency_lower'], color='grey', alpha=0.4, linestyle='--') - ax1.fill_between(frame2[xvar], frame2['latency_upper'], frame2['latency_lower'], - facecolor='grey', alpha=0.4, linestyle='--') - - ax1.set_ylabel('Latency (ms)') - ax1.legend(loc='lower right') - - # second subplot with tp per item - if args.indvar == 'threads': - label = f'{measure}/{xvar} {xlsfp.label[1]}' - if args.indvar == 'size': - label = 'transactions' - - ax2.plot(frame1[xvar], frame1[ycomparison.format(measure)], marker='+', - label=label, color='tab:red') - - if not args.no_error_region: - if args.indvar == 'threads': - label = f'{measure}/{xvar} error region' - if args.indvar == 'size': - label = 'transactions error region' - ax2.plot(frame1[xvar], frame1['tp_xvar_upper'], color='tab:red', - label=label, alpha=0.4) - ax2.plot(frame1[xvar], frame1['tp_xvar_lower'], color='tab:red', alpha=0.4) - ax2.fill_between(frame1[xvar], frame1['tp_xvar_upper'], frame1['tp_xvar_lower'], - facecolor='tab:red', alpha=0.4) - if args.comparison: - ax2.plot(frame2[xvar], frame2[ycomparison.format(measure)], marker='x', - label=f'{measure}/{xvar} {xlsfp2.label[1]}', - color='tab:red', linestyle='--') - if not args.no_error_region: - ax2.plot(frame2[xvar], frame2['tp_xvar_upper'], color='tab:red', alpha=0.4, linestyle='--') - ax2.plot(frame2[xvar], frame2['tp_xvar_lower'], color='tab:red', alpha=0.4, linestyle='--') - ax2.fill_between(frame2[xvar], frame2['tp_xvar_upper'], frame2['tp_xvar_lower'], - facecolor='tab:red', alpha=0.4) - - ax2.set_xlabel(xlabel) - if args.indvar == 'threads': - ax2.set_ylabel(f'Throughput ({unit})') - if args.indvar == 'size': - ax2.set_ylabel('Transactions/s') - ax2.grid('on', which='both', axis='x') - ax2.grid('on', which='major', axis='y') - ax2.legend(loc='upper right') - - # add some regression lines - def rline_throughput(): - def throughput_model(z, a, b): - return a * z / (z + b) - - popt, pcov = curve_fit(throughput_model, frame1['vector size'], - frame1[f'{measure} global value'] / 100000) - x_tp = np.linspace(16, 2048, 1000) - y_tp = throughput_model(x_tp, *popt) - df_throughput_model = pd.DataFrame({'vector size': x_tp, 'model values': y_tp * 100000}) - ax.plot(df_throughput_model['vector size'], df_throughput_model['model values'], marker=',', - color='tab:green', linestyle='--') - ax1.plot(np.nan, linestyle='--', color='tab:green', - label=r"""Throughput model: $y=\frac{{{}x}}{{x+{}}}$""".format(int(popt[0] * 100000), - int(popt[1]))) - - def rline_latency(): - def latency_model(z, a, b): - return a + z * b - - popt1, pcov1 = curve_fit(latency_model, frame1['vector size'], frame1['latency average value']) - x_lt = np.linspace(16, 2048, 100) - y_lt = latency_model(x_lt, *popt1) - df_latency_model = pd.DataFrame({'vector size': x_lt, 'model values': y_lt}) - a, b = '{0:.3f}'.format(popt1[0]), '{0:.3f}'.format(popt1[1]) - ax1.plot(df_latency_model['vector size'], df_latency_model['model values'], marker=',', - color='orange', linestyle='dashdot', label=r'Latency model: $y={}+{}x$'.format(a, b)) - ax1.legend(loc='lower right') - - if hasattr(args, "reglines"): - if args.reglines: - rline_throughput() - rline_latency() - - plt.tight_layout() - filename = testcase.lower().replace(' ', '_') - if 'svg' in args.format or 'all' in args.format: - 
plt.savefig(f'{filename}-{fnsub}{item}.svg', format='svg', orientation='landscape') - if 'png' in args.format or 'all' in args.format: - plt.savefig(f'{filename}-{fnsub}{item}.png', format='png', orientation='landscape') - plt.cla() - plt.close(fig) - print('OK', flush=True) + for item in sorted(df1.loc[(df1['test case'] == testcase)][self.graph_parameter].unique()): + tasks.append((testcase, item)) + + # Process tasks in parallel + if num_processes == 1: + # Serial execution for debugging + for testcase, item in tasks: + self._create_single_graph(df1, df2, testcase, item) + else: + # Parallel execution - extract parameters needed by workers + worker_params = { + 'xvar': self.xvar, + 'graph_parameter': self.graph_parameter, + 'xlabel': self.xlabel, + 'ycomparison': self.ycomparison, + 'fnsub': self.fnsub, + 'col3name': self.col3name, + 'format_title': self.format_title, + 'comparison': self.comparison, + 'xlsfp_label': self.xlsfp_label, + 'xlsfp2_label': self.xlsfp2_label if self.comparison else None, + 'no_error_region': self.no_error_region, + 'p95': self.p95, + 'p98': self.p98, + 'p99': self.p99, + 'output_format': self.output_format, + 'indvar': self.indvar, + 'reglines': self.reglines, + 'output_dir': self.output_dir + } + + with mp.Pool(processes=num_processes) as pool: + pool.starmap(_create_single_graph_worker, + [(df1, df2, testcase, item, worker_params) for testcase, item in tasks]) + + def _create_single_graph(self, df1, df2, testcase, item): + """Create and save a single graph for a specific testcase and item (serial execution).""" + # Build params dict and call the worker function + worker_params = { + 'xvar': self.xvar, + 'graph_parameter': self.graph_parameter, + 'xlabel': self.xlabel, + 'ycomparison': self.ycomparison, + 'fnsub': self.fnsub, + 'col3name': self.col3name, + 'format_title': self.format_title, + 'comparison': self.comparison, + 'xlsfp_label': self.xlsfp_label, + 'xlsfp2_label': self.xlsfp2_label if self.comparison else None, + 'no_error_region': self.no_error_region, + 'p95': self.p95, + 'p98': self.p98, + 'p99': self.p99, + 'output_format': self.output_format, + 'indvar': self.indvar, + 'reglines': self.reglines, + 'output_dir': self.output_dir + } + _create_single_graph_worker(df1, df2, testcase, item, worker_params) if __name__ == '__main__': + # Register signal handler for CTRL-C + signal.signal(signal.SIGINT, signal_handler) + parser = argparse.ArgumentParser(description='Generate graphs from spreadsheet of p11perftest results') - parser.add_argument('xls', metavar='FILE', type=argparse.FileType('rb'), help='Path to Excel spreadsheet') + parser.add_argument('xls', metavar='FILE', type=str, help='Path to Excel spreadsheet') parser.add_argument('-t', '--table', help='Table name.', default=0) parser.add_argument('-f', '--format', help='Output format. Defaults to all (png and svg).', choices=['png', 'svg', 'all'], default='all') + parser.add_argument('-j', '--jobs', type=int, metavar='N', help='Number of parallel jobs to run. Default is CPU count. Use 1 for serial execution.') + parser.add_argument('-d', '--directory', metavar='DIR', help='Directory where graphs will be saved. Must exist and be writable. Defaults to current directory.', default='.') - parser.add_argument('-p', '--percentiles', help='Display percentile plots on graph. 
Equivalent to -p95 -p98 -p99.', action='store_true') - parser.add_argument('-p95', help='Display 95th percentile plot on graph.', action='store_true') - parser.add_argument('-p98', help='Display 98th percentile plot on graph.', action='store_true') - parser.add_argument('-p99', help='Display 99th percentile plot on graph.', action='store_true') + parser.add_argument('-p', '--percentiles', help='Display percentile plots on graph. Equivalent to --p95 --p98 --p99.', action='store_true') + parser.add_argument('--p95', help='Display 95th percentile plot on graph.', action='store_true') + parser.add_argument('--p98', help='Display 98th percentile plot on graph.', action='store_true') + parser.add_argument('--p99', help='Display 99th percentile plot on graph.', action='store_true') parser.add_argument('--no-error-region', help='Remove error regions from plot.', action='store_true') parser.add_argument('-c', '--comparison', help='Compare two datasets. Provide the path to a second Excel spreadsheet.', metavar='FILE', - type=argparse.FileType('rb')) + type=str) subparsers = parser.add_subparsers(dest='indvar') size = subparsers.add_parser('size', @@ -317,10 +518,37 @@ def latency_model(z, a, b): help='Add lines of best fit for latency and throughput using predefined mathematical model.', action='store_true') threads = subparsers.add_parser('threads', help='Set number of threads as independent variable.') - parser.add_argument('-l', '--labels', help='Dataset labels. Defaults to "data set 1" and "data set 2".', nargs=2) + parser.add_argument('-l', '--labels', metavar=('LABEL1', 'LABEL2'), help='Dataset labels. Defaults to "data set 1" and "data set 2".', nargs=2) args = parser.parse_args() + # Validate input files + if not os.path.exists(args.xls): + print(f"Error: File '{args.xls}' does not exist.", file=sys.stderr) + sys.exit(1) + if not os.path.isfile(args.xls): + print(f"Error: '{args.xls}' is not a file.", file=sys.stderr) + sys.exit(1) + + if args.comparison: + if not os.path.exists(args.comparison): + print(f"Error: File '{args.comparison}' does not exist.", file=sys.stderr) + sys.exit(1) + if not os.path.isfile(args.comparison): + print(f"Error: '{args.comparison}' is not a file.", file=sys.stderr) + sys.exit(1) + + # Validate output directory + if not os.path.exists(args.directory): + print(f"Error: Directory '{args.directory}' does not exist.", file=sys.stderr) + sys.exit(1) + if not os.path.isdir(args.directory): + print(f"Error: '{args.directory}' is not a directory.", file=sys.stderr) + sys.exit(1) + if not os.access(args.directory, os.W_OK): + print(f"Error: Directory '{args.directory}' is not writable.", file=sys.stderr) + sys.exit(1) + if args.indvar is None: args.indvar = 'threads' @@ -337,5 +565,13 @@ def latency_model(z, a, b): if args.percentiles: args.p95, args.p98, args.p99 = True, True, True - - generate_graphs(args.xls, args.table, args.comparison) + # Create graph generator and generate all graphs + generator = GraphGenerator(args.xls, args.table, args.comparison, + xvar, graph_parameter, xlabel, ycomparison, fnsub, col3name, format_title, + args.comparison, args.labels, args.no_error_region, + args.p95, args.p98, args.p99, args.format, args.indvar, + getattr(args, 'reglines', False), args.directory) + generator.generate(num_processes=getattr(args, 'jobs', None)) + + # Print summary + print(f'\nTotal graphs created: {graphs_created.value}') diff --git a/scripts/json2xlsx.py b/scripts/json2xlsx.py index f9d8496..3916330 100755 --- a/scripts/json2xlsx.py +++ 
b/scripts/json2xlsx.py @@ -116,6 +116,9 @@ def add_a_row(self, filename, testcase, key, vectorname, vector): def recursive_title(vector, prefix=""): for subk,subv in vector.items(): + # Skip datapoints array if present + if subk == 'datapoints': + continue if not isinstance(subv,(dict)): column_title = (prefix + f"{subk} ").strip() column_dict = { 'header':column_title } @@ -133,6 +136,9 @@ def recursive_title(vector, prefix=""): def recursive_value(vector, prefix=""): for subk,subv in vector.items(): + # Skip datapoints array if present + if subk == 'datapoints': + continue if not isinstance(subv,(dict)): self.worksheet.write(self.row, self.col, cast.get(subk, noop)(subv)) self.col+=1 diff --git a/src/Makefile.am b/src/Makefile.am index e6e96d2..c6ec904 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -38,6 +38,7 @@ bin_PROGRAMS = p11perftest p11perftest_SOURCES = p11benchmark.cpp p11benchmark.hpp \ p11rsasig.cpp p11rsasig.hpp \ + p11rsapss.cpp p11rsapss.hpp \ p11oaepunw.cpp p11oaepunw.hpp \ p11oaepdec.cpp p11oaepdec.hpp \ p11oaepenc.cpp p11oaepenc.hpp \ @@ -55,6 +56,7 @@ p11perftest_SOURCES = p11benchmark.cpp p11benchmark.hpp \ p11xorkeydataderive.cpp p11xorkeydataderive.hpp \ p11seedrandom.cpp p11seedrandom.hpp \ p11genrandom.cpp p11genrandom.hpp \ + p11findobjects.cpp p11findobjects.hpp \ stringhash.hpp \ errorcodes.cpp errorcodes.hpp \ keygenerator.cpp keygenerator.hpp \ @@ -68,6 +70,6 @@ p11perftest_SOURCES = p11benchmark.cpp p11benchmark.hpp \ implementation.cpp implementation.hpp \ p11perftest.cpp -p11perftest_LDADD = $(BOTAN_LIBS) $(BOOST_TIMER_LIB) $(BOOST_PROGRAM_OPTIONS_LIB) $(BOOST_CHRONO_LIB) $(LIBCRYPTO_LIBS) $(PTHREAD_LIBS) +p11perftest_LDADD = $(BOTAN_LIBS) $(BOOST_PROGRAM_OPTIONS_LIB) $(LIBCRYPTO_LIBS) $(PTHREAD_LIBS) diff --git a/src/errorcodes.cpp b/src/errorcodes.cpp index d50e0d9..f05e625 100644 --- a/src/errorcodes.cpp +++ b/src/errorcodes.cpp @@ -19,7 +19,28 @@ #include #include "errorcodes.hpp" -const std::string errorcode(int rc) { +static const std::string _errorcode(int rc); + +// overloaded is needed for std::visit + +template +struct overloaded : Ts... { using Ts::operator()...; }; +template +overloaded(Ts...) 
-> overloaded; + + +const std::string errorcode(benchmark_result::operation_outcome_t outcome) { + return std::visit( + overloaded { + [&](benchmark_result::Ok) -> std::string { return "CKR_OK"; }, + [&](benchmark_result::NotFound const& nf) -> std::string { return nf.what(); }, + [&](benchmark_result::ApiErr const& apiErr) -> std::string { return _errorcode(apiErr); } + }, outcome ); +} + +// static function to map error codes to strings + +static const std::string _errorcode(int rc) { switch ( rc ) { case CKR_OK: return "CKR_OK"; diff --git a/src/errorcodes.hpp b/src/errorcodes.hpp index 67bb26f..08a06a1 100644 --- a/src/errorcodes.hpp +++ b/src/errorcodes.hpp @@ -21,7 +21,9 @@ #include #include "../config.h" +#include "p11benchmark.hpp" + +const std::string errorcode(benchmark_result::operation_outcome_t outcome); -const std::string errorcode(int rc); #endif // ERRORCODES_H diff --git a/src/executor.cpp b/src/executor.cpp index d62a02d..904c09f 100644 --- a/src/executor.cpp +++ b/src/executor.cpp @@ -27,11 +27,14 @@ #include #include #include +#include #include #include #include #include -#include +#include +#include +#include #include #include #include @@ -39,19 +42,20 @@ #include #include #include +#include #include "ConsoleTable.h" #include "errorcodes.hpp" #include "p11benchmark.hpp" #include "measure.hpp" #include "executor.hpp" + // thread sync objects std::mutex greenlight_mtx; std::condition_variable greenlight_cond; bool greenlight = false; namespace bacc = boost::accumulators; -constexpr double nano_to_milli = 1000000.0 ; ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const size_t skipiter, const std::forward_list shortlist ) @@ -61,27 +65,27 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz for(auto testcase: shortlist) { size_t th; - std::vector elapsed_time_array(m_numthreads); - std::vector > future_array(m_numthreads); + std::vector elapsed_time_array(m_numthreads); + std::vector > future_array(m_numthreads); std::vector benchmark_array(m_numthreads); - int last_errcode = CKR_OK; + benchmark_result::operation_outcome_t last_errcode = benchmark_result::Ok{}; - boost::timer::cpu_timer wallclock_t; - nanosecond_type wallclock_elapsed { 0 }; // used to measure how much time in total was spent in executing the test + + milliseconds_double_t wallclock_elapsed { 0 }; // used to measure how much time in total was spent in executing the test // helper functions for ConsoleTable conversion of items to string auto d2s = [] (double arg, int precision=-1) -> std::string { - std::ostringstream stream; - if(precision>=0) stream << std::setprecision(precision); - stream << arg; - return stream.str(); - }; + std::ostringstream stream; + if(precision>=0) stream << std::setprecision(precision); + stream << arg; + return stream.str(); + }; auto i2s = [] (long arg) -> std::string { - std::ostringstream stream; - stream << arg; - return stream.str(); - }; + std::ostringstream stream; + stream << arg; + return stream.str(); + }; std::vector> fact_rows { { "algorithm", "algorithm", benchmark.name() }, @@ -137,7 +141,7 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz // start the wall clock - wallclock_t.start(); + auto wallclock_1 = std::chrono::steady_clock::now(); // give start signal { std::lock_guard greenlight_lck(greenlight_mtx); @@ -151,15 +155,106 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz } // stop wallclock and measure elapsed time - wallclock_t.stop(); 
- wallclock_elapsed = wallclock_t.elapsed().wall; + auto wallclock_2 = std::chrono::steady_clock::now(); + wallclock_elapsed = std::chrono::duration_cast(wallclock_2 - wallclock_1); + // We need to adjust the cache size so it can hold at least 5% of the entire sample + // the sample size = # of threads x # of iterations per thread + size_t required_cache_size = static_cast(std::ceil(0.05 * static_cast(m_numthreads * iter)))+ 10; + + // we create one accumulator for most of the stats bacc::accumulator_set< double, bacc::stats< bacc::tag::mean, bacc::tag::min, bacc::tag::max, bacc::tag::count, - bacc::tag::variance > > acc; + bacc::tag::variance, + bacc::tag::tail_quantile< bacc::right > + > > acc( bacc::tag::tail::cache_size = required_cache_size ); + + // and one for stats vs log-normal distribution + bacc::accumulator_set< double, bacc::stats< + bacc::tag::mean, + bacc::tag::variance, + bacc::tag::count + > > acc_log; + + // Flag to track if we're using log1p (for small values) or log + bool use_log1p = false; + + // Kolmogorov-Smirnov goodness-of-fit test function + auto kolmogorov_smirnov_gof = [](const std::vector& elapsed_array, + bool use_log) -> double { + // First, calculate the mean to decide log vs log1p + double sum_raw = 0.0; + size_t count = 0; + for(const auto& elapsed : elapsed_array) { + // Only consider successful measurements + if(std::holds_alternative(elapsed.second)) { + for(const auto& it : elapsed.first) { + sum_raw += it.count(); + count++; + } + } + } + bool use_log1p_local = use_log && (count > 0) && (sum_raw / count < 1.0); + + // Collect data + std::vector data; + for(const auto& elapsed : elapsed_array) { + if(std::holds_alternative(elapsed.second)) { + for(const auto& it : elapsed.first) { + double val = it.count(); + if (use_log) { + data.push_back(use_log1p_local ? 
std::log1p(val) : std::log(val)); + } else { + data.push_back(val); + } + } + } + } + + if (data.empty()) return 0.0; + + size_t n = data.size(); + + // Calculate mean and stddev directly from data + double sum = 0.0; + for (double val : data) sum += val; + double mean = sum / n; + + double sum_sq = 0.0; + for (double val : data) { + double diff = val - mean; + sum_sq += diff * diff; + } + double variance = sum_sq / (n - 1); // Sample variance + double stddev = std::sqrt(variance); + + // Sort data for KS test + std::sort(data.begin(), data.end()); + + // Standard normal CDF: Φ(x) = 0.5 * (1 + erf((x - μ) / (σ * √2))) + auto norm_cdf = [mean, stddev](double x) { + return 0.5 * (1.0 + std::erf((x - mean) / (stddev * std::sqrt(2.0)))); + }; + + // Calculate Kolmogorov-Smirnov statistic + // D = max|F(x) - F_n(x)| where F_n is the empirical CDF + double D = 0.0; + for (size_t i = 0; i < n; ++i) { + double F_theoretical = norm_cdf(data[i]); + double F_empirical_before = static_cast(i) / n; + double F_empirical_after = static_cast(i + 1) / n; + + // KS statistic is the maximum absolute difference + double diff_before = std::abs(F_theoretical - F_empirical_before); + double diff_after = std::abs(F_theoretical - F_empirical_after); + D = std::max(D, std::max(diff_before, diff_after)); + } + + return D; + }; // helper map table for statistics std::map > stats { @@ -168,25 +263,75 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz { "max", [&acc] () { return bacc::max(acc); }}, { "range", [&acc] () { return (bacc::max(acc) - bacc::min(acc)); }}, { "svar", [&acc] () { - auto n = bacc::count(acc); - double f = static_cast(n) / (n - 1); - return f * bacc::variance(acc); }}, + auto n = bacc::count(acc); + double f = static_cast(n) / (n - 1); + return f * bacc::variance(acc); }}, { "sstddev", [&stats] () { return std::sqrt(stats["svar"]()); }}, // note: for error, we take k=2 so 95% of measures are within interval { "error", [&stats] () { return std::sqrt(stats["svar"]()/static_cast( stats["count"]() ))*2; }}, { "count", [&acc] () { return bacc::count(acc); }}, + { "p95", [&acc] () { return bacc::quantile(acc, bacc::quantile_probability = 0.95); }}, + { "p98", [&acc] () { return bacc::quantile(acc, bacc::quantile_probability = 0.98); }}, + { "p99", [&acc] () { return bacc::quantile(acc, bacc::quantile_probability = 0.99); }}, + { "logavg", [&acc_log, &use_log1p] () { + auto n = bacc::count(acc_log); + if (use_log1p) { + return std::expm1( bacc::mean(acc_log) ); // exp(x)-1 for log1p case + } else { + return std::exp( bacc::mean(acc_log) ); + } + }}, + { "logsvar", [&acc_log, &use_log1p] () { + auto n = bacc::count(acc_log); + double f = static_cast(n) / (n - 1); + if (use_log1p) { + return std::expm1( f * bacc::variance(acc_log) ); + } else { + return std::exp( f * bacc::variance(acc_log) ); + } + }}, + { "logsstdev", [&stats] () { return std::sqrt(stats["logsvar"]()); }}, + { "logerror", [&stats] () { return std::sqrt(stats["logsvar"]()/static_cast( stats["count"]() ))*2; }}, + // Kolmogorov-Smirnov goodness-of-fit tests + { "ks_normal", [&kolmogorov_smirnov_gof, &elapsed_time_array] () { + return kolmogorov_smirnov_gof(elapsed_time_array, false); + }}, + { "ks_lognormal", [&kolmogorov_smirnov_gof, &elapsed_time_array] () { + return kolmogorov_smirnov_gof(elapsed_time_array, true); + }} }; // compute statistics + // First pass: compute regular statistics to determine if we need log1p for(auto elapsed: elapsed_time_array) { - if(elapsed.second != CKR_OK) { + 
if(!std::holds_alternative(elapsed.second)) { last_errcode = elapsed.second; - wallclock_elapsed = 0; + wallclock_elapsed = milliseconds_double_t { 0 }; + break; // something wrong happened, no need to carry on + } + + for(auto &it: elapsed.first) { + double val = it.count(); + acc(val); + } + } + + // Check if average is small (< 1.0), if so use log1p for better numerical stability + use_log1p = (bacc::count(acc) > 0) && (bacc::mean(acc) < 1.0); + + // Second pass: compute log statistics with appropriate transformation + for(auto elapsed: elapsed_time_array) { + if(!std::holds_alternative(elapsed.second)) { break; // something wrong happened, no need to carry on } - for(auto it=elapsed.first.begin(); it!=elapsed.first.end(); ++it) { - acc(*it/nano_to_milli); + for(auto &it: elapsed.first) { + double val = it.count(); + if (use_log1p) { + acc_log(std::log1p(val)); // log(1+x) for small values + } else { + acc_log(std::log(val)); // log(x) for normal values + } } } @@ -194,7 +339,7 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz auto stats_count = stats["count"](); // timer_res is the resolution of the timer - Measure<> timer_res(m_timer_res, m_timer_res_err, "ns"); + Measure<> timer_res(m_timer_res.count(), m_timer_res_err.count(), "ns"); result_rows.emplace_back(std::forward_as_tuple("timer resolution", "timer resolution", std::move(timer_res))); // epsilon represents the max resolution we have for a latency measurement. @@ -203,7 +348,7 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz // it is multiplied by two, as an interval is measured by making two time measurements. Therefore the // uncertainties add up. // It is converted to milliseconds. - auto epsilon = 2 * (m_timer_res + m_timer_res_err ) / nano_to_milli; + auto epsilon = 2 * std::chrono::duration_cast(m_timer_res + m_timer_res_err).count(); // if the statistical error is less than epsilon, then it is no longer significant, // as the measure is blurred by the resolution of the timer. @@ -212,6 +357,13 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz auto latency_avg_err = stats["error"]() < epsilon ? epsilon : stats["error"](); Measure<> latency_avg(latency_avg_val, latency_avg_err, "ms"); result_rows.emplace_back(std::forward_as_tuple("latency, average", "latency.average", std::move(latency_avg))); + + // let's also add the standard deviation + auto latency_stddev_val = stats["sstddev"](); + auto latency_stddev_err = stats["error"]() < epsilon ? epsilon : stats["error"](); + Measure<> latency_stddev(latency_stddev_val, latency_stddev_err, "ms"); + result_rows.emplace_back(std::forward_as_tuple("latency, standard deviation", "latency.stddev", std::move(latency_stddev))); + // minimum and maximum are measured directly. Their error depends directly upon // the measurement of two times, i.e. t2-t1. Therefore, the error on that measurement // equals twice the precision.
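A quick numeric aside on the `log1p`/`expm1` pairing chosen in the second pass above: when values are far below 1.0, `1.0 + x` can round to exactly `1.0` in double precision, so `std::log(1.0 + x)` silently returns zero, whereas `std::log1p(x)` preserves the contribution of `x` and `std::expm1` is its matching inverse. A tiny demonstration (the input value is purely illustrative):

```cpp
// Why log1p/expm1 is preferred over log(1 + x)/exp(x) - 1 for tiny values.
#include <cmath>
#include <cstdio>

int main() {
    double x = 1e-17;                    // below double's half-ulp at 1.0 (~1.1e-16)
    double naive   = std::log(1.0 + x);  // 1.0 + x rounds to exactly 1.0 -> result is 0
    double precise = std::log1p(x);      // preserves x's contribution (~1e-17)
    std::printf("log(1+x)        = %.17g\n", naive);
    std::printf("log1p(x)        = %.17g\n", precise);
    std::printf("expm1(log1p(x)) = %.17g\n", std::expm1(precise)); // round-trips to x
}
```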
@@ -223,6 +375,62 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz auto latency_max_err = epsilon; Measure<> latency_max(latency_max_val, latency_max_err, "ms"); result_rows.emplace_back(std::forward_as_tuple("latency, maximum", "latency.maximum", std::move(latency_max))); + + // p95, p98, p99 quantiles + auto latency_p95_val = stats["p95"](); + auto latency_p95_err = epsilon; + Measure<> latency_p95(latency_p95_val, latency_p95_err, "ms"); + result_rows.emplace_back(std::forward_as_tuple("latency, 95th percentile", "latency.p95", std::move(latency_p95))); + auto latency_p98_val = stats["p98"](); + auto latency_p98_err = epsilon; + Measure<> latency_p98(latency_p98_val, latency_p98_err, "ms"); + result_rows.emplace_back(std::forward_as_tuple("latency, 98th percentile", "latency.p98", std::move(latency_p98))); + auto latency_p99_val = stats["p99"](); + auto latency_p99_err = epsilon; + Measure<> latency_p99(latency_p99_val, latency_p99_err, "ms"); + result_rows.emplace_back(std::forward_as_tuple("latency, 99th percentile", "latency.p99", std::move(latency_p99))); + + // log-normal stats + auto latency_log_geomavg_val = stats["logavg"](); + auto latency_log_geomavg_err = stats["logerror"](); + Measure<> latency_log_geomavg(latency_log_geomavg_val, latency_log_geomavg_err, "ms"); + result_rows.emplace_back(std::forward_as_tuple("latency, log-normal geom average", "latency.logavg", std::move(latency_log_geomavg))); + auto latency_log_geomsstddev_val = stats["logsstdev"](); + auto latency_log_geomsstddev_err = stats["logerror"](); + Measure<> latency_log_geomsstddev(latency_log_geomsstddev_val, latency_log_geomsstddev_err, "ms"); + result_rows.emplace_back(std::forward_as_tuple("latency, log-normal geom stddev", "latency.logstddev", std::move(latency_log_geomsstddev))); + + // Kolmogorov-Smirnov goodness-of-fit tests with Lilliefors correction + // (parameters estimated from data, not known a priori) + // Critical values at alpha=0.05: ~0.886/√n - 0.01/n (reject if D > Dcrit) + auto dcrit = [] (size_t n) -> double { + return 0.886 / std::sqrt(static_cast(n)) - 0.01 / static_cast(n); + }; + + auto ks_normal_val = stats["ks_normal"](); + Measure<> ks_normal(ks_normal_val, ""); + result_rows.emplace_back(std::forward_as_tuple("Lilliefors test, normal distribution", "ks.normal", std::move(ks_normal))); + + auto ks_normal_dcrit = dcrit( stats_count ); + Measure<> ks_normal_crit(ks_normal_dcrit, ""); + result_rows.emplace_back(std::forward_as_tuple("Lilliefors test, critical value (a=0.05)", "ks.normal.crit", std::move(ks_normal_crit))); + + auto ks_fit_str = (ks_normal_val > ks_normal_dcrit) ? "rejected" : "not rejected"; + Measure<> ks_normal_fit(ks_normal_val - ks_normal_dcrit, 0, ks_fit_str); + result_rows.emplace_back(std::forward_as_tuple("Lilliefors test, fitness (normal)", "ks.normal.fit", std::move(ks_normal_fit))); + + auto ks_lognormal_val = stats["ks_lognormal"](); + Measure<> ks_lognormal(ks_lognormal_val, ""); + result_rows.emplace_back(std::forward_as_tuple("Lilliefors test, log-normal distribution", "ks.lognormal", std::move(ks_lognormal))); + + auto ks_lognormal_dcrit = dcrit( stats_count ); + Measure<> ks_lognormal_crit(ks_lognormal_dcrit, ""); + result_rows.emplace_back(std::forward_as_tuple("Lilliefors test, critical value (a=0.05)", "ks.lognormal.crit", std::move(ks_lognormal_crit))); + + ks_fit_str = (ks_lognormal_val > ks_lognormal_dcrit) ? 
"rejected" : "not rejected"; + Measure<> ks_lognormal_fit(ks_lognormal_val - ks_lognormal_dcrit, 0, ks_fit_str); + result_rows.emplace_back(std::forward_as_tuple("Lilliefors test, fitness (lognormal)", "ks.lognormal.fit", std::move(ks_lognormal_fit))); + // TPS is the number of "transactions" per second. // the meaning of "transaction" depends upon the tested API/algorithm @@ -249,7 +457,7 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz result_rows.emplace_back(std::forward_as_tuple("global throughput, average", "throughput.global", std::move(throughput_global_avg))); // wallclock_elapsed_ms is the total time elapsed (in ms). - Measure<> wallclock_elapsed_ms( wallclock_elapsed/nano_to_milli, epsilon, "ms" ); + Measure<> wallclock_elapsed_ms( wallclock_elapsed.count(), epsilon, "ms" ); result_rows.emplace_back(std::forward_as_tuple("wall clock", "wallclock", std::move(wallclock_elapsed_ms))); ConsoleTable results{"measure", "value", "error (+/-)", "unit", "rel. error" }; @@ -260,8 +468,8 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz std::get<0>(row), d2s(std::get<2>(row).value(),12), d2s(std::get<2>(row).error(),12), - std::get<2>(row).unit(), - d2s(std::get<2>(row).relerr()*100,3)+'%' }; + std::get<2>(row).unit(), + d2s(std::get<2>(row).relerr()*100,3)+'%' }; } std::cout << "Test case results:\n" << results << std::endl; @@ -282,6 +490,21 @@ ptree Executor::benchmark( P11Benchmark &benchmark, const size_t iter, const siz rv.add(thistestcase + std::get<1>(row) + ".relerr", d2s(std::get<2>(row).relerr())); } + // adding measured datapoints if requested + if(m_include_datapoints) { + ptree datapoints_array; + for(auto elapsed: elapsed_time_array) { + if(std::holds_alternative(elapsed.second)) { + for(auto &it: elapsed.first) { + ptree datapoint; + datapoint.put("", it.count()); + datapoints_array.push_back(std::make_pair("", datapoint)); + } + } + } + rv.add_child(thistestcase + "datapoints", datapoints_array); + } + // last error code, useful to identify when something crashes rv.add(thistestcase + "errorcode", errorcode(last_errcode)); } diff --git a/src/executor.hpp b/src/executor.hpp index 1b76cfd..7f618b7 100644 --- a/src/executor.hpp +++ b/src/executor.hpp @@ -25,6 +25,7 @@ #include #include #include "p11benchmark.hpp" +#include "units.hpp" #include "../config.h" using namespace Botan::PKCS11; @@ -35,24 +36,27 @@ class Executor const std::map > &m_vectors; std::vector > &m_sessions; const int m_numthreads; - double m_timer_res; - double m_timer_res_err; + nanoseconds_double_t m_timer_res; + nanoseconds_double_t m_timer_res_err; bool m_generate_session_keys; + bool m_include_datapoints; public: Executor( const std::map > &vectors, std::vector > &sessions, const int numthreads, - std::pair precision, - bool generate_session_keys) + std::pair precision, + bool generate_session_keys, + bool include_datapoints = false) : m_vectors(vectors), m_sessions(sessions), m_numthreads(numthreads), m_timer_res(precision.first), m_timer_res_err(precision.second), - m_generate_session_keys(generate_session_keys) + m_generate_session_keys(generate_session_keys), + m_include_datapoints(include_datapoints) { } Executor( const Executor &) = delete; @@ -61,7 +65,7 @@ class Executor Executor( Executor &&) = delete; Executor& operator=( Executor &&) = delete; - double precision() { return m_timer_res + m_timer_res_err; } + double precision() { return (m_timer_res + m_timer_res_err).count(); } ptree benchmark( P11Benchmark &benchmark, 
const size_t iter, const size_t skipiter, const std::forward_list shortlist ); diff --git a/src/keygenerator.cpp b/src/keygenerator.cpp index e7d6ea5..2ed9382 100644 --- a/src/keygenerator.cpp +++ b/src/keygenerator.cpp @@ -80,17 +80,17 @@ bool KeyGenerator::generate_des_key(std::string alias, unsigned int bits, std::s Mechanism mechanism { CKM_DES_KEY_GEN, nullptr, 0 }; switch(bits) { - case 64: - mechanism.mechanism = CKM_DES_KEY_GEN; - break; + case 64: + mechanism.mechanism = CKM_DES_KEY_GEN; + break; - case 128: - mechanism.mechanism = CKM_DES2_KEY_GEN; - break; + case 128: + mechanism.mechanism = CKM_DES2_KEY_GEN; + break; - case 192: - mechanism.mechanism = CKM_DES3_KEY_GEN; - break; + case 192: + mechanism.mechanism = CKM_DES3_KEY_GEN; + break; default: std::cerr << "Invalid key length for DES:" << bits << std::endl; @@ -289,7 +289,7 @@ bool KeyGenerator::generate_ecdh_keypair(std::string alias, unsigned int unused, } -void KeyGenerator::generate_key_generic(KeyGenerator::KeyType keytype, std::string alias, unsigned int bits, std::string curve) +bool KeyGenerator::generate_key_generic(KeyGenerator::KeyType keytype, std::string alias, unsigned int bits, std::string curve) { int th; bool rv = true; @@ -324,42 +324,47 @@ void KeyGenerator::generate_key_generic(KeyGenerator::KeyType keytype, std::stri } // recover futures. If one is false, return false - // TODO: replace with exception for(th=0;th allowed_keytypes { KeyType::RSA, KeyType::DES, KeyType::AES, KeyType::GENERIC }; auto match = allowed_keytypes.find( keytype ); if(match == allowed_keytypes.end()) { - throw KeyGenerationException { "Invalid keytype/argument combination" }; + std::cerr << "WARNING: Invalid keytype/argument combination" << std::endl; + return false; } return generate_key_generic(keytype, alias, bits, ""); } -void KeyGenerator::generate_key(KeyGenerator::KeyType keytype, std::string alias, std::string curve) { +bool KeyGenerator::generate_key(KeyGenerator::KeyType keytype, std::string alias, std::string curve) { std::set allowed_curves { "secp256r1", "secp384r1", "secp521r1" }; if(keytype != KeyType::ECDH && keytype != KeyType::ECDSA) { - throw KeyGenerationException { "Invalid keytype/argument combination" }; + std::cerr << "WARNING: Invalid keytype/argument combination" << std::endl; + return false; } auto match = allowed_curves.find(curve); if(match==allowed_curves.end()) { - throw KeyGenerationException { "Unknown/unmanaged key cureve given: " + curve }; + std::cerr << "WARNING: Unknown/unmanaged key curve given: " << curve << std::endl; + return false; } return generate_key_generic(keytype, alias, 0, curve); diff --git a/src/keygenerator.hpp b/src/keygenerator.hpp index 04dd127..a300d02 100644 --- a/src/keygenerator.hpp +++ b/src/keygenerator.hpp @@ -57,7 +57,7 @@ class KeyGenerator bool generate_ecdh_keypair(std::string alias, unsigned int unused, std::string curve, Session *session); bool generate_generic_key(std::string alias, unsigned int bits, std::string param, Session *session); - void generate_key_generic( KeyGenerator::KeyType keytype, std::string alias, unsigned int bits, std::string curve); + bool generate_key_generic( KeyGenerator::KeyType keytype, std::string alias, unsigned int bits, std::string curve); public: @@ -72,8 +72,8 @@ class KeyGenerator KeyGenerator( KeyGenerator &&) = delete; KeyGenerator& operator=( KeyGenerator &&) = delete; - void generate_key( KeyGenerator::KeyType keytype, std::string alias, unsigned int bits); - void generate_key( KeyGenerator::KeyType keytype, std::string 
alias, std::string curve); + bool generate_key( KeyGenerator::KeyType keytype, std::string alias, unsigned int bits); + bool generate_key( KeyGenerator::KeyType keytype, std::string alias, std::string curve); }; diff --git a/src/measure.cpp b/src/measure.cpp index 05a7219..d910414 100644 --- a/src/measure.cpp +++ b/src/measure.cpp @@ -29,11 +29,15 @@ std::ostream & operator<<(std::ostream &os, const Measure& measure) os << std::defaultfloat; } - os << ' ' << measure.m_unit - << " +/- " - << std::setprecision(measure.m_error_precision) - << measure.rounder(measure.m_error, measure.m_error_precision) - << std::setprecision(saved); + os << ' ' << measure.m_unit; + + if(measure.m_error > 0) { + os << " +/- " + << std::setprecision(measure.m_error_precision) + << measure.rounder(measure.m_error, measure.m_error_precision); + } + + os << std::setprecision(saved); return os; } diff --git a/src/measure.hpp b/src/measure.hpp index ee1d778..184aee1 100644 --- a/src/measure.hpp +++ b/src/measure.hpp @@ -6,7 +6,7 @@ #define MEASURE_HPP #include -#include +#include #include #include #include @@ -32,22 +32,39 @@ class Measure { int m_error_precision; // the precision (in decimal digits) given on measure error public: + // Constructor with error measurement Measure(T val, T err, S unit, int e_precision=2) : m_value(val), m_error(err), m_unit(unit), m_error_precision(e_precision), m_value_order(ceil(log10(val))), - m_error_order(ceil(log10(err))) { + m_error_order(err > 0 ? ceil(log10(err)) : 0) { - auto digits = [](T n) -> int { return ceil(abs(log10(n))) * copysign(1,log10(n)); }; + auto digits = [](T n) -> int { return ceil(std::abs(log10(n))) * copysign(1,log10(n)); }; - m_precision = digits(val) - digits(err) + 1; + if (err > 0) { + m_precision = digits(val) - digits(err) + 1; + } else { + // No error: use reasonable precision based on value magnitude + m_precision = 3; + } + } + + // Constructor without error (for exact values or statistics without uncertainty) + Measure(T val, S unit, int v_precision=3) : + m_value(val), + m_error(0), + m_unit(unit), + m_error_precision(0), + m_value_order(ceil(log10(std::abs(val) > 0 ? std::abs(val) : 1.0))), + m_error_order(0), + m_precision(v_precision) { } inline const T value() { return rounder(m_value, m_precision); } - inline const T error() { return rounder(m_error, m_error_precision); } - inline const T relerr() { return fabs(error()/value()); } + inline const T error() { return m_error > 0 ? rounder(m_error, m_error_precision) : 0; } + inline const T relerr() { return m_error > 0 ? 
fabs(error()/value()) : 0; } inline std::pair value_error() { return std::make_pair(value(),error()); } inline const S unit() { return m_unit; } diff --git a/src/p11benchmark.cpp b/src/p11benchmark.cpp index fa6d11a..435e570 100644 --- a/src/p11benchmark.cpp +++ b/src/p11benchmark.cpp @@ -46,13 +46,10 @@ P11Benchmark::P11Benchmark(const std::string &name, const std::string &label, Ob P11Benchmark::P11Benchmark(const P11Benchmark& other) : m_name(other.m_name), m_label(other.m_label), m_objectclass(other.m_objectclass), m_implementation(other.m_implementation) -{ - // std::cout << "copy constructor invoked for " << m_name << std::endl; -} +{ } P11Benchmark& P11Benchmark::operator=(const P11Benchmark& other) { - // std::cout << "copy assignment invoked for " << m_name << std::endl; m_name = other.m_name; m_label = other.m_label; m_objectclass = other.m_objectclass; @@ -83,10 +80,31 @@ std::string P11Benchmark::build_threaded_label(std::optional threadindex } -benchmark_result_t P11Benchmark::execute(Session *session, const std::vector &payload, size_t iterations, size_t skipiterations, std::optional threadindex) +// reset_timer(): initialize timer to zero and set starting point +void P11Benchmark::reset_timer() { - int return_code = CKR_OK; - std::vector records(iterations); + m_timer = milliseconds_double_t{0}; + m_last_clock = std::chrono::high_resolution_clock::now(); +} + +// suspend_timer(): pause timer accumulation by adding elapsed time since last resume +void P11Benchmark::suspend_timer() +{ + auto now = std::chrono::high_resolution_clock::now(); + m_timer += std::chrono::duration_cast(now - m_last_clock); + m_last_clock = now; // not really needed +} + +// resume_timer(): resume timer accumulation from current time +void P11Benchmark::resume_timer() +{ + m_last_clock = std::chrono::high_resolution_clock::now(); +} + +benchmark_result::benchmark_result_t P11Benchmark::execute(Session *session, const std::vector &payload, size_t iterations, size_t skipiterations, std::optional threadindex) +{ + benchmark_result::operation_outcome_t return_code = benchmark_result::Ok{}; + std::vector records(iterations); try { auto label = build_threaded_label(threadindex); // build threaded label (if needed) @@ -108,10 +126,6 @@ benchmark_result_t P11Benchmark::execute(Session *session, const std::vector greenlight_lck(greenlight_mtx); @@ -126,29 +140,31 @@ benchmark_result_t P11Benchmark::execute(Session *session, const std::vector lg{display_mtx}; + std::cerr << "ERROR: " << nfe.what() << std::endl; + return_code = benchmark_result::NotFound(); } catch (Botan::PKCS11::PKCS11_ReturnError &bexc) { - { - std::lock_guard lg{display_mtx}; - std::cerr << "ERROR:: " << bexc.what() - << " (" << errorcode(bexc.error_code()) << ")" << std::endl; - } - return_code = bexc.error_code(); // we print the exception, and move on - } catch (...) { - { - std::lock_guard lg{display_mtx}; - std::cerr << "ERROR: caught an unmanaged exception" << std::endl; - } - // bailing out + std::lock_guard lg{display_mtx}; + std::cerr << "ERROR: " << bexc.what() + << " (" << errorcode(bexc.error_code()) << ")" + << std::endl; + return_code = benchmark_result::ApiErr{bexc.error_code()}; + } catch (...)
{ + std::lock_guard lg{display_mtx}; + std::cerr << "ERROR: caught an unmanaged exception" << std::endl; + // rethrow throw; } diff --git a/src/p11benchmark.hpp b/src/p11benchmark.hpp index 337c3d0..5ba3f35 100644 --- a/src/p11benchmark.hpp +++ b/src/p11benchmark.hpp @@ -22,20 +22,42 @@ #include #include #include +#include +#include +#include #include #include #include #include #include #include -#include +#include "units.hpp" #include "implementation.hpp" #include "../config.h" using namespace Botan::PKCS11; -using namespace boost::timer; -using benchmark_result_t = std::pair,int>; + + +namespace benchmark_result { + + // an exception to signal that an object was not found + class NotFound : public std::exception + { + public: + virtual const char* what() const noexcept override + { + return "Requested object not found"; + } + }; + + using Ok = std::monostate; // the default: all went well + // ApiErr is the type returned by Botan::PKCS11::PKCS11_Error::error_code() + using ApiErr = decltype(std::declval().error_code()); + + using operation_outcome_t = std::variant; + using benchmark_result_t = std::pair,operation_outcome_t>; +} class P11Benchmark { @@ -43,7 +65,11 @@ class P11Benchmark std::string m_label; ObjectClass m_objectclass; Implementation m_implementation; - boost::timer::cpu_timer m_t; // the timer can be stopped and resumed by crash test dummy + milliseconds_double_t m_timer {0}; + std::chrono::high_resolution_clock::time_point m_last_clock {}; + + inline milliseconds_double_t elapsed() const { return m_timer; }; + void reset_timer(); protected: std::vector m_payload; @@ -57,6 +83,9 @@ class P11Benchmark // cleanup(): perform cleanup after each call of crashtestdummy(), if needed virtual void cleanup(Session &session) { }; + // teardown(): perform teardown after all iterations are done, if needed + virtual void teardown(Session &session, Object &obj, std::optional threadindex) { }; + // rename(): change the name of the class after creation inline void rename(std::string newname) { m_name = newname; }; @@ -67,8 +96,10 @@ class P11Benchmark inline Implementation::Vendor flavour() {return m_implementation.vendor(); }; // timer primitives for the use of derived class - inline void suspend_timer() { m_t.stop(); } - inline void resume_timer() { m_t.resume(); } + // suspend_timer(): pause timer accumulation + void suspend_timer(); + // resume_timer(): resume timer accumulation + void resume_timer(); public: P11Benchmark(const std::string &name, @@ -90,7 +121,7 @@ class P11Benchmark virtual std::string features() const; - benchmark_result_t execute(Session* session, const std::vector &payload, size_t iterations, size_t skipiterations, std::optional threadindex); + benchmark_result::benchmark_result_t execute(Session* session, const std::vector &payload, size_t iterations, size_t skipiterations, std::optional threadindex); }; diff --git a/src/p11findobjects.cpp b/src/p11findobjects.cpp new file mode 100644 index 0000000..14b7d8e --- /dev/null +++ b/src/p11findobjects.cpp @@ -0,0 +1,168 @@ +// -*- mode: c++; c-file-style:"stroustrup"; -*- + +// +// Copyright (c) 2025 Mastercard +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include +#include +#include +#include +#include +#include "p11findobjects.hpp" + + +P11FindObjectsBenchmark::P11FindObjectsBenchmark(const std::string &label) : + P11Benchmark( "Find objects (C_FindObjectsInit-C_FindObjects-C_FindObjectsFinal)", label, ObjectClass::SecretKey ) { } + +P11FindObjectsBenchmark::P11FindObjectsBenchmark(const P11FindObjectsBenchmark &other) : + P11Benchmark(other) { } + +inline P11FindObjectsBenchmark *P11FindObjectsBenchmark::clone() const { + return new P11FindObjectsBenchmark{*this}; +} + +void P11FindObjectsBenchmark::prepare(Session &session, Object &obj, std::optional threadindex) +{ + // The vector size determines the number of temporary keys to create + size_t num_objects = m_payload.size(); + + // Clear any previous temporary keys + m_temp_keys.clear(); + + // Generate temporary AES keys with unique labels + + for (size_t i = 0; i < num_objects; i++) { + // Generate unique label for each temporary key + std::stringstream label_stream; + label_stream << build_threaded_label(threadindex) << "-tmp-" << std::setw(6) << std::setfill('0') << i; + std::string temp_label = label_stream.str(); + + // Create attribute template for temporary AES secret key + AttributeContainer key_template; + key_template.add_class(ObjectClass::SecretKey); + key_template.add_string(AttributeType::Label, temp_label); + key_template.add_numeric(AttributeType::KeyType, static_cast(KeyType::Aes)); + key_template.add_bool(AttributeType::Token, false); // Session object + key_template.add_bool(AttributeType::Private, true); + key_template.add_numeric(AttributeType::ValueLen, static_cast(16)); // 128 bits + + auto aesmech = CK_MECHANISM{CKM_AES_KEY_GEN, nullptr, 0}; + auto key_handle = CK_OBJECT_HANDLE{}; + + // Create the key object + session.module()->C_GenerateKey( + session.handle(), + &aesmech, + key_template.data(), + key_template.count(), + &key_handle + ); + + m_temp_keys.push_back(key_handle); + } + + // Generate random indices for each iteration + // We'll generate 512 random indices and recycle them modulo + // We go random, in trying to annihilate any caching effects + size_t num_indices = 512; + m_random_indices.clear(); + m_random_indices.reserve(num_indices); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> dis(0, num_objects - 1); + + for (size_t i = 0; i < num_indices; i++) { + m_random_indices.push_back(dis(gen)); + } + + // Prepare base label template with placeholder for last 3 digits + // Format: "-tmp-000000" + std::stringstream base_stream; + base_stream << build_threaded_label(threadindex) << "-tmp-000000"; + m_base_label = base_stream.str(); + + // Prepare search template (will reuse and only modify the label) + m_search_template.add_class(ObjectClass::SecretKey); + m_search_template.add_string(AttributeType::Label, m_base_label); + + // Reset iteration counter + m_current_iteration = 0; +} + +void P11FindObjectsBenchmark::crashtestdummy(Session &session) +{ + // Get the random index for this iteration (wrap around if needed) + size_t target_index = m_random_indices[m_current_iteration % 
m_random_indices.size()]; + m_current_iteration++; + + // The following is a bit of a hack to modify + // only the last 3 digits of the label within the search template + + // Get pointer to the label string data + char* label_data = static_cast(m_search_template.attributes()[1].pValue); + size_t label_len = m_search_template.attributes()[1].ulValueLen; + + // Format the last 3 digits: units, tens, hundreds + label_data[label_len - 1] = '0' + (target_index % 10); + label_data[label_len - 2] = '0' + ((target_index / 10) % 10); + label_data[label_len - 3] = '0' + ((target_index / 100) % 10); + + // Now perform the FindObjects operations + // C_FindObjectsInit + session.module()->C_FindObjectsInit( + session.handle(), + m_search_template.data(), + static_cast(m_search_template.count()) + ); + + // C_FindObjects - search for the target object + ObjectHandle found_object; + Ulong found_count = 0; + session.module()->C_FindObjects( + session.handle(), + &found_object, + 1, // max_object_count + &found_count + ); + + // C_FindObjectsFinal + session.module()->C_FindObjectsFinal(session.handle()); + + // Verify we found exactly one object + if (found_count != 1) { + throw benchmark_result::NotFound(); + } +} + +void P11FindObjectsBenchmark::cleanup(Session &session) +{ + // No per-iteration cleanup needed for FindObjects +} + +void P11FindObjectsBenchmark::teardown(Session &session, Object &obj, std::optional threadindex) +{ + // Destroy all temporary keys created during prepare + // This is called once after all iterations are complete + for (auto handle : m_temp_keys) { + try { + session.module()->C_DestroyObject(session.handle(), handle); + } catch (const std::exception& e) { + std::cerr << "Error destroying temporary object: " << e.what() << std::endl; + } + } + m_temp_keys.clear(); +} diff --git a/src/p11findobjects.hpp b/src/p11findobjects.hpp new file mode 100644 index 0000000..2483f8c --- /dev/null +++ b/src/p11findobjects.hpp @@ -0,0 +1,46 @@ +// -*- mode: c++; c-file-style:"stroustrup"; -*- + +// +// Copyright (c) 2025 Mastercard +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
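The `crashtestdummy()` shown above deliberately avoids rebuilding the search template on every iteration: the label is fixed-width and zero-padded, so only its last three characters are rewritten in place before each `C_FindObjectsInit`/`C_FindObjects`/`C_FindObjectsFinal` sequence. A standalone sketch of that digit-patching trick on a plain `std::string` (label text and index are illustrative):

```cpp
// In-place patching of a zero-padded numeric suffix, as done for the
// CKA_LABEL value inside the reused search template above.
#include <cassert>
#include <iostream>
#include <string>

int main() {
    std::string label = "p11-tmp-000000"; // illustrative fixed-width base label
    int target_index  = 427;              // index selected for this iteration

    std::size_t len = label.size();
    label[len - 1] = static_cast<char>('0' + (target_index % 10));         // units
    label[len - 2] = static_cast<char>('0' + ((target_index / 10) % 10));  // tens
    label[len - 3] = static_cast<char>('0' + ((target_index / 100) % 10)); // hundreds

    assert(label == "p11-tmp-000427");
    std::cout << label << '\n';
}
```

Because the attribute's pointer and length never change, the per-iteration cost is three byte writes rather than a template rebuild, keeping the measurement focused on the `C_FindObjects*` calls themselves.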
+// + +#if !defined P11FINDOBJECTS_HPP +#define P11FINDOBJECTS_HPP + +#include "p11benchmark.hpp" + + +class P11FindObjectsBenchmark : public P11Benchmark +{ + std::vector m_temp_keys; + std::vector m_random_indices; // Random indices to search for + size_t m_current_iteration; // Current iteration counter + std::string m_base_label; // Base label template (will modify last 3 digits) + AttributeContainer m_search_template; // Search template (reused, label modified per iteration) + + virtual void prepare(Session &session, Object &obj, std::optional threadindex) override; + virtual void crashtestdummy(Session &session) override; + virtual void cleanup(Session &session) override; + virtual void teardown(Session &session, Object &obj, std::optional threadindex) override; + virtual P11FindObjectsBenchmark *clone() const override; + +public: + + P11FindObjectsBenchmark(const std::string &label); + P11FindObjectsBenchmark(const P11FindObjectsBenchmark & other); + +}; + +#endif // P11FINDOBJECTS_HPP diff --git a/src/p11perftest.cpp b/src/p11perftest.cpp index fda618f..6fe1cec 100644 --- a/src/p11perftest.cpp +++ b/src/p11perftest.cpp @@ -53,6 +53,7 @@ #include "keygenerator.hpp" #include "executor.hpp" #include "p11rsasig.hpp" +#include "p11rsapss.hpp" #include "p11oaepdec.hpp" #include "p11oaepenc.hpp" #include "p11oaepunw.hpp" @@ -62,6 +63,7 @@ #include "p11xorkeydataderive.hpp" #include "p11genrandom.hpp" #include "p11seedrandom.hpp" +#include "p11findobjects.hpp" #include "p11hmacsha1.hpp" #include "p11hmacsha256.hpp" #include "p11hmacsha512.hpp" @@ -104,13 +106,14 @@ int main(int argc, char **argv) int argiter, argskipiter; int argnthreads; bool json = false; + bool datapoints = false; std::fstream jsonout; bool generate_session_keys = true; po::options_description cliopts("command line options"); po::options_description envvars("environment variables"); // default coverage: RSA, ECDSA, HMAC, DES and AES - const auto default_tests {"rsa,ecdsa,ecdh,hmac,des,aes,xorder,rand,jwe,oaep,oaepenc,oaepunw"}; + const auto default_tests {"rsa,rsapss,ecdsa,ecdh,hmac,des,aes,xorder,rand,find,jwe,oaep,oaepenc,oaepunw"}; const auto default_vectors {"8,16,64,256,1024,4096"}; const auto default_keysizes{"rsa2048,rsa3072,rsa4096,ecnistp256,ecnistp384,ecnistp521,hmac160,hmac256,hmac512,des128,des192,aes128,aes192,aes256"}; const auto default_flavour{"generic"}; @@ -137,6 +140,7 @@ int main(int argc, char **argv) "(in addition to iterations)") ("json,j", "output results as JSON") ("jsonfile,o", po::value< std::string >(), "JSON output file name") + ("datapoints,d", "add array of measured points to JSON output (requires -j/--json)") ("coverage,c", po::value< std::string >()->default_value(default_tests), "coverage of test cases\n" "Note: the following test cases are compound:\n" @@ -202,6 +206,16 @@ int main(int argc, char **argv) std::cerr << cliopts << '\n'; } + if(vm.count("datapoints")) { + if(vm.count("json")) { + datapoints = true; + } else { + std::cerr << "When datapoints option is used, -j or --json is mandatory\n"; + std::cerr << cliopts << '\n'; + std::exit(EX_USAGE); + } + } + if (vm.count("nogenerate")) { generate_session_keys = false; } @@ -320,15 +334,19 @@ int main(int argc, char **argv) } auto epsilon = measure_clock_precision(); - std::cout << std::endl << "timer granularity (ns): " << epsilon.first << " +/- " << epsilon.second << "\n\n"; + std::cout << std::endl << "timer granularity (ns): " << epsilon.first.count() << " +/- " << epsilon.second.count() << "\n\n"; - Executor executor( testvecs, 
sessions, argnthreads, epsilon, generate_session_keys==true ); + Executor executor( testvecs, sessions, argnthreads, epsilon, generate_session_keys==true, datapoints ); + // Track which keys were successfully generated + std::set generated_keys; + // TODO: replace this whole spaghetti-like section with a more modular approach. Keys needed could be inferred from object classes. if(generate_session_keys) { KeyGenerator keygenerator( sessions, argnthreads, vendor ); std::cout << "Generating session keys for " << argnthreads << " thread(s)\n"; if(tests.contains("rsa") + || tests.contains("rsapss") || tests.contains("jwe") || tests.contains("jweoaepsha1") || tests.contains("jweoaepsha256") @@ -342,213 +360,374 @@ int main(int argc, char **argv) || tests.contains("oaepunwsha1") || tests.contains("oaepunwsha256") ) { - if(keysizes.contains("rsa2048")) keygenerator.generate_key(KeyGenerator::KeyType::RSA, "rsa-2048", 2048); - if(keysizes.contains("rsa3072")) keygenerator.generate_key(KeyGenerator::KeyType::RSA, "rsa-3072", 3072); - if(keysizes.contains("rsa4096")) keygenerator.generate_key(KeyGenerator::KeyType::RSA, "rsa-4096", 4096); + if(keysizes.contains("rsa2048")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::RSA, "rsa-2048", 2048)) { + generated_keys.insert("rsa-2048"); + } else { + std::cerr << "WARNING: Failed to generate key 'rsa-2048', associated tests will be skipped\n"; + } + } + if(keysizes.contains("rsa3072")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::RSA, "rsa-3072", 3072)) { + generated_keys.insert("rsa-3072"); + } else { + std::cerr << "WARNING: Failed to generate key 'rsa-3072', associated tests will be skipped\n"; + } + } + if(keysizes.contains("rsa4096")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::RSA, "rsa-4096", 4096)) { + generated_keys.insert("rsa-4096"); + } else { + std::cerr << "WARNING: Failed to generate key 'rsa-4096', associated tests will be skipped\n"; + } + } } if(tests.contains("ecdsa")) { - if(keysizes.contains("ecnistp256")) keygenerator.generate_key(KeyGenerator::KeyType::ECDSA, "ecdsa-secp256r1", "secp256r1"); - if(keysizes.contains("ecnistp384")) keygenerator.generate_key(KeyGenerator::KeyType::ECDSA, "ecdsa-secp384r1", "secp384r1"); - if(keysizes.contains("ecnistp521")) keygenerator.generate_key(KeyGenerator::KeyType::ECDSA, "ecdsa-secp521r1", "secp521r1"); + if(keysizes.contains("ecnistp256")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::ECDSA, "ecdsa-secp256r1", "secp256r1")) { + generated_keys.insert("ecdsa-secp256r1"); + } else { + std::cerr << "WARNING: Failed to generate key 'ecdsa-secp256r1', associated tests will be skipped\n"; + } + } + if(keysizes.contains("ecnistp384")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::ECDSA, "ecdsa-secp384r1", "secp384r1")) { + generated_keys.insert("ecdsa-secp384r1"); + } else { + std::cerr << "WARNING: Failed to generate key 'ecdsa-secp384r1', associated tests will be skipped\n"; + } + } + if(keysizes.contains("ecnistp521")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::ECDSA, "ecdsa-secp521r1", "secp521r1")) { + generated_keys.insert("ecdsa-secp521r1"); + } else { + std::cerr << "WARNING: Failed to generate key 'ecdsa-secp521r1', associated tests will be skipped\n"; + } + } } if(tests.contains("ecdh")) { - if(keysizes.contains("ecnistp256")) keygenerator.generate_key(KeyGenerator::KeyType::ECDH, "ecdh-secp256r1", "secp256r1"); - if(keysizes.contains("ecnistp384")) keygenerator.generate_key(KeyGenerator::KeyType::ECDH, "ecdh-secp384r1", 
"secp384r1"); - if(keysizes.contains("ecnistp521")) keygenerator.generate_key(KeyGenerator::KeyType::ECDH, "ecdh-secp521r1", "secp521r1"); + if(keysizes.contains("ecnistp256")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::ECDH, "ecdh-secp256r1", "secp256r1")) { + generated_keys.insert("ecdh-secp256r1"); + } else { + std::cerr << "WARNING: Failed to generate key 'ecdh-secp256r1', associated tests will be skipped\n"; + } + } + if(keysizes.contains("ecnistp384")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::ECDH, "ecdh-secp384r1", "secp384r1")) { + generated_keys.insert("ecdh-secp384r1"); + } else { + std::cerr << "WARNING: Failed to generate key 'ecdh-secp384r1', associated tests will be skipped\n"; + } + } + if(keysizes.contains("ecnistp521")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::ECDH, "ecdh-secp521r1", "secp521r1")) { + generated_keys.insert("ecdh-secp521r1"); + } else { + std::cerr << "WARNING: Failed to generate key 'ecdh-secp521r1', associated tests will be skipped\n"; + } + } } if(tests.contains("hmac")) { - if(keysizes.contains("hmac160")) keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "hmac-160", 160); - if(keysizes.contains("hmac256")) keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "hmac-256", 256); - if(keysizes.contains("hmac512")) keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "hmac-512", 512); + if(keysizes.contains("hmac160")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "hmac-160", 160)) { + generated_keys.insert("hmac-160"); + } else { + std::cerr << "WARNING: Failed to generate key 'hmac-160', associated tests will be skipped\n"; + } + } + if(keysizes.contains("hmac256")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "hmac-256", 256)) { + generated_keys.insert("hmac-256"); + } else { + std::cerr << "WARNING: Failed to generate key 'hmac-256', associated tests will be skipped\n"; + } + } + if(keysizes.contains("hmac512")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "hmac-512", 512)) { + generated_keys.insert("hmac-512"); + } else { + std::cerr << "WARNING: Failed to generate key 'hmac-512', associated tests will be skipped\n"; + } + } } if(tests.contains("des") || tests.contains("desecb") || tests.contains("descbc")) { - if(keysizes.contains("des128")) keygenerator.generate_key(KeyGenerator::KeyType::DES, "des-128", 128); // DES2 - if(keysizes.contains("des192")) keygenerator.generate_key(KeyGenerator::KeyType::DES, "des-192", 192); // DES3 + if(keysizes.contains("des128")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::DES, "des-128", 128)) { // DES2 + generated_keys.insert("des-128"); + } else { + std::cerr << "WARNING: Failed to generate key 'des-128', associated tests will be skipped\n"; + } + } + if(keysizes.contains("des192")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::DES, "des-192", 192)) { // DES3 + generated_keys.insert("des-192"); + } else { + std::cerr << "WARNING: Failed to generate key 'des-192', associated tests will be skipped\n"; + } + } } if(tests.contains("aes") || tests.contains("aesecb") || tests.contains("aescbc") || tests.contains("aesgcm")) { - if(keysizes.contains("aes128")) keygenerator.generate_key(KeyGenerator::KeyType::AES, "aes-128", 128); - if(keysizes.contains("aes192")) keygenerator.generate_key(KeyGenerator::KeyType::AES, "aes-192", 192); - if(keysizes.contains("aes256")) keygenerator.generate_key(KeyGenerator::KeyType::AES, "aes-256", 256); + if(keysizes.contains("aes128")) { + 
if(keygenerator.generate_key(KeyGenerator::KeyType::AES, "aes-128", 128)) { + generated_keys.insert("aes-128"); + } else { + std::cerr << "WARNING: Failed to generate key 'aes-128', associated tests will be skipped\n"; + } + } + if(keysizes.contains("aes192")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::AES, "aes-192", 192)) { + generated_keys.insert("aes-192"); + } else { + std::cerr << "WARNING: Failed to generate key 'aes-192', associated tests will be skipped\n"; + } + } + if(keysizes.contains("aes256")) { + if(keygenerator.generate_key(KeyGenerator::KeyType::AES, "aes-256", 256)) { + generated_keys.insert("aes-256"); + } else { + std::cerr << "WARNING: Failed to generate key 'aes-256', associated tests will be skipped\n"; + } + } } if(tests.contains("xorder")) { - keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "xorder-128", 128); + if(keygenerator.generate_key(KeyGenerator::KeyType::GENERIC, "xorder-128", 128)) { + generated_keys.insert("xorder-128"); + } else { + std::cerr << "WARNING: Failed to generate key 'xorder-128', associated tests will be skipped\n"; + } } if(tests.contains("rand")) { - keygenerator.generate_key(KeyGenerator::KeyType::AES, "rand-128", 128); // not really used + keygenerator.generate_key(KeyGenerator::KeyType::AES, "rand-128", 128); // not really used, ignore result + generated_keys.insert("rand-128"); // always insert, tests don't really need this key } + if(tests.contains("find")) { + keygenerator.generate_key(KeyGenerator::KeyType::AES, "find-128", 128); // not really used, ignore result + generated_keys.insert("find-128"); // always insert, tests don't really need this key + } + + } else { + + std::cout << "Using existing token keys (no generation)\n"; + + if(keysizes.contains("rsa2048")) generated_keys.insert("rsa-2048"); + if(keysizes.contains("rsa3072")) generated_keys.insert("rsa-3072"); + if(keysizes.contains("rsa4096")) generated_keys.insert("rsa-4096"); + if(keysizes.contains("ecnistp256")) { + generated_keys.insert("ecdsa-secp256r1"); + generated_keys.insert("ecdh-secp256r1"); + } + if(keysizes.contains("ecnistp384")) { + generated_keys.insert("ecdsa-secp384r1"); + generated_keys.insert("ecdh-secp384r1"); + } + if(keysizes.contains("ecnistp521")) { + generated_keys.insert("ecdsa-secp521r1"); + generated_keys.insert("ecdh-secp521r1"); + } + if(keysizes.contains("hmac160")) generated_keys.insert("hmac-160"); + if(keysizes.contains("hmac256")) generated_keys.insert("hmac-256"); + if(keysizes.contains("hmac512")) generated_keys.insert("hmac-512"); + if(keysizes.contains("des128")) generated_keys.insert("des-128"); + if(keysizes.contains("des192")) generated_keys.insert("des-192"); + if(keysizes.contains("aes128")) generated_keys.insert("aes-128"); + if(keysizes.contains("aes192")) generated_keys.insert("aes-192"); + if(keysizes.contains("aes256")) generated_keys.insert("aes-256"); + generated_keys.insert("xorder-128"); + generated_keys.insert("rand-128"); + generated_keys.insert("find-128"); } + // Helper lambda to check if key was generated (C++11 compatible) + auto has_key = [&generated_keys](const std::string& key) { + return generated_keys.find(key) != generated_keys.end(); + }; + std::forward_list benchmarks; // RSA PKCS#1 signature if(tests.contains("rsa")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11RSASigBenchmark("rsa-2048") ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11RSASigBenchmark("rsa-3072") ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new 
P11RSASigBenchmark("rsa-4096") ); + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11RSASigBenchmark("rsa-2048") ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11RSASigBenchmark("rsa-3072") ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11RSASigBenchmark("rsa-4096") ); + } + + // RSA-PSS signature + if(tests.contains("rsapss")) { + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11RSAPssBenchmark("rsa-2048") ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11RSAPssBenchmark("rsa-3072") ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11RSAPssBenchmark("rsa-4096") ); } // RSA PKCS#1 OAEP decryption if(tests.contains("oaep") || tests.contains("oaepsha1")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-2048", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA1) ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-3072", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA1) ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-4096", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-2048", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-3072", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-4096", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA1) ); } if(tests.contains("oaep") || tests.contains("oaepsha256")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-2048", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA256) ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-3072", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA256) ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-4096", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-2048", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-3072", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11OAEPDecryptBenchmark("rsa-4096", vendor, P11OAEPDecryptBenchmark::HashAlg::SHA256) ); } // RSA PKCS#1 OAEP encryption if(tests.contains("oaepenc") || tests.contains("oaepencsha1")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-2048", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA1) ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-3072", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA1) ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-4096", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa2048") && 
has_key("rsa-2048")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-2048", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-3072", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-4096", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA1) ); } if(tests.contains("oaepenc") || tests.contains("oaepencsha256")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-2048", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA256) ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-3072", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA256) ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-4096", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-2048", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-3072", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11OAEPEncryptBenchmark("rsa-4096", vendor, P11OAEPEncryptBenchmark::HashAlg::SHA256) ); } // RSA PKCS#1 OAEP unwrapping if(tests.contains("oaepunw") || tests.contains("oaepunwsha1")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-2048", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA1) ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-3072", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA1) ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-4096", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-2048", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-3072", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA1) ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-4096", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA1) ); } if(tests.contains("oaepunw") || tests.contains("oaepunwsha256")) { - if(keysizes.contains("rsa2048")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-2048", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA256) ); - if(keysizes.contains("rsa3072")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-3072", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA256) ); - if(keysizes.contains("rsa4096")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-4096", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-2048", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-3072", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA256) ); + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) 
benchmarks.emplace_front( new P11OAEPUnwrapBenchmark("rsa-4096", vendor, P11OAEPUnwrapBenchmark::HashAlg::SHA256) ); } // JWE ( RSA OAEP + AES GCM ) if(tests.contains("jwe") || tests.contains("jweoaepsha1")) { - if(keysizes.contains("rsa2048")) { - if(keysizes.contains("aes128")) + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) { + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-2048", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM128) ); - if(keysizes.contains("aes192")) + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-2048", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM192) ); - if(keysizes.contains("aes256")) + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-2048", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM256) ); } - if(keysizes.contains("rsa3072")) { - if(keysizes.contains("aes128")) + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) { + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-3072", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM128) ); - if(keysizes.contains("aes192")) + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-3072", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM192) ); - if(keysizes.contains("aes256")) + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-3072", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM256) ); } - if(keysizes.contains("rsa4096")) { - if(keysizes.contains("aes128")) + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) { + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-4096", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM128) ); - if(keysizes.contains("aes192")) + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-4096", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM192) ); - if(keysizes.contains("aes256")) + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-4096", vendor, P11JWEBenchmark::HashAlg::SHA1, P11JWEBenchmark::SymAlg::GCM256) ); } } if(tests.contains("jwe") || tests.contains("jweoaepsha256")) { - if(keysizes.contains("rsa2048")) { - if(keysizes.contains("aes128")) + if(keysizes.contains("rsa2048") && has_key("rsa-2048")) { + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-2048", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM128) ); - if(keysizes.contains("aes192")) + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-2048", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM192) ); - if(keysizes.contains("aes256")) + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-2048", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM256) ); } - if(keysizes.contains("rsa3072")) { - if(keysizes.contains("aes128")) + if(keysizes.contains("rsa3072") && has_key("rsa-3072")) { + if(keysizes.contains("aes128") && has_key("aes-128")) 
benchmarks.emplace_front( new P11JWEBenchmark("rsa-3072", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM128) ); - if(keysizes.contains("aes192")) + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-3072", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM192) ); - if(keysizes.contains("aes256")) + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-3072", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM256) ); } - if(keysizes.contains("rsa4096")) { - if(keysizes.contains("aes128")) + if(keysizes.contains("rsa4096") && has_key("rsa-4096")) { + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-4096", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM128) ); - if(keysizes.contains("aes192")) + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-4096", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM192) ); - if(keysizes.contains("aes256")) + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11JWEBenchmark("rsa-4096", vendor, P11JWEBenchmark::HashAlg::SHA256, P11JWEBenchmark::SymAlg::GCM256) ); } } if(tests.contains("ecdsa")) { - if(keysizes.contains("ecnistp256")) benchmarks.emplace_front( new P11ECDSASigBenchmark("ecdsa-secp256r1") ); - if(keysizes.contains("ecnistp384")) benchmarks.emplace_front( new P11ECDSASigBenchmark("ecdsa-secp384r1") ); - if(keysizes.contains("ecnistp521")) benchmarks.emplace_front( new P11ECDSASigBenchmark("ecdsa-secp521r1") ); + if(keysizes.contains("ecnistp256") && has_key("ecdsa-secp256r1")) benchmarks.emplace_front( new P11ECDSASigBenchmark("ecdsa-secp256r1") ); + if(keysizes.contains("ecnistp384") && has_key("ecdsa-secp384r1")) benchmarks.emplace_front( new P11ECDSASigBenchmark("ecdsa-secp384r1") ); + if(keysizes.contains("ecnistp521") && has_key("ecdsa-secp521r1")) benchmarks.emplace_front( new P11ECDSASigBenchmark("ecdsa-secp521r1") ); } if(tests.contains("ecdh")) { - if(keysizes.contains("ecnistp256")) benchmarks.emplace_front( new P11ECDH1DeriveBenchmark("ecdh-secp256r1") ); - if(keysizes.contains("ecnistp384")) benchmarks.emplace_front( new P11ECDH1DeriveBenchmark("ecdh-secp384r1") ); - if(keysizes.contains("ecnistp521")) benchmarks.emplace_front( new P11ECDH1DeriveBenchmark("ecdh-secp521r1") ); + if(keysizes.contains("ecnistp256") && has_key("ecdh-secp256r1")) benchmarks.emplace_front( new P11ECDH1DeriveBenchmark("ecdh-secp256r1") ); + if(keysizes.contains("ecnistp384") && has_key("ecdh-secp384r1")) benchmarks.emplace_front( new P11ECDH1DeriveBenchmark("ecdh-secp384r1") ); + if(keysizes.contains("ecnistp521") && has_key("ecdh-secp521r1")) benchmarks.emplace_front( new P11ECDH1DeriveBenchmark("ecdh-secp521r1") ); } if(tests.contains("hmac")) { - if(keysizes.contains("hmac160")) benchmarks.emplace_front( new P11HMACSHA1Benchmark("hmac-160") ); - if(keysizes.contains("hmac256")) benchmarks.emplace_front( new P11HMACSHA256Benchmark("hmac-256") ); - if(keysizes.contains("hmac512")) benchmarks.emplace_front( new P11HMACSHA512Benchmark("hmac-512") ); + if(keysizes.contains("hmac160") && has_key("hmac-160")) benchmarks.emplace_front( new P11HMACSHA1Benchmark("hmac-160") ); + if(keysizes.contains("hmac256") && has_key("hmac-256")) benchmarks.emplace_front( new P11HMACSHA256Benchmark("hmac-256") ); + 
if(keysizes.contains("hmac512") && has_key("hmac-512")) benchmarks.emplace_front( new P11HMACSHA512Benchmark("hmac-512") ); } if(tests.contains("des") || tests.contains("desecb")) { - if(keysizes.contains("des128")) benchmarks.emplace_front( new P11DES3ECBBenchmark("des-128") ); - if(keysizes.contains("des192")) benchmarks.emplace_front( new P11DES3ECBBenchmark("des-192") ); + if(keysizes.contains("des128") && has_key("des-128")) benchmarks.emplace_front( new P11DES3ECBBenchmark("des-128") ); + if(keysizes.contains("des192") && has_key("des-192")) benchmarks.emplace_front( new P11DES3ECBBenchmark("des-192") ); } if(tests.contains("des") || tests.contains("descbc")) { - if(keysizes.contains("des128")) benchmarks.emplace_front( new P11DES3CBCBenchmark("des-128") ); - if(keysizes.contains("des192")) benchmarks.emplace_front( new P11DES3CBCBenchmark("des-192") ); + if(keysizes.contains("des128") && has_key("des-128")) benchmarks.emplace_front( new P11DES3CBCBenchmark("des-128") ); + if(keysizes.contains("des192") && has_key("des-192")) benchmarks.emplace_front( new P11DES3CBCBenchmark("des-192") ); } if(tests.contains("aes") || tests.contains("aesecb")) { - if(keysizes.contains("aes128")) benchmarks.emplace_front( new P11AESECBBenchmark("aes-128") ); - if(keysizes.contains("aes192")) benchmarks.emplace_front( new P11AESECBBenchmark("aes-192") ); - if(keysizes.contains("aes256")) benchmarks.emplace_front( new P11AESECBBenchmark("aes-256") ); + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11AESECBBenchmark("aes-128") ); + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11AESECBBenchmark("aes-192") ); + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11AESECBBenchmark("aes-256") ); } if(tests.contains("aes") || tests.contains("aescbc")) { - if(keysizes.contains("aes128")) benchmarks.emplace_front( new P11AESCBCBenchmark("aes-128") ); - if(keysizes.contains("aes192")) benchmarks.emplace_front( new P11AESCBCBenchmark("aes-192") ); - if(keysizes.contains("aes256")) benchmarks.emplace_front( new P11AESCBCBenchmark("aes-256") ); + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11AESCBCBenchmark("aes-128") ); + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11AESCBCBenchmark("aes-192") ); + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11AESCBCBenchmark("aes-256") ); } if(tests.contains("aes") || tests.contains("aesgcm")) { - if(keysizes.contains("aes128")) benchmarks.emplace_front( new P11AESGCMBenchmark("aes-128", vendor) ); - if(keysizes.contains("aes192")) benchmarks.emplace_front( new P11AESGCMBenchmark("aes-192", vendor) ); - if(keysizes.contains("aes256")) benchmarks.emplace_front( new P11AESGCMBenchmark("aes-256", vendor) ); + if(keysizes.contains("aes128") && has_key("aes-128")) benchmarks.emplace_front( new P11AESGCMBenchmark("aes-128", vendor) ); + if(keysizes.contains("aes192") && has_key("aes-192")) benchmarks.emplace_front( new P11AESGCMBenchmark("aes-192", vendor) ); + if(keysizes.contains("aes256") && has_key("aes-256")) benchmarks.emplace_front( new P11AESGCMBenchmark("aes-256", vendor) ); } if(tests.contains("xorder")) { - benchmarks.emplace_front( new P11XorKeyDataDeriveBenchmark("xorder-128") ); + if(has_key("xorder-128")) benchmarks.emplace_front( new P11XorKeyDataDeriveBenchmark("xorder-128") ); } if(tests.contains("rand")) { - benchmarks.emplace_front( 
new P11SeedRandomBenchmark("rand-128") ); - benchmarks.emplace_front( new P11GenerateRandomBenchmark("rand-128") ); + if(has_key("rand-128")) { + benchmarks.emplace_front( new P11SeedRandomBenchmark("rand-128") ); + benchmarks.emplace_front( new P11GenerateRandomBenchmark("rand-128") ); + } } + if(tests.contains("find")) { + if(has_key("find-128")) { + benchmarks.emplace_front( new P11FindObjectsBenchmark("find-128") ); + } + } benchmarks.reverse(); @@ -568,10 +747,6 @@ int main(int argc, char **argv) } } } - catch ( KeyGenerationException &e) { - std::cerr << "Ouch, got an error while generating keys: " << e.what() << '\n' - << "bailing out" << std::endl; - } catch ( std::exception &e) { std::cerr << "Ouch, got an error while execution: " << e.what() << '\n' << "diagnostic:\n" @@ -587,7 +762,7 @@ int main(int argc, char **argv) rv = EX_SOFTWARE; } } else { - std::cout << "The slot at index " << argslot << " has no token. Aborted.\n"; + std::cout << "The slot at index " << argslot << " has no token. Aborted.\n"; } return rv; } diff --git a/src/p11rsapss.cpp b/src/p11rsapss.cpp new file mode 100644 index 0000000..ac91f1d --- /dev/null +++ b/src/p11rsapss.cpp @@ -0,0 +1,61 @@ +// -*- mode: c++; c-file-style:"stroustrup"; -*- + +// +// Copyright (c) 2025 Mastercard +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
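All of the registrations above follow one pattern: a benchmark is queued only when its key size was requested on the command line (`keysizes.contains(...)`) and the corresponding key actually exists on the token (`has_key(...)`), so a failed key generation now skips the affected test cases instead of terminating the run. A minimal, self-contained sketch of that guard follows; `Benchmark` and `add_if` are illustrative stand-ins, not project API:

    // Sketch of the "requested && present" guard used throughout the
    // registration code above; types are simplified stand-ins.
    #include <forward_list>
    #include <functional>

    struct Benchmark { virtual ~Benchmark() = default; };
    using BenchList = std::forward_list<Benchmark*>;  // caller owns the pointers, as in the patch

    void add_if(BenchList &benchmarks,
                bool size_requested,   // e.g. keysizes.contains("rsa2048")
                bool key_present,      // e.g. has_key("rsa-2048")
                const std::function<Benchmark*()> &make)
    {
        // Queue the benchmark only if the size was requested on the command
        // line and the key was successfully generated on the token.
        if (size_requested && key_present) {
            benchmarks.emplace_front(make());
        }
    }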
diff --git a/src/p11rsapss.cpp b/src/p11rsapss.cpp
new file mode 100644
index 0000000..ac91f1d
--- /dev/null
+++ b/src/p11rsapss.cpp
@@ -0,0 +1,61 @@
+// -*- mode: c++; c-file-style:"stroustrup"; -*-
+
+//
+// Copyright (c) 2025 Mastercard
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <botan/hash.h>
+#include "p11rsapss.hpp"
+
+
+P11RSAPssBenchmark::P11RSAPssBenchmark(const std::string &label) :
+    P11Benchmark( "RSA-PSS Signature with SHA256 hashing (CKM_SHA256_RSA_PKCS_PSS)", label, ObjectClass::PrivateKey ),
+    m_pss_params { CKM_SHA256, CKG_MGF1_SHA256, 32 },
+    m_mech_rsa_pss { CKM_SHA256_RSA_PKCS_PSS, &m_pss_params, sizeof(m_pss_params) }
+{ }
+
+
+P11RSAPssBenchmark::P11RSAPssBenchmark(const P11RSAPssBenchmark & other) :
+    P11Benchmark(other),
+    m_mech_rsa_pss { CKM_SHA256_RSA_PKCS_PSS, nullptr, 0 }
+{
+    // Copy PSS parameters
+    m_pss_params = other.m_pss_params;
+    m_mech_rsa_pss.pParameter = &m_pss_params;
+    m_mech_rsa_pss.ulParameterLen = sizeof(m_pss_params);
+}
+
+
+inline P11RSAPssBenchmark *P11RSAPssBenchmark::clone() const {
+    return new P11RSAPssBenchmark{*this};
+}
+
+void P11RSAPssBenchmark::prepare(Session &session, Object &obj, std::optional<size_t> threadindex)
+{
+    m_objhandle = obj.handle();
+    m_signature.resize(m_signature_size);
+
+    // Compute SHA-256 hash of the payload
+    std::unique_ptr<Botan::HashFunction> sha256(Botan::HashFunction::create("SHA-256"));
+    sha256->update(m_payload.data(), m_payload.size());
+    m_hash = sha256->final();
+}
+
+void P11RSAPssBenchmark::crashtestdummy(Session &session)
+{
+    Ulong signature_len = m_signature.size();
+    session.module()->C_SignInit(session.handle(), &m_mech_rsa_pss, m_objhandle);
+    session.module()->C_Sign(session.handle(), m_hash.data(), m_hash.size(), m_signature.data(), &signature_len);
+}
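The copy constructor above first initialises `m_mech_rsa_pss` with a null parameter pointer and then re-points it at the copy's own `m_pss_params`: a defaulted memberwise copy would leave `pParameter` aimed at the source object's member, which dangles once the source is destroyed. A self-contained sketch of the pitfall, with the PKCS#11 structs reduced to stand-ins:

    // Sketch only: why a mechanism holding a pointer to a sibling member
    // needs a hand-written copy constructor. Struct names are simplified.
    #include <cassert>
    #include <cstddef>

    struct PssParams { unsigned long hash, mgf, saltlen; };
    struct Mech { void *pParameter; std::size_t ulParameterLen; };

    struct Bench {
        PssParams params { 1, 2, 32 };
        Mech mech { &params, sizeof(params) };

        Bench() = default;
        // Correct copy: point the copied mechanism at our own params,
        // not at the source object's.
        Bench(const Bench &other)
            : params(other.params),
              mech { &params, sizeof(params) } {}
    };

    int main() {
        Bench a;
        Bench b(a);
        assert(b.mech.pParameter == &b.params);  // holds with the explicit copy ctor
        // With a defaulted copy ctor, b.mech.pParameter would still be
        // &a.params, dangling once `a` goes away.
        return 0;
    }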
diff --git a/src/p11rsapss.hpp b/src/p11rsapss.hpp
new file mode 100644
index 0000000..44bf4a7
--- /dev/null
+++ b/src/p11rsapss.hpp
@@ -0,0 +1,46 @@
+// -*- mode: c++; c-file-style:"stroustrup"; -*-
+
+//
+// Copyright (c) 2025 Mastercard
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#if !defined P11RSAPSS_HPP
+#define P11RSAPSS_HPP
+
+#include "p11benchmark.hpp"
+#include <botan/secmem.h>
+
+class P11RSAPssBenchmark : public P11Benchmark
+{
+    static constexpr auto m_signature_size = 512; // Max RSA signature size (4096 bits)
+
+    CK_RSA_PKCS_PSS_PARAMS m_pss_params;
+    Mechanism m_mech_rsa_pss;
+    Botan::secure_vector<uint8_t> m_hash;
+    std::vector<uint8_t> m_signature;
+    ObjectHandle m_objhandle;
+
+    virtual void prepare(Session &session, Object &obj, std::optional<size_t> threadindex) override;
+    virtual void crashtestdummy(Session &session) override;
+    virtual P11RSAPssBenchmark *clone() const override;
+
+public:
+
+    P11RSAPssBenchmark(const std::string &name);
+    P11RSAPssBenchmark(const P11RSAPssBenchmark & other);
+
+};
+
+#endif // P11RSAPSS_HPP
diff --git a/src/p11seedrandom.cpp b/src/p11seedrandom.cpp
index 3fa47e6..504f12a 100644
--- a/src/p11seedrandom.cpp
+++ b/src/p11seedrandom.cpp
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include <botan/auto_rng.h>
 
 #include "p11seedrandom.hpp"
@@ -37,7 +38,9 @@ inline P11SeedRandomBenchmark *P11SeedRandomBenchmark::clone() const {
 
 void P11SeedRandomBenchmark::prepare(Session &session, Object &obj, std::optional<size_t> threadindex)
 {
     m_seed.resize( m_payload.size() );
-}
+    Botan::AutoSeeded_RNG rng;
+    rng.randomize(m_seed.data(), m_seed.size());
+}
 
 void P11SeedRandomBenchmark::crashtestdummy(Session &session)
 {
diff --git a/src/testcoverage.cpp b/src/testcoverage.cpp
index e378c87..95456da 100644
--- a/src/testcoverage.cpp
+++ b/src/testcoverage.cpp
@@ -34,6 +34,10 @@ TestCoverage::TestCoverage(std::string tocover)
 	    m_algo_coverage.insert(AlgoCoverage::rsa);
 	    break;
 
+	case "rsapss"_hash:
+	    m_algo_coverage.insert(AlgoCoverage::rsapss);
+	    break;
+
 	case "ecdsa"_hash:
 	    m_algo_coverage.insert(AlgoCoverage::ecdsa);
 	    break;
@@ -82,6 +86,10 @@ TestCoverage::TestCoverage(std::string tocover)
 	    m_algo_coverage.insert(AlgoCoverage::rand);
 	    break;
 
+	case "find"_hash:
+	    m_algo_coverage.insert(AlgoCoverage::find);
+	    break;
+
 	case "jwe"_hash:
 	    m_algo_coverage.insert(AlgoCoverage::jwe);
 	    break;
@@ -156,6 +164,10 @@ bool TestCoverage::contains(std::string algo)
 	    return contains(AlgoCoverage::rsa);
 	    break;
 
+	case "rsapss"_hash:
+	    return contains(AlgoCoverage::rsapss);
+	    break;
+
 	case "ecdsa"_hash:
 	    return contains(AlgoCoverage::ecdsa);
 	    break;
@@ -204,6 +216,10 @@ bool TestCoverage::contains(std::string algo)
 	    return contains(AlgoCoverage::rand);
 	    break;
 
+	case "find"_hash:
+	    return contains(AlgoCoverage::find);
+	    break;
+
 	case "jwe"_hash:
 	    return contains(AlgoCoverage::jwe);
 	    break;
diff --git a/src/testcoverage.hpp b/src/testcoverage.hpp
index 1de7ad6..01a818a 100644
--- a/src/testcoverage.hpp
+++ b/src/testcoverage.hpp
@@ -26,6 +26,7 @@ struct TestCoverage
     enum class AlgoCoverage {
 	rsa,		// RSA
+	rsapss,		// RSA-PSS
 	ecdsa,		// ECDSA
 	ecdh,		// ECDH
 	hmac,		// HMAC
@@ -38,6 +39,7 @@ struct TestCoverage
 	aesgcm,		// AES GCM
 	xorder,		// XOR derivation
 	rand,		// Random number generation
+	find,		// Find objects
 	jwe,		// JWE decryption (RFC7516)
 	jweoaepsha1,	// subset with OAEP(SHA1)
 	jweoaepsha256,	// subset with OAEP(SHA256)
@@ -46,10 +48,10 @@ struct TestCoverage
 	oaepsha256,	// PKCS#1 OAEP decryption (SHA256)
 	oaepunw,	// PKCS#1 OAEP unwrapping (all hashing algorithms)
 	oaepunwsha1,	// PKCS#1 OAEP unwrapping (SHA1)
-	oaepunwsha256,	// PKCS#1 OAEP unwrapping (SHA256)
+	oaepunwsha256,	// PKCS#1 OAEP unwrapping (SHA256)
 	oaepenc,	// PKCS#1 OAEP encryption (all hashing algorithms)
 	oaepencsha1,	// PKCS#1 OAEP encryption (SHA1)
-	oaepencsha256,	// PKCS#1 OAEP encryption (SHA256)
+	oaepencsha256,	// PKCS#1 OAEP encryption (SHA256)
     };
 
     TestCoverage(std::string tocover);
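The `case "rsapss"_hash:` and `case "find"_hash:` labels work because the project hashes the string through a `constexpr` user-defined literal (defined elsewhere in the code base), turning each name into an integral constant that a `switch` can dispatch on. A sketch of the technique, assuming an FNV-1a-style hash; the hash the project actually uses may differ, and real code must also consider collisions:

    // Sketch: a compile-time string hash enabling switch-on-string dispatch.
    // fnv1a and operator""_hash are illustrative; not the project's definitions.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    constexpr std::uint64_t fnv1a(const char *s,
                                  std::uint64_t h = 14695981039346656037ull)
    {
        // Fold each character into the hash (FNV-1a), recursively so the
        // whole computation stays constexpr even under C++11 rules.
        return *s ? fnv1a(s + 1, (h ^ static_cast<unsigned char>(*s)) * 1099511628211ull) : h;
    }

    constexpr std::uint64_t operator""_hash(const char *s, std::size_t)
    {
        return fnv1a(s);
    }

    // Runtime dispatch on a string via its hash, mirroring TestCoverage.
    void classify(const std::string &algo)
    {
        switch (fnv1a(algo.c_str())) {
        case "rsapss"_hash: std::puts("RSA-PSS coverage"); break;
        case "find"_hash:   std::puts("C_FindObjects coverage"); break;
        default:            std::puts("unknown"); break;
        }
    }

    int main() { classify("rsapss"); classify("find"); }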
diff --git a/src/timeprecision.cpp b/src/timeprecision.cpp
index 89c31c9..98d82d4 100644
--- a/src/timeprecision.cpp
+++ b/src/timeprecision.cpp
@@ -16,16 +16,6 @@
 // limitations under the License.
 //
 
-// this code is inspired from Boost library test sample
-// https://www.boost.org/doc/libs/1_72_0/libs/timer/test/cpu_timer_info.cpp
-
-// Original copyright notice:
-//
-// Copyright Beman Dawes 2006
-// Distributed under the Boost Software License, Version 1.0.
-// See http://www.boost.org/LICENSE_1_0.txt
-
-
 #include "timeprecision.hpp"
 
 #include
@@ -44,12 +34,11 @@ using namespace boost::accumulators;
 // reference: https://www.statsdirect.com/help/basic_descriptive_statistics/standard_deviation.htm
 // returned time is in ns
 
-// TODO: using litterals for setting units
-pair<double, double> measure_clock_precision(int iter)
+pair<nanoseconds_double_t, nanoseconds_double_t> measure_clock_precision(int iter)
 {
     using clock = std::chrono::high_resolution_clock;
 
-    accumulator_set<double, stats<tag::mean, tag::variance>> te;
+    accumulator_set<double, stats<tag::mean, tag::variance>> acc;
 
     for (int i = 0; i < iter; ++i) {
 	auto start = clock::now();
@@ -57,17 +46,19 @@
 	auto current = clock::now();
 	while (current == start) {
 	    current = clock::now();
 	}
-	const auto delta = std::chrono::duration_cast<std::chrono::nanoseconds>(current - start).count();
-	te(static_cast<double>(delta));
+	const auto delta = std::chrono::duration_cast<nanoseconds_double_t>(current - start);
+	acc(delta.count());
     }
 
-    auto n = boost::accumulators::count(te);
+    auto n = boost::accumulators::count(acc);
 
     // compute estimator for variance: (n)/(n-1)*variance
-    auto est_variance = (variance(te) * n ) / (n-1);
+    auto est_variance = (variance(acc) * n ) / (n-1);
 
     // compute standard error
-    double std_err = sqrt( est_variance/n ) * 2; // we take k=2, so 95% of measures are within interval
-
-    return make_pair(mean(te), std_err);
+    // we take k=2, so 95% of measures are within interval
+    auto std_err = nanoseconds_double_t( sqrt( est_variance/n ) * 2);
+    auto avg = nanoseconds_double_t(mean(acc));
+
+    return {avg, std_err};
 }
diff --git a/src/timeprecision.hpp b/src/timeprecision.hpp
index a676a09..870c868 100644
--- a/src/timeprecision.hpp
+++ b/src/timeprecision.hpp
@@ -20,7 +20,8 @@
 #define TIMEPRECISION_H
 
 #include <utility>
+#include "units.hpp"
 
-std::pair<double, double> measure_clock_precision(int iter=100);
+std::pair<nanoseconds_double_t, nanoseconds_double_t> measure_clock_precision(int iter=100);
 
 #endif // TIMEPRECISION_H
diff --git a/src/units.hpp b/src/units.hpp
new file mode 100644
index 0000000..d933aa0
--- /dev/null
+++ b/src/units.hpp
@@ -0,0 +1,32 @@
+// -*- mode: c++; c-file-style:"stroustrup"; -*-
+
+//
+// Copyright (c) 2025 Mastercard
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// units.hpp: Definitions of time units with double precision
+
+#if !defined(UNITS_H)
+#define UNITS_H
+
+#include <chrono>
+#include <ratio>
+
+using nanoseconds_double_t = std::chrono::duration<double, std::nano>;
+using microseconds_double_t = std::chrono::duration<double, std::micro>;
+using milliseconds_double_t = std::chrono::duration<double, std::milli>;
+
+
+#endif // UNITS_H
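Because the aliases in units.hpp are `std::chrono::duration` specializations with a `double` representation, conversions between them are implicit: `std::chrono` handles the ratio arithmetic, so a value measured in nanoseconds can be read off in microseconds or milliseconds without hand-written divisions. A short usage sketch of the three aliases:

    // Sketch: implicit unit conversion with the units.hpp aliases.
    #include <chrono>
    #include <cstdio>

    // Same aliases as units.hpp
    using nanoseconds_double_t  = std::chrono::duration<double, std::nano>;
    using microseconds_double_t = std::chrono::duration<double, std::micro>;
    using milliseconds_double_t = std::chrono::duration<double, std::milli>;

    int main() {
        // e.g. a clock precision figure, as returned by measure_clock_precision()
        nanoseconds_double_t precision { 1234.5 };

        // Floating-point durations convert implicitly; no manual scaling.
        microseconds_double_t us = precision;   // 1.2345 us
        milliseconds_double_t ms = precision;   // 0.0012345 ms

        std::printf("%.4f ns = %.4f us = %.7f ms\n",
                    precision.count(), us.count(), ms.count());
        return 0;
    }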