Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -60,3 +60,4 @@ frameworks/pinpoint-java/pinpoint/pinpoint-web-starter-*.jar
frameworks/pinpoint-java/pinpoint/pinpoint-agent-*
frameworks/pinpoint-java/pinpoint/pinpoint.tar.gz
frameworks/pinpoint-java/scripts/*.json
zipkin.jar
11 changes: 11 additions & 0 deletions frameworks/OpenTelemetry-python/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
venv/
__pycache__/
kieker-lang-pack-python/
results/
config.ini
*.log
*.bak
*.zip
results-OpenTelemetry-python/
.DS_Store
zipkin.jar
52 changes: 52 additions & 0 deletions frameworks/OpenTelemetry-python/benchmark.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Benchmark driver for the OpenTelemetry-python framework.
# Resolves paths (with Cygwin/Windows support), sources the shared MooBench
# helpers, builds a fresh Python virtualenv, installs the OpenTelemetry
# dependencies and runs all benchmark loops.

DIR=$(cd "$(dirname "$0")" && pwd)
RAW_MAIN_DIR="$DIR/../../"

# Under Cygwin the Python side needs Windows-style paths, while the shell
# helpers keep using the POSIX ones (RAW_MAIN_DIR / DIR).
if command -v cygpath &>/dev/null; then
  BASE_DIR=$(cygpath -w "$DIR")
  MAIN_DIR=$(cygpath -w "$RAW_MAIN_DIR")
else
  BASE_DIR="$DIR"
  MAIN_DIR="$RAW_MAIN_DIR"
fi

# init.sh loads common-functions.sh and creates directories
if [ -f "${RAW_MAIN_DIR}/init.sh" ]; then
  source "${RAW_MAIN_DIR}/init.sh"
else
  echo "Missing library: ${RAW_MAIN_DIR}/init.sh" >&2
  exit 1
fi

source "$DIR/config.rc"
source "$DIR/labels.sh"
source "$DIR/functions.sh"

echo " # Preparing Environment..."
# Always start from a clean virtualenv.
rm -rf "$VENV_DIR"

# For Windows/Cygwin compatibility: some installs only ship 'python'.
if command -v python3 &>/dev/null; then
  PYTHON_EXE=python3
else
  PYTHON_EXE=python
fi

"$PYTHON_EXE" -m venv "$VENV_DIR" || { echo "venv creation failed" >&2; exit 1; }
# Windows venvs put the activation script under Scripts/, POSIX under bin/.
if [ -f "$VENV_DIR/Scripts/activate" ]; then
  source "$VENV_DIR/Scripts/activate"
else
  source "$VENV_DIR/bin/activate"
fi

pip install -q --upgrade pip
pip install -q -r "$REQUIREMENTS_FILE"
# Install auto-instrumentation packages matching the installed libraries.
opentelemetry-bootstrap -a install

cp "$CONFIG_TEMPLATE" "$CONFIG_FILE"

echo " # Starting Benchmark with $NUM_OF_LOOPS loops"
executeAllLoops

deactivate
echo " # Completed."
10 changes: 10 additions & 0 deletions frameworks/OpenTelemetry-python/config.ini.template
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
[Benchmark]
# Number of monitored_method invocations per run.
total_calls = 2000000
recursion_depth = 10
# Per-call simulated method time; 0 disables the artificial work.
# NOTE(review): units not stated here — presumably ns, confirm in benchmark.py.
method_time = 0
config_path = /tmp/monitoring.ini
inactive = False
# so it doesn't use kieker instrumentation
# (OpenTelemetry is toggled separately via the ENABLE_OTEL env var)
instrumentation_on = False
approach = 1
# Placeholder; replaced per run by updateConfigFilename in functions.sh.
output_filename = results.csv
9 changes: 9 additions & 0 deletions frameworks/OpenTelemetry-python/config.rc
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Framework-specific configuration, sourced by benchmark.sh.
# BASE_DIR and MAIN_DIR are set by benchmark.sh before this file is sourced
# (Windows-style under Cygwin, POSIX otherwise).
FRAMEWORK_NAME="OpenTelemetry-python"

# Virtualenv location and pip requirements for the benchmark environment.
VENV_DIR="$BASE_DIR/venv"
REQUIREMENTS_FILE="$BASE_DIR/requirements.txt"
# The template is copied to CONFIG_FILE with a per-run output filename.
CONFIG_TEMPLATE="$BASE_DIR/config.ini.template"
CONFIG_FILE="$BASE_DIR/config.ini"

# Shared Python benchmark harness in the repository's tools/ tree.
MOOBENCH_BIN_PY="$MAIN_DIR/tools/pybenchmark/benchmark.py"

83 changes: 83 additions & 0 deletions frameworks/OpenTelemetry-python/functions.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,83 @@


# helper to inject filename
# Regenerate $CONFIG_FILE from $CONFIG_TEMPLATE with the output_filename key
# replaced by the given value.
# Globals:   CONFIG_TEMPLATE (read), CONFIG_FILE (written)
# Arguments: $1 - result CSV path to inject
function updateConfigFilename {
    local filename=$1
    # Anchor the match so only the real key is dropped, not comments that
    # merely mention it; '|| true' keeps callers under 'set -e' alive when
    # the template contains no output_filename line at all.
    grep -v '^output_filename' "$CONFIG_TEMPLATE" > "$CONFIG_FILE" || true
    echo "output_filename = $filename" >> "$CONFIG_FILE"
}

# Translate a POSIX path into the host OS flavour: under Cygwin convert to a
# Windows path via cygpath, otherwise return the input unchanged.
# Arguments: $1 - path to translate
# Outputs:   translated path on stdout
function get_os_path {
    local p=$1
    if command -v cygpath &>/dev/null; then
        cygpath -w "$p"
    else
        printf '%s\n' "$p"
    fi
}

# Run one iteration of the baseline configuration (no instrumentation).
# Globals:   RESULTS_DIR, RECURSION_DEPTH (init.sh), TITLE (labels.sh),
#            MOOBENCH_BIN_PY, CONFIG_FILE (config.rc), PYTHON_EXE (optional)
# Arguments: $1 - configuration index, $2 - iteration number
function runNoInstrumentation {
    local k=$1
    local i=$2

    # Define Paths using variables from init.sh (RESULTS_DIR) and config.rc
    local RAW_CSV="${RESULTS_DIR}/raw-${i}-${RECURSION_DEPTH}-${k}.csv"
    local CSV_FILE
    CSV_FILE=$(get_os_path "$RAW_CSV")
    local LOG_FILE="${RESULTS_DIR}/output-raw-${i}-${RECURSION_DEPTH}-${k}.txt"

    echo " # Running Config $k: ${TITLE[$k]} (Iter $i)"

    updateConfigFilename "$CSV_FILE"

    # Use the interpreter detected by benchmark.sh (falls back to python3);
    # a hardcoded 'python3' does not exist on some Windows installs.
    ENABLE_OTEL="false" \
    "${PYTHON_EXE:-python3}" "$MOOBENCH_BIN_PY" "$CONFIG_FILE" > "$LOG_FILE" 2>&1
}

# Run one iteration with OpenTelemetry enabled but every exporter disabled.
# Globals:   RESULTS_DIR, RECURSION_DEPTH (init.sh), TITLE (labels.sh),
#            MOOBENCH_BIN_PY, CONFIG_FILE (config.rc), PYTHON_EXE (optional)
# Arguments: $1 - configuration index, $2 - iteration number
function runOpenTelemetryNoExport {
    local k=$1
    local i=$2

    local RAW_CSV="${RESULTS_DIR}/raw-${i}-${RECURSION_DEPTH}-${k}.csv"
    local CSV_FILE
    CSV_FILE=$(get_os_path "$RAW_CSV")
    local LOG_FILE="${RESULTS_DIR}/output-raw-${i}-${RECURSION_DEPTH}-${k}.txt"

    echo " # Running Config $k: ${TITLE[$k]} (Iter $i)"
    updateConfigFilename "$CSV_FILE"

    # Use the interpreter detected by benchmark.sh (falls back to python3).
    ENABLE_OTEL="true" \
    OTEL_TRACES_EXPORTER="none" \
    OTEL_METRICS_EXPORTER="none" \
    OTEL_LOGS_EXPORTER="none" \
    "${PYTHON_EXE:-python3}" "$MOOBENCH_BIN_PY" "$CONFIG_FILE" > "$LOG_FILE" 2>&1
}

# Run one iteration with OpenTelemetry exporting spans to a local Zipkin.
# Starts the Zipkin background process first and stops it afterwards.
# Globals:   RESULTS_DIR, RECURSION_DEPTH (init.sh), TITLE (labels.sh),
#            MOOBENCH_BIN_PY, CONFIG_FILE (config.rc), PYTHON_EXE (optional)
# Arguments: $1 - configuration index, $2 - iteration number
function runOpenTelemetryZipkin {
    local k=$1
    local i=$2

    local RAW_CSV="${RESULTS_DIR}/raw-${i}-${RECURSION_DEPTH}-${k}.csv"
    local CSV_FILE
    CSV_FILE=$(get_os_path "$RAW_CSV")
    local LOG_FILE="${RESULTS_DIR}/output-raw-${i}-${RECURSION_DEPTH}-${k}.txt"

    startZipkin
    echo " # Running Config $k: ${TITLE[$k]} (Iter $i)"
    updateConfigFilename "$CSV_FILE"

    # Use the interpreter detected by benchmark.sh (falls back to python3).
    ENABLE_OTEL="true" \
    OTEL_SERVICE_NAME="moobench-python" \
    OTEL_TRACES_EXPORTER="zipkin" \
    OTEL_EXPORTER_ZIPKIN_ENDPOINT="http://localhost:9411/api/v2/spans" \
    OTEL_METRICS_EXPORTER="none" \
    OTEL_LOGS_EXPORTER="none" \
    "${PYTHON_EXE:-python3}" "$MOOBENCH_BIN_PY" "$CONFIG_FILE" > "$LOG_FILE" 2>&1

    stopBackgroundProcess
}

# Dispatch one benchmark run per configured workload index, pausing a second
# between runs.
# Globals:   MOOBENCH_CONFIGURATIONS (labels.sh)
# NOTE(review): relies on the loop variable $i being set by the caller
# (executeAllLoops) — confirm against common-functions.sh.
function executeBenchmark {
    for index in $MOOBENCH_CONFIGURATIONS; do
        if [ "$index" -eq 0 ]; then
            runNoInstrumentation 0 $i
        elif [ "$index" -eq 1 ]; then
            runOpenTelemetryNoExport 1 $i
        elif [ "$index" -eq 2 ]; then
            runOpenTelemetryZipkin 2 $i
        fi
        sleep 1
    done
}
4 changes: 4 additions & 0 deletions frameworks/OpenTelemetry-python/labels.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Space-separated list of configuration indices executed by executeBenchmark;
# each index maps to one run* function in functions.sh.
MOOBENCH_CONFIGURATIONS="0 1 2"
# Human-readable label per configuration, printed in each run's log line.
TITLE[0]="No Instrumentation"
TITLE[1]="OpenTelemetry No Export"
TITLE[2]="OpenTelemetry Zipkin"
4 changes: 4 additions & 0 deletions frameworks/OpenTelemetry-python/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
opentelemetry-distro
opentelemetry-exporter-zipkin
opentelemetry-sdk
opentelemetry-api
140 changes: 85 additions & 55 deletions tools/pybenchmark/benchmark.py
Original file line number Diff line number Diff line change
@@ -1,83 +1,113 @@
# -*- coding: utf-8 -*-
# standard import
import sys
import time
import configparser
import re
# instrumentation
import os

# Optional OpenTelemetry dependency: baseline (no-instrumentation) runs must
# still work when the SDK is not installed, so failure is recorded instead of
# raised.
try:
    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor
    from opentelemetry.exporter.zipkin.json import ZipkinExporter
    from opentelemetry.sdk.resources import Resource
    from opentelemetry.semconv.resource import ResourceAttributes
    OTEL_AVAILABLE = True
except ImportError:
    OTEL_AVAILABLE = False

# Optional Kieker dependency (monitoring/ and tools/ packages of the
# pybenchmark tree); absent when only OpenTelemetry is benchmarked.
try:
    from monitoring.controller import SingleMonitoringController
    from tools.importhookast import InstrumentOnImportFinder
    from tools.importhook import PostImportFinder
    KIEKER_AVAILABLE = True
except ImportError:
    KIEKER_AVAILABLE = False

# read arguments
if len(sys.argv) < 2:
print('Path to the benchmark configuration file was not provided.')
sys.exit(1)

parser = configparser.ConfigParser()
parser.read(sys.argv[1])

total_calls =int(parser.get('Benchmark','total_calls'))
recursion_depth = int(parser.get('Benchmark','recursion_depth'))
method_time = int(parser.get('Benchmark','method_time'))
ini_path = parser.get('Benchmark','config_path')
inactive = parser.getboolean('Benchmark', 'inactive')
instrumentation_on = parser.getboolean('Benchmark', 'instrumentation_on')
approach = parser.getint('Benchmark', 'approach')
output_filename = parser.get('Benchmark', 'output_filename')

# debug
#print(f"total_calls = {total_calls}")
#print(f"recursion_depth = {recursion_depth}")
#print(f"method_time = {method_time}")

# instrument
from monitoring.controller import SingleMonitoringController
from tools.importhookast import InstrumentOnImportFinder
from tools.importhook import PostImportFinder
ex =[]
some_var = SingleMonitoringController(ini_path)
if instrumentation_on:
# print ('Instrumentation is on.')
# Read every benchmark parameter from the [Benchmark] section of the ini file
# given on the command line; abort with a readable message if any key is
# missing or malformed.
try:
    total_calls = int(parser.get('Benchmark','total_calls'))
    recursion_depth = int(parser.get('Benchmark','recursion_depth'))
    method_time = int(parser.get('Benchmark','method_time'))
    output_filename = parser.get('Benchmark', 'output_filename')
    ini_path = parser.get('Benchmark','config_path')
    inactive = parser.getboolean('Benchmark', 'inactive')
    instrumentation_on = parser.getboolean('Benchmark', 'instrumentation_on')
    approach = parser.getint('Benchmark', 'approach')
except Exception as e:
    print(f"Error parsing config: {e}")
    sys.exit(1)

# Setup Kieker only if requested
if KIEKER_AVAILABLE and instrumentation_on:
# This segment runs only for the original Kieker framework logic
some_var = SingleMonitoringController(ini_path)
if approach == 2:
# print("2nd instrumentation approach is chosen")
#if not inactive:
#print("Instrumentation is activated")
#else:
# print("Instrumentation is not activated")

sys.meta_path.insert(0, InstrumentOnImportFinder(ignore_list=ex, empty=inactive, debug_on=False))
sys.meta_path.insert(0, InstrumentOnImportFinder(ignore_list=[], empty=inactive, debug_on=False))
else:
#print("1st instrumentation approach is chosen")
#if not inactive:
# print("Instrumentation is activated")
#else:
# print("Instrumentation is not activated")

pattern_object = re.compile('monitored_application')
exclude_modules = list()
sys.meta_path.insert(0, PostImportFinder(pattern_object, exclude_modules, empty = inactive))
#else:
# print('Instrumentation is off')
sys.meta_path.insert(0, PostImportFinder(pattern_object, exclude_modules, empty=inactive))

# opentelemetry manual instrumentation
# tracer stays None unless the SDK is importable AND the ENABLE_OTEL env var
# is "true"; the benchmark loop checks it on every call.
tracer = None
enable_otel = os.environ.get("ENABLE_OTEL", "false").lower() == "true"

if OTEL_AVAILABLE and enable_otel:
    resource = Resource(attributes={
        ResourceAttributes.SERVICE_NAME: "moobench-python"
    })

    provider = TracerProvider(resource=resource)

    # Exporter selection mirrors the standard OTEL_TRACES_EXPORTER variable
    # set by the driving shell script ("none" or "zipkin").
    exporter_type = os.environ.get('OTEL_TRACES_EXPORTER', 'none')

    if exporter_type == 'zipkin':
        print("Initializing Zipkin Exporter...")
        zipkin_endpoint = os.environ.get('OTEL_EXPORTER_ZIPKIN_ENDPOINT', "http://localhost:9411/api/v2/spans")
        zipkin_exporter = ZipkinExporter(endpoint=zipkin_endpoint)
        # BatchSpanProcessor queues spans and exports them asynchronously.
        provider.add_span_processor(BatchSpanProcessor(zipkin_exporter))

    trace.set_tracer_provider(provider)
    tracer = trace.get_tracer("moobench.benchmark")

import monitored_application

# setup
output_file = open(output_filename, "w")
print(f"Writing results to: {output_filename}")
print(f"Starting execution: {total_calls} calls.")

output_file = open(output_filename, "w")
thread_id = 0

start_ns = 0
stop_ns = 0
timings = []

# run experiment
for i in range(total_calls):

start_ns = time.time_ns()
monitored_application.monitored_method(method_time, recursion_depth)

if OTEL_AVAILABLE and tracer:
with tracer.start_as_current_span("monitored_method"):
monitored_application.monitored_method(method_time, recursion_depth)
else:
monitored_application.monitored_method(method_time, recursion_depth)

stop_ns = time.time_ns()
timings.append(stop_ns-start_ns)
if i%100000 == 0:
print(timings[-1])

duration = stop_ns - start_ns

if i % 100000 == 0 and i > 0:
print(f"Call {i}: {duration} ns")

output_file.write(f"{thread_id};{timings[-1]}\n")
output_file.write(f"{thread_id};{duration}\n")

output_file.close()

# end: drain any spans still queued in the BatchSpanProcessor before the
# interpreter exits. The SDK provider's shutdown() force-flushes and blocks
# until done, which is both faster and more reliable than a fixed sleep.
if OTEL_AVAILABLE and os.environ.get('OTEL_TRACES_EXPORTER') == 'zipkin':
    print("Flushing traces to Zipkin...")
    provider = trace.get_tracer_provider()
    # The no-op API provider (used when setup was skipped) has no shutdown().
    shutdown = getattr(provider, "shutdown", None)
    if callable(shutdown):
        shutdown()

print("Benchmark finished.")
Loading