v2v/tests/cfg/nbdkit/nbdkit.cfg (6 additions, 0 deletions)
@@ -238,3 +238,9 @@
        only source_none..dest_none
        checkpoint = 'cve_starttls'
        version_required = "[nbdkit-1.26.5-1,nbdkit-1.45)"
    - parameter:
        only source_none..dest_none
        variants:
            - name:
                version_required = "[nbdkit-1.46,)"
                checkpoint = 'test_nbdkit_instance_name'
v2v/tests/src/nbdkit/nbdkit.py (72 additions, 0 deletions)
@@ -739,6 +739,76 @@ def check_blocksize_constraints():
            if field not in cmd_output:
                test.fail('Expected field "%s" not found in export output: %s' % (field, cmd_output))

    def test_nbdkit_instance_name():
        from subprocess import Popen, TimeoutExpired

        # Define our ports and a log file path within the test's debug directory
        instances = [
            {"name": "server-gold", "port": 10809},
            {"name": "server-silver", "port": 10810}
        ]
        log_path = os.path.join(data_dir.get_tmp_dir(), "nbdkit_debug.log")

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

# First, let's examine the file at lines 750, 789-798, and surrounding context
wc -l v2v/tests/src/nbdkit/nbdkit.py

Repository: autotest/tp-libvirt

Length of output: 97


🏁 Script executed:

# Read the relevant sections with context
sed -n '740,810p' v2v/tests/src/nbdkit/nbdkit.py

Repository: autotest/tp-libvirt

Length of output: 3219


🏁 Script executed:

# Also check lines 789-798
sed -n '785,805p' v2v/tests/src/nbdkit/nbdkit.py

Repository: autotest/tp-libvirt

Length of output: 1066


🏁 Script executed:

# Check if there are other logging patterns in this file
rg "log_path|logdir|outputdir|self.log" v2v/tests/src/nbdkit/nbdkit.py -n -A 2 -B 2

Repository: autotest/tp-libvirt

Length of output: 914


🏁 Script executed:

# Check Avocado test class and imports in this file
head -50 v2v/tests/src/nbdkit/nbdkit.py

Repository: autotest/tp-libvirt

Length of output: 1474


🏁 Script executed:

# Search for how logs are typically handled in this test suite
rg "\.log\(|logdir|outputdir" v2v/tests/src/nbdkit/ -n -l

Repository: autotest/tp-libvirt

Length of output: 45


🏁 Script executed:

# Search for test.logdir or similar usage in the test suite
rg "test\.logdir|test\.outputdir|test\.logfile" v2v/tests/src/ -n | head -20

Repository: autotest/tp-libvirt

Length of output: 45


🏁 Script executed:

# Check other nbdkit tests or similar tests for logging patterns
rg "data_dir\.get_tmp_dir\(\)" v2v/tests/src/nbdkit/ -n -B 2 -A 2

Repository: autotest/tp-libvirt

Length of output: 7430


🏁 Script executed:

# Check for process output redirection patterns in similar tests
rg "with open.*log" v2v/tests/src/ -n -l | head -10

Repository: autotest/tp-libvirt

Length of output: 93


🏁 Script executed:

# Look for Avocado test logging API usage in this file
rg "self\.log\(|LOG\." v2v/tests/src/nbdkit/nbdkit.py -n | head -20

Repository: autotest/tp-libvirt

Length of output: 1061


Move nbdkit debug log to test artifacts directory.

The nbdkit debug output that contains the instance-name evidence is written to a temporary directory and discarded after the test completes, so passing runs never preserve the debug information needed for troubleshooting. Store the log file in the test results directory (using test.logdir or a similar Avocado test-artifact API) and/or emit the validated instance-name lines through the test logger so the information persists in the test artifacts.

Also applies to: 789-798
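
🛠️ Minimal sketch (illustrative only; it assumes the test object handed to run() exposes Avocado's logdir attribute, so adjust to whatever artifact API this suite actually uses):

-        log_path = os.path.join(data_dir.get_tmp_dir(), "nbdkit_debug.log")
+        # Hypothetical: keep the debug log with the test results so it survives passing runs
+        log_path = os.path.join(test.logdir, "nbdkit_debug.log")

The matched log lines could also be echoed through LOG.info so they land in the job log even if the file itself is pruned.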

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@v2v/tests/src/nbdkit/nbdkit.py` at line 750, The nbdkit debug log is being
written to a temp directory (log_path = os.path.join(data_dir.get_tmp_dir(),
"nbdkit_debug.log")) which gets discarded; change the path to store the log in
the test artifact directory (use test.logdir or the Avocado artifact API)
instead of data_dir.get_tmp_dir(), and also emit the validated instance-name
lines to the test logger (e.g., via self.log or the test object logger) so they
appear in test artifacts; apply the same change for the similar block around
lines referencing the same log_path logic (the 789-798 region) so both locations
persist debug output to the test results directory.


        # Store the Popen objects here
        handles = []
        try:
            with open(log_path, "a") as log_file:
                for inst in instances:
                    LOG.info(f"Starting nbdkit instance: {inst['name']}")

                    # Construct the command
                    # -f (--foreground): keeps nbdkit attached so we can redirect its output easily
                    # -v (--verbose): required so the debug messages that carry the instance name are emitted
                    cmd = [
                        "nbdkit", "-f", "-v",
                        "--name", inst['name'],
                        "--port", str(inst['port']),
                        "memory", "10M"
                    ]
                    # Start nbdkit as a background process, piping stdout and stderr to our log file
                    p = Popen(cmd, stderr=log_file, stdout=log_file)
                    handles.append((inst, p))

                for inst, p in handles:
                    if p.poll() is not None:
                        test.fail(f"nbdkit {inst['name']} failed to start! Check {log_path}")

                # Allow nbdkit instances time to start listening
                import time
                time.sleep(1)

                # Trigger activity with qemu-io
                for inst in instances:
                    LOG.info(f"Writing to {inst['name']} on port {inst['port']}")
                    qemu_cmd = f"qemu-io -f raw nbd://localhost:{inst['port']} -c \'write 0 1M\'"

                    result = process.run(qemu_cmd, shell=True)
                    if result.exit_status != 0:
                        test.fail(f"qemu-io failed for {inst['name']}")
Comment on lines +785 to +787

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

find . -type f -name "nbdkit.py" | head -20

Repository: autotest/tp-libvirt

Length of output: 95


🏁 Script executed:

if [ -f "v2v/tests/src/nbdkit/nbdkit.py" ]; then
  sed -n '780,795p' "v2v/tests/src/nbdkit/nbdkit.py"
else
  echo "File not found at expected path"
  find . -name "nbdkit.py" -type f 2>/dev/null | head -5
fi

Repository: autotest/tp-libvirt

Length of output: 831


🏁 Script executed:

if [ -f "v2v/tests/src/nbdkit/nbdkit.py" ]; then
  head -50 "v2v/tests/src/nbdkit/nbdkit.py" | grep -E "^import|^from" | head -20
fi

Repository: autotest/tp-libvirt

Length of output: 465


🏁 Script executed:

if [ -f "v2v/tests/src/nbdkit/nbdkit.py" ]; then
  rg "process\.run\(" "v2v/tests/src/nbdkit/nbdkit.py" -A 3 -B 1
fi

Repository: autotest/tp-libvirt

Length of output: 24805


🌐 Web query:

avocado.utils.process.run() default ignore_status behavior exception

💡 Result:

In avocado.utils.process.run(cmd, ...), the ignore_status parameter defaults to False. With the default (ignore_status=False), if the command exits with a non-zero status, run() will raise avocado.utils.process.CmdError instead of returning normally. When ignore_status=True, it will not raise on non-zero exit status and will return the result anyway. [1]

Source(s):
[1] Avocado utils API docs (avocado.utils.process.run signature and ignore_status/CmdError behavior) (avocado-framework.readthedocs.io)



🌐 Web query:

avocado framework process.run non-zero exit default behavior

💡 Result:

In Avocado, avocado.utils.process.run() defaults to ignore_status=False, meaning a non-zero exit code causes process.run() to raise avocado.utils.process.CmdError (instead of returning normally). If you want to accept non-zero exits, pass ignore_status=True. [1]

This is why Avocado docs commonly show wrapping process.run() in a try/except process.CmdError when failures are expected/handled. [2]

Sources: [1] [3] [2] [4]
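
To make the quoted behavior concrete, a small stand-alone illustration (plain avocado.utils.process usage, not code from this PR):

from avocado.utils import process

# Default (ignore_status=False): a non-zero exit raises CmdError before any exit_status check can run
try:
    process.run("false")
except process.CmdError as detail:
    print("raised, exit status:", detail.result.exit_status)

# With ignore_status=True: run() returns a CmdResult and the caller inspects exit_status itself
result = process.run("false", ignore_status=True)
print("returned, exit status:", result.exit_status)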


Pass ignore_status=True to handle qemu-io failures gracefully.

avocado.utils.process.run() raises CmdError on non-zero exit when ignore_status is not set, making the result.exit_status check unreachable. Add ignore_status=True so the code can catch failures and fail the test with the instance-specific error message instead of a generic exception.

🛠️ Minimal fix
-                result = process.run(qemu_cmd, shell=True)
+                result = process.run(qemu_cmd, shell=True, ignore_status=True)
                 if result.exit_status != 0:
-                    test.fail(f"qemu-io failed for {inst['name']}")
+                    test.fail(
+                        f"qemu-io failed for {inst['name']}: "
+                        f"{result.stderr_text or result.stdout_text}"
+                    )
🧰 Tools
🪛 Ruff (0.15.6)

[error] 785-785: Function call with shell=True parameter identified, security issue

(S604)
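
If the Ruff warning is worth acting on, the shell is not actually needed here. A possible variant (untested sketch; it relies on avocado's default shell=False path tokenizing the command with shlex, so the quoted 'write 0 1M' still reaches qemu-io as one -c argument):

qemu_cmd = f"qemu-io -f raw nbd://localhost:{inst['port']} -c 'write 0 1M'"
# shell=False (the default) avoids S604; ignore_status=True keeps the existing exit_status check reachable
result = process.run(qemu_cmd, ignore_status=True)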

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@v2v/tests/src/nbdkit/nbdkit.py` around lines 785 - 787, The call to
process.run in the qemu-io block should pass ignore_status=True so CmdError is
not raised on non-zero exit; update the process.run(qemu_cmd, ...) invocation to
include ignore_status=True, allowing the subsequent check of result.exit_status
and the instance-specific test.fail(f"qemu-io failed for {inst['name']}") to
execute normally.


            # VALIDATION: Check the log file for the specific instance names
            with open(log_path, 'r') as f:
                log_content = f.read()

            for inst in instances:
                # Look for the debug pattern -> nbdkit[name]: debug: ...
                expected_tag = f"nbdkit[{inst['name']}]: debug:"
                if expected_tag not in log_content:
                    test.fail(f"Instance name '{inst['name']}' was not found in debug logs!")
                LOG.info(f"Validated instance {inst['name']} appears in the debug logs")
        finally:
            # Cleanup: Kill the processes
            LOG.info("Cleanup: Kill the processes")
            for _, p in handles:
                if p.poll() is None:
                    p.terminate()
                    try:
                        # Wait up to 5 seconds for it to exit
                        p.wait(timeout=5)
                    except TimeoutExpired:
                        # Force kill if it's being stubborn
                        p.kill()

    if version_required and not multiple_versions_compare(
            version_required):
        test.cancel("Testing requires version: %s" % version_required)
@@ -817,5 +887,7 @@ def check_blocksize_constraints():
        check_blkhash_option()
    elif checkpoint == 'blocksize_constraints':
        check_blocksize_constraints()
    elif checkpoint == 'test_nbdkit_instance_name':
        test_nbdkit_instance_name()
    else:
        test.error('Not found testcase: %s' % checkpoint)