Diffstat (limited to 'tests/qemu-iotests')
-rwxr-xr-x  tests/qemu-iotests/check          |  6
-rw-r--r--  tests/qemu-iotests/meson.build    | 30
-rw-r--r--  tests/qemu-iotests/testenv.py     | 30
-rw-r--r--  tests/qemu-iotests/testrunner.py  | 49
4 files changed, 74 insertions, 41 deletions
diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check
index 0c27721a41..75de1b4691 100755
--- a/tests/qemu-iotests/check
+++ b/tests/qemu-iotests/check
@@ -32,8 +32,6 @@ def make_argparser() -> argparse.ArgumentParser:
 
     p.add_argument('-n', '--dry-run', action='store_true',
                    help='show me, do not run tests')
-    p.add_argument('-makecheck', action='store_true',
-                   help='pretty print output for make check')
     p.add_argument('-j', dest='jobs', type=int, default=1,
                    help='run tests in multiple parallel jobs')
 
@@ -53,6 +51,8 @@ def make_argparser() -> argparse.ArgumentParser:
     p.add_argument('--color', choices=['on', 'off', 'auto'],
                    default='auto', help="use terminal colors. The default "
                    "'auto' value means use colors if terminal stdout detected")
+    p.add_argument('-tap', action='store_true',
+                   help='produce TAP output')
 
     g_env = p.add_argument_group('test environment options')
     mg = g_env.add_mutually_exclusive_group()
@@ -164,7 +164,7 @@ if __name__ == '__main__':
     if args.dry_run:
         print('\n'.join(tests))
     else:
-        with TestRunner(env, makecheck=args.makecheck,
+        with TestRunner(env, tap=args.tap,
                         color=args.color) as tr:
             paths = [os.path.join(env.source_iotests, t) for t in tests]
             ok = tr.run_tests(paths, args.jobs)
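
For comparison, a sketch of the two invocations (the test name 001 is
illustrative; the output formats follow the testrunner.py changes further down):

    $ ./check -makecheck -qcow2 001     # old:   TEST   iotest-qcow2: 001
    $ ./check -tap -qcow2 001           # new: ok qcow2 001
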
diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build
new file mode 100644
index 0000000000..5be3c74127
--- /dev/null
+++ b/tests/qemu-iotests/meson.build
@@ -0,0 +1,30 @@
+if have_tools and targetos != 'windows'
+  qemu_iotests_binaries = [qemu_img, qemu_io, qemu_nbd, qsd]
+  qemu_iotests_env = {'PYTHON': python.full_path()}
+  qemu_iotests_formats = {
+    'qcow2': 'quick',
+    'raw': 'slow',
+    'qed': 'thorough',
+    'vmdk': 'thorough',
+    'vpc': 'thorough'
+  }
+
+  foreach k, v : emulators
+    if k.startswith('qemu-system-')
+      qemu_iotests_binaries += v
+    endif
+  endforeach
+  foreach format, speed: qemu_iotests_formats
+    if speed == 'quick'
+      suites = 'block'
+    else
+      suites = ['block-' + speed, speed]
+    endif
+    test('qemu-iotests ' + format, sh, args: [files('../check-block.sh'), format],
+         depends: qemu_iotests_binaries, env: qemu_iotests_env,
+         protocol: 'tap',
+         suite: suites,
+         timeout: 0,
+         is_parallel: false)
+  endforeach
+endif
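
The dictionary above assigns each image format a speed class, and the foreach
registers one meson test per format: the quick qcow2 run goes into the 'block'
suite, while the slower formats land in both 'block-<speed>' and '<speed>'.
Assuming a configured build directory, individual runs would be selected
roughly like this:

    $ meson test --suite block            # the quick qcow2 run only
    $ meson test 'qemu-iotests vmdk'      # a single thorough format
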
diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py
index c33454fa68..0f32897fe8 100644
--- a/tests/qemu-iotests/testenv.py
+++ b/tests/qemu-iotests/testenv.py
@@ -287,21 +287,21 @@ class TestEnv(ContextManager['TestEnv']):
     def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
         self.close()
 
-    def print_env(self) -> None:
+    def print_env(self, prefix: str = '') -> None:
         template = """\
-QEMU          -- "{QEMU_PROG}" {QEMU_OPTIONS}
-QEMU_IMG      -- "{QEMU_IMG_PROG}" {QEMU_IMG_OPTIONS}
-QEMU_IO       -- "{QEMU_IO_PROG}" {QEMU_IO_OPTIONS}
-QEMU_NBD      -- "{QEMU_NBD_PROG}" {QEMU_NBD_OPTIONS}
-IMGFMT        -- {IMGFMT}{imgopts}
-IMGPROTO      -- {IMGPROTO}
-PLATFORM      -- {platform}
-TEST_DIR      -- {TEST_DIR}
-SOCK_DIR      -- {SOCK_DIR}
-GDB_OPTIONS   -- {GDB_OPTIONS}
-VALGRIND_QEMU -- {VALGRIND_QEMU}
-PRINT_QEMU_OUTPUT -- {PRINT_QEMU}
-"""
+{prefix}QEMU          -- "{QEMU_PROG}" {QEMU_OPTIONS}
+{prefix}QEMU_IMG      -- "{QEMU_IMG_PROG}" {QEMU_IMG_OPTIONS}
+{prefix}QEMU_IO       -- "{QEMU_IO_PROG}" {QEMU_IO_OPTIONS}
+{prefix}QEMU_NBD      -- "{QEMU_NBD_PROG}" {QEMU_NBD_OPTIONS}
+{prefix}IMGFMT        -- {IMGFMT}{imgopts}
+{prefix}IMGPROTO      -- {IMGPROTO}
+{prefix}PLATFORM      -- {platform}
+{prefix}TEST_DIR      -- {TEST_DIR}
+{prefix}SOCK_DIR      -- {SOCK_DIR}
+{prefix}GDB_OPTIONS   -- {GDB_OPTIONS}
+{prefix}VALGRIND_QEMU -- {VALGRIND_QEMU}
+{prefix}PRINT_QEMU_OUTPUT -- {PRINT_QEMU}
+{prefix}"""
 
         args = collections.defaultdict(str, self.get_env())
 
@@ -310,5 +310,5 @@ PRINT_QEMU_OUTPUT -- {PRINT_QEMU}
 
         u = os.uname()
         args['platform'] = f'{u.sysname}/{u.machine} {u.nodename} {u.release}'
-
+        args['prefix'] = prefix
         print(template.format_map(args))
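
The new prefix parameter lets a caller turn the environment dump into TAP
comment lines: with prefix='# ' every line starts with '#', which TAP
consumers treat as diagnostics and ignore. Illustrative output (paths and
options are placeholders):

    # QEMU          -- "/path/to/qemu-system-x86_64" <options>
    # QEMU_IMG      -- "/path/to/qemu-img" <options>
    # IMGFMT        -- qcow2
    # ...

Note that the closing {prefix}""" keeps the template's final, otherwise empty
line prefixed too, so the trailing line prints as '# ' rather than as a bare
blank line in the TAP stream.
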
diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py
index 15788f919e..0eace147b8 100644
--- a/tests/qemu-iotests/testrunner.py
+++ b/tests/qemu-iotests/testrunner.py
@@ -152,10 +152,10 @@ class TestRunner(ContextManager['TestRunner']):
 
         return results
 
-    def __init__(self, env: TestEnv, makecheck: bool = False,
+    def __init__(self, env: TestEnv, tap: bool = False,
                  color: str = 'auto') -> None:
         self.env = env
-        self.makecheck = makecheck
+        self.tap = tap
         self.last_elapsed = LastElapsedTime('.last-elapsed-cache', env)
 
         assert color in ('auto', 'on', 'off')
@@ -185,13 +185,16 @@ class TestRunner(ContextManager['TestRunner']):
         """ Print short test info before/after test run """
         test = os.path.basename(test)
 
-        if self.makecheck and status != '...':
-            if status and status != 'pass':
-                status = f' [{status}]'
-            else:
-                status = ''
+        if test_field_width is None:
+            test_field_width = 8
 
-            print(f'  TEST   iotest-{self.env.imgfmt}: {test}{status}')
+        if self.tap:
+            if status == 'pass':
+                print(f'ok {self.env.imgfmt} {test}')
+            elif status == 'fail':
+                print(f'not ok {self.env.imgfmt} {test}')
+            elif status == 'not run':
+                print(f'ok {self.env.imgfmt} {test} # SKIP')
             return
 
         if lasttime:
@@ -343,7 +346,7 @@ class TestRunner(ContextManager['TestRunner']):
         last_el = self.last_elapsed.get(test)
         start = datetime.datetime.now().strftime('%H:%M:%S')
 
-        if not self.makecheck:
+        if not self.tap:
             self.test_print_one_line(test=test,
                                      test_field_width=test_field_width,
                                      status = 'started' if mp else '...',
@@ -372,7 +375,9 @@ class TestRunner(ContextManager['TestRunner']):
         notrun = []
         casenotrun = []
 
-        if not self.makecheck:
+        if self.tap:
+            self.env.print_env('# ')
+        else:
             self.env.print_env()
 
         test_field_width = max(len(os.path.basename(t)) for t in tests) + 2
@@ -398,8 +403,6 @@ class TestRunner(ContextManager['TestRunner']):
 
             if res.status == 'fail':
                 failed.append(name)
-                if self.makecheck:
-                    self.env.print_env()
                 if res.diff:
                     print('\n'.join(res.diff))
             elif res.status == 'not run':
@@ -412,16 +415,16 @@ class TestRunner(ContextManager['TestRunner']):
             if res.interrupted:
                 break
 
-        if notrun:
-            print('Not run:', ' '.join(notrun))
+        if not self.tap:
+            if notrun:
+                print('Not run:', ' '.join(notrun))
 
-        if casenotrun:
-            print('Some cases not run in:', ' '.join(casenotrun))
+            if casenotrun:
+                print('Some cases not run in:', ' '.join(casenotrun))
 
-        if failed:
-            print('Failures:', ' '.join(failed))
-            print(f'Failed {len(failed)} of {n_run} iotests')
-            return False
-        else:
-            print(f'Passed all {n_run} iotests')
-            return True
+            if failed:
+                print('Failures:', ' '.join(failed))
+                print(f'Failed {len(failed)} of {n_run} iotests')
+            else:
+                print(f'Passed all {n_run} iotests')
+        return not failed
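
Taken together, a TAP-mode run prints the environment as '#' comments followed
by one result line per test in the ok / not ok / '# SKIP' forms shown above,
for example (hypothetical tests):

    # QEMU          -- "/path/to/qemu-system-x86_64" <options>
    # ...
    ok qcow2 001
    not ok qcow2 002
    ok qcow2 003 # SKIP

The human-readable summary ('Not run', 'Failures', 'Passed all ...') stays on
the non-TAP path, since a TAP consumer such as the meson harness wired up in
meson.build derives the counts from the result lines itself; both modes now
share the single 'return not failed', so the caller sees the same
success/failure result either way.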