Diffstat (limited to 'tests/functional')
-rw-r--r--  tests/functional/acpi-bits/bits-config/bits-cfg.txt      |   18
-rw-r--r--  tests/functional/acpi-bits/bits-tests/smbios.py2         | 2434
-rw-r--r--  tests/functional/acpi-bits/bits-tests/smilatency.py2     |  107
-rw-r--r--  tests/functional/acpi-bits/bits-tests/testacpi.py2       |  287
-rw-r--r--  tests/functional/acpi-bits/bits-tests/testcpuid.py2      |   87
-rw-r--r--  tests/functional/meson.build                             |  205
-rw-r--r--  tests/functional/qemu_test/__init__.py                   |   14
-rw-r--r--  tests/functional/qemu_test/asset.py                      |  171
-rw-r--r--  tests/functional/qemu_test/cmd.py                        |  193
-rw-r--r--  tests/functional/qemu_test/config.py                     |   36
-rw-r--r--  tests/functional/qemu_test/tesseract.py                  |   35
-rw-r--r--  tests/functional/qemu_test/testcase.py                   |  202
-rw-r--r--  tests/functional/qemu_test/utils.py                      |   56
-rwxr-xr-x  tests/functional/test_aarch64_sbsaref.py                 |  186
-rwxr-xr-x  tests/functional/test_aarch64_virt.py                    |  131
-rwxr-xr-x  tests/functional/test_acpi_bits.py                       |  410
-rwxr-xr-x  tests/functional/test_arm_bflt.py                        |   44
-rwxr-xr-x  tests/functional/test_arm_canona1100.py                  |   39
-rwxr-xr-x  tests/functional/test_arm_integratorcp.py                |  105
-rwxr-xr-x  tests/functional/test_avr_mega2560.py                    |   52
-rwxr-xr-x  tests/functional/test_cpu_queries.py                     |   37
-rwxr-xr-x  tests/functional/test_empty_cpu_model.py                 |   24
-rwxr-xr-x  tests/functional/test_info_usernet.py                    |   36
-rwxr-xr-x  tests/functional/test_linux_initrd.py                    |   96
-rwxr-xr-x  tests/functional/test_loongarch64_virt.py                |   62
-rwxr-xr-x  tests/functional/test_m68k_nextcube.py                   |   73
-rwxr-xr-x  tests/functional/test_mem_addr_space.py                  |  314
-rwxr-xr-x  tests/functional/test_microblaze_s3adsp1800.py           |   40
-rwxr-xr-x  tests/functional/test_microblazeel_s3adsp1800.py         |   42
-rwxr-xr-x  tests/functional/test_mips64el_fuloong2e.py              |   45
-rwxr-xr-x  tests/functional/test_mips64el_loongson3v.py             |   39
-rwxr-xr-x  tests/functional/test_netdev_ethtool.py                  |   88
-rwxr-xr-x  tests/functional/test_pc_cpu_hotplug_props.py            |   36
-rwxr-xr-x  tests/functional/test_ppc64_hv.py                        |  195
-rwxr-xr-x  tests/functional/test_ppc64_powernv.py                   |   82
-rwxr-xr-x  tests/functional/test_ppc64_pseries.py                   |   90
-rwxr-xr-x  tests/functional/test_ppc_405.py                         |   37
-rwxr-xr-x  tests/functional/test_ppc_40p.py                         |   78
-rwxr-xr-x  tests/functional/test_ppc_74xx.py                        |  126
-rwxr-xr-x  tests/functional/test_ppc_amiga.py                       |   43
-rwxr-xr-x  tests/functional/test_ppc_bamboo.py                      |   43
-rwxr-xr-x  tests/functional/test_ppc_mpc8544ds.py                   |   37
-rwxr-xr-x  tests/functional/test_ppc_virtex_ml507.py                |   39
-rwxr-xr-x  tests/functional/test_rx_gdbsim.py                       |   78
-rwxr-xr-x  tests/functional/test_s390x_ccw_virtio.py                |  276
-rwxr-xr-x  tests/functional/test_s390x_topology.py                  |  421
-rwxr-xr-x  tests/functional/test_sparc64_sun4u.py                   |   41
-rwxr-xr-x  tests/functional/test_version.py                         |   28
-rwxr-xr-x  tests/functional/test_virtio_gpu.py                      |  151
-rwxr-xr-x  tests/functional/test_virtio_version.py                  |  177
-rwxr-xr-x  tests/functional/test_x86_cpu_model_versions.py          |  335
51 files changed, 8021 insertions, 0 deletions
diff --git a/tests/functional/acpi-bits/bits-config/bits-cfg.txt b/tests/functional/acpi-bits/bits-config/bits-cfg.txt
new file mode 100644
index 0000000000..8010804453
--- /dev/null
+++ b/tests/functional/acpi-bits/bits-config/bits-cfg.txt
@@ -0,0 +1,18 @@
+# BITS configuration file
+[bits]
+
+# To run BITS in batch mode, set batch to a list of one or more of the
+# following keywords; BITS will then run all of the requested operations, then
+# save the log file to disk.
+#
+# test: Run the full BITS testsuite.
+# acpi: Dump all ACPI structures.
+# smbios: Dump all SMBIOS structures.
+#
+# Leave batch set to an empty string to disable batch mode.
+# batch =
+
+# Uncomment the following to run all available batch operations
+# please take a look at boot/python/init.py in bits zip file
+# to see how these options are parsed and used.
+batch = test acpi smbios
diff --git a/tests/functional/acpi-bits/bits-tests/smbios.py2 b/tests/functional/acpi-bits/bits-tests/smbios.py2
new file mode 100644
index 0000000000..5868a7137a
--- /dev/null
+++ b/tests/functional/acpi-bits/bits-tests/smbios.py2
@@ -0,0 +1,2434 @@
+# Copyright (c) 2015, Intel Corporation
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright notice,
+#       this list of conditions and the following disclaimer in the documentation
+#       and/or other materials provided with the distribution.
+#     * Neither the name of Intel Corporation nor the names of its contributors
+#       may be used to endorse or promote products derived from this software
+#       without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script runs only from the biosbits VM.
+
+"""SMBIOS/DMI module."""
+
+import bits
+import bitfields
+import ctypes
+import redirect
+import struct
+import uuid
+import unpack
+import ttypager
+import sys
+
+class SMBIOS(unpack.Struct):
+    def __new__(cls):
+        if sys.platform == "BITS-EFI":
+            import efi
+            sm_ptr = efi.system_table.ConfigurationTableDict.get(efi.SMBIOS_TABLE_GUID)
+        else:
+            address = 0xF0000
+            mem = bits.memory(0xF0000, 0x10000)
+            for offset in range(0, len(mem), 16):
+                signature = (ctypes.c_char * 4).from_address(address + offset).value
+                if signature == "_SM_":
+                    entry_point_length = ctypes.c_ubyte.from_address(address + offset + 5).value
+                    csum = sum(map(ord, mem[offset:offset + entry_point_length])) & 0xff
+                    if csum == 0:
+                        sm_ptr = address + offset
+                        break
+            else:
+                return None
+
+        if not sm_ptr:
+            return None
+
+        sm = super(SMBIOS, cls).__new__(cls)
+        sm._header_memory = bits.memory(sm_ptr, 0x1f)
+        return sm
+
+    def __init__(self):
+        super(SMBIOS, self).__init__()
+        u = unpack.Unpackable(self._header_memory)
+        self.add_field('header', Header(u))
+        self._structure_memory = bits.memory(self.header.structure_table_address, self.header.structure_table_length)
+        u = unpack.Unpackable(self._structure_memory)
+        self.add_field('structures', unpack.unpack_all(u, _smbios_structures, self), unpack.format_each("\n\n{!r}"))
+
+    def structure_type(self, num):
+        '''Dumps structure of given Type if present'''
+        try:
+            types_present = [self.structures[x].smbios_structure_type for x in range(len(self.structures))]
+            matrix = dict()
+            for index in range(len(types_present)):
+                if types_present.count(types_present[index]) == 1:
+                    matrix[types_present[index]] = self.structures[index]
+                else: # if multiple structures of the same type, return a list of structures for the type number
+                    if matrix.has_key(types_present[index]):
+                        matrix[types_present[index]].append(self.structures[index])
+                    else:
+                        matrix[types_present[index]] = [self.structures[index]]
+            return matrix[num]
+        except:
+            print "Failure: Type {} - not found".format(num)
+
+class Header(unpack.Struct):
+    def __new__(cls, u):
+        return super(Header, cls).__new__(cls)
+
+    def __init__(self, u):
+        super(Header, self).__init__()
+        self.raw_data = u.unpack_rest()
+        u = unpack.Unpackable(self.raw_data)
+        self.add_field('anchor_string', u.unpack_one("4s"))
+        self.add_field('checksum', u.unpack_one("B"))
+        self.add_field('length', u.unpack_one("B"))
+        self.add_field('major_version', u.unpack_one("B"))
+        self.add_field('minor_version', u.unpack_one("B"))
+        self.add_field('max_structure_size', u.unpack_one("<H"))
+        self.add_field('entry_point_revision', u.unpack_one("B"))
+        self.add_field('formatted_area', u.unpack_one("5s"))
+        self.add_field('intermediate_anchor_string', u.unpack_one("5s"))
+        self.add_field('intermediate_checksum', u.unpack_one("B"))
+        self.add_field('structure_table_length', u.unpack_one("<H"))
+        self.add_field('structure_table_address', u.unpack_one("<I"))
+        self.add_field('number_structures', u.unpack_one("<H"))
+        self.add_field('bcd_revision', u.unpack_one("B"))
+        if not u.at_end():
+            self.add_field('data', u.unpack_rest())
+
+class SmbiosBaseStructure(unpack.Struct):
+    def __new__(cls, u, sm):
+        t = u.unpack_peek_one("B")
+        if cls.smbios_structure_type is not None and t != cls.smbios_structure_type:
+            return None
+        return super(SmbiosBaseStructure, cls).__new__(cls)
+
+    def __init__(self, u, sm):
+        super(SmbiosBaseStructure, self).__init__()
+        self.start_offset = u.offset
+        length = u.unpack_peek_one("<xB")
+        self.raw_data = u.unpack_raw(length)
+        self.u = unpack.Unpackable(self.raw_data)
+
+        self.strings_offset = u.offset
+        def unpack_string():
+            return "".join(iter(lambda: u.unpack_one("c"), "\x00"))
+        strings = list(iter(unpack_string, ""))
+        if not strings:
+            u.skip(1)
+
+        self.strings_length = u.offset - self.strings_offset
+        self.raw_strings = str(bits.memory(sm.header.structure_table_address + self.strings_offset, self.strings_length))
+
+        if len(strings):
+            self.strings = strings
+
+        self.add_field('type', self.u.unpack_one("B"))
+        self.add_field('length', self.u.unpack_one("B"))
+        self.add_field('handle', self.u.unpack_one("<H"))
+
+    def fini(self):
+        if not self.u.at_end():
+            self.add_field('data', self.u.unpack_rest())
+        del self.u
+
+    def fmtstr(self, i):
+        """Format the specified index and the associated string"""
+        return "{} '{}'".format(i, self.getstr(i))
+
+    def getstr(self, i):
+        """Get the string associated with the given index"""
+        if i == 0:
+            return "(none)"
+        if not hasattr(self, "strings"):
+            return "(error: structure has no strings)"
+        if i > len(self.strings):
+            return "(error: string index out of range)"
+        return self.strings[i - 1]
+
+class BIOSInformation(SmbiosBaseStructure):
+    smbios_structure_type = 0
+
+    def __init__(self, u, sm):
+        super(BIOSInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('vendor', u.unpack_one("B"), self.fmtstr)
+            self.add_field('version', u.unpack_one("B"), self.fmtstr)
+            self.add_field('starting_address_segment', u.unpack_one("<H"))
+            self.add_field('release_date', u.unpack_one("B"), self.fmtstr)
+            self.add_field('rom_size', u.unpack_one("B"))
+            self.add_field('characteristics', u.unpack_one("<Q"))
+            minor_version_str = str(sm.header.minor_version) # 34 is .34, 4 is .4, 41 is .41; compare ASCIIbetically to compare initial digits rather than numeric value
+            if (sm.header.major_version, minor_version_str) >= (2,"4"):
+                characteristic_bytes = 2
+            else:
+                characteristic_bytes = self.length - 0x12
+            self.add_field('characteristics_extensions', [u.unpack_one("B") for b in range(characteristic_bytes)])
+            if (sm.header.major_version, minor_version_str) >= (2,"4"):
+                self.add_field('major_release', u.unpack_one("B"))
+                self.add_field('minor_release', u.unpack_one("B"))
+                self.add_field('ec_major_release', u.unpack_one("B"))
+                self.add_field('ec_minor_release', u.unpack_one("B"))
+        except:
+            self.decode_failure = True
+            print "Error parsing BIOSInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemInformation(SmbiosBaseStructure):
+    smbios_structure_type = 1
+
+    def __init__(self, u, sm):
+        super(SystemInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
+            self.add_field('product_name', u.unpack_one("B"), self.fmtstr)
+            self.add_field('version', u.unpack_one("B"), self.fmtstr)
+            self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x8:
+                self.add_field('uuid', uuid.UUID(bytes_le=u.unpack_one("16s")))
+                wakeup_types = {
+                    0: 'Reserved',
+                    1: 'Other',
+                    2: 'Unknown',
+                    3: 'APM Timer',
+                    4: 'Modem Ring',
+                    5: 'LAN Remote',
+                    6: 'Power Switch',
+                    7: 'PCI PME#',
+                    8: 'AC Power Restored'
+                }
+                self.add_field('wakeup_type', u.unpack_one("B"), unpack.format_table("{}", wakeup_types))
+            if self.length > 0x19:
+                self.add_field('sku_number', u.unpack_one("B"), self.fmtstr)
+                self.add_field('family', u.unpack_one("B"), self.fmtstr)
+        except:
+            self.decode_failure = True
+            print "Error parsing SystemInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+_board_types = {
+    1: 'Unknown',
+    2: 'Other',
+    3: 'Server Blade',
+    4: 'Connectivity Switch',
+    5: 'System Management Module',
+    6: 'Processor Module',
+    7: 'I/O Module',
+    8: 'Memory Module',
+    9: 'Daughter Board',
+    0xA: 'Motherboard',
+    0xB: 'Processor/Memory Module',
+    0xC: 'Processor/IO Module',
+    0xD: 'Interconnect Board'
+}
+
+class BaseboardInformation(SmbiosBaseStructure):
+    smbios_structure_type = 2
+
+    def __init__(self, u, sm):
+        super(BaseboardInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
+            self.add_field('product', u.unpack_one("B"), self.fmtstr)
+            self.add_field('version', u.unpack_one("B"), self.fmtstr)
+            self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+
+            if self.length > 0x8:
+                self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
+
+            if self.length > 0x9:
+                self.add_field('feature_flags', u.unpack_one("B"))
+                self.add_field('hosting_board', bool(bitfields.getbits(self.feature_flags, 0)), "feature_flags[0]={}")
+                self.add_field('requires_daughter_card', bool(bitfields.getbits(self.feature_flags, 1)), "feature_flags[1]={}")
+                self.add_field('removable', bool(bitfields.getbits(self.feature_flags, 2)), "feature_flags[2]={}")
+                self.add_field('replaceable', bool(bitfields.getbits(self.feature_flags, 3)), "feature_flags[3]={}")
+                self.add_field('hot_swappable', bool(bitfields.getbits(self.feature_flags, 4)), "feature_flags[4]={}")
+
+            if self.length > 0xA:
+                self.add_field('location', u.unpack_one("B"), self.fmtstr)
+
+            if self.length > 0xB:
+                self.add_field('chassis_handle', u.unpack_one("<H"))
+
+            if self.length > 0xD:
+                self.add_field('board_type', u.unpack_one("B"), unpack.format_table("{}", _board_types))
+
+            if self.length > 0xE:
+                self.add_field('handle_count', u.unpack_one("B"))
+                if self.handle_count > 0:
+                    self.add_field('contained_object_handles', tuple(u.unpack_one("<H") for i in range(self.handle_count)))
+        except:
+            self.decode_failure = True
+            print "Error parsing BaseboardInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemEnclosure(SmbiosBaseStructure):
+    smbios_structure_type = 3
+
+    def __init__(self, u, sm):
+        super(SystemEnclosure, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
+            self.add_field('enumerated_type', u.unpack_one("B"))
+            self.add_field('chassis_lock_present', bool(bitfields.getbits(self.enumerated_type, 7)), "enumerated_type[7]={}")
+            board_types = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Desktop',
+                0x04: 'Low Profile Desktop',
+                0x05: 'Pizza Box',
+                0x06: 'Mini Tower',
+                0x07: 'Tower',
+                0x08: 'Portable',
+                0x09: 'Laptop',
+                0x0A: 'Notebook',
+                0x0B: 'Hand Held',
+                0x0C: 'Docking Station',
+                0x0D: 'All in One',
+                0x0E: 'Sub Notebook',
+                0x0F: 'Space-saving',
+                0x10: 'Lunch Box',
+                0x11: 'Main Server Chassis',
+                0x12: 'Expansion Chassis',
+                0x13: 'SubChassis',
+                0x14: 'Bus Expansion Chassis',
+                0x15: 'Peripheral Chassis',
+                0x16: 'RAID Chassis',
+                0x17: 'Rack Mount Chassis',
+                0x18: 'Sealed-case PC',
+                0x19: 'Multi-system chassis W',
+                0x1A: 'Compact PCI',
+                0x1B: 'Advanced TCA',
+                0x1C: 'Blade',
+                0x1D: 'Blade Enclosure',
+            }
+            self.add_field('system_enclosure_type', bitfields.getbits(self.enumerated_type, 6, 0), unpack.format_table("enumerated_type[6:0]={}", board_types))
+            self.add_field('version', u.unpack_one("B"), self.fmtstr)
+            self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+            self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
+            minor_version_str = str(sm.header.minor_version) # 34 is .34, 4 is .4, 41 is .41; compare ASCIIbetically to compare initial digits rather than numeric value
+            if self.length > 9:
+                chassis_states = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Safe',
+                    0x04: 'Warning',
+                    0x05: 'Critical',
+                    0x06: 'Non-recoverable',
+                }
+                self.add_field('bootup_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states))
+                self.add_field('power_supply_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states))
+                self.add_field('thermal_state', u.unpack_one("B"), unpack.format_table("{}", chassis_states))
+                security_states = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'None',
+                    0x04: 'External interface locked out',
+                    0x05: 'External interface enabled',
+                }
+                self.add_field('security_status', u.unpack_one("B"), unpack.format_table("{}", security_states))
+            if self.length > 0xd:
+                self.add_field('oem_defined', u.unpack_one("<I"))
+            if self.length > 0x11:
+                self.add_field('height', u.unpack_one("B"))
+                self.add_field('num_power_cords', u.unpack_one("B"))
+                self.add_field('contained_element_count', u.unpack_one("B"))
+                self.add_field('contained_element_length', u.unpack_one("B"))
+            if getattr(self, 'contained_element_count', 0):
+                self.add_field('contained_elements', tuple(SystemEnclosureContainedElement(u, self.contained_element_length) for i in range(self.contained_element_count)))
+            if self.length > (0x15 + (getattr(self, 'contained_element_count', 0) * getattr(self, 'contained_element_length', 0))):
+                self.add_field('sku_number', u.unpack_one("B"), self.fmtstr)
+        except:
+            self.decode_failure = True
+            print "Error parsing SystemEnclosure"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemEnclosureContainedElement(unpack.Struct):
+    def __init__(self, u, length):
+        super(SystemEnclosureContainedElement, self).__init__()
+        self.start_offset = u.offset
+        self.raw_data = u.unpack_raw(length)
+        self.u = unpack.Unpackable(self.raw_data)
+        u = self.u
+        self.add_field('contained_element_type', u.unpack_one("B"))
+        type_selections = {
+            0: 'SMBIOS baseboard type enumeration',
+            1: 'SMBIOS structure type enumeration',
+        }
+        self.add_field('type_select', bitfields.getbits(self.contained_element_type, 7), unpack.format_table("contained_element_type[7]={}", type_selections))
+        self.add_field('type', bitfields.getbits(self.contained_element_type, 6, 0))
+        if self.type_select == 0:
+            self.add_field('smbios_board_type', self.type, unpack.format_table("{}", _board_types))
+        else:
+            self.add_field('smbios_structure_type', self.type)
+        self.add_field('minimum', u.unpack_one("B"))
+        self.add_field('maximum', u.unpack_one("B"))
+        if not u.at_end():
+            self.add_field('data', u.unpack_rest())
+        del self.u
+
+class ProcessorInformation(SmbiosBaseStructure):
+    smbios_structure_type = 4
+
+    def __init__(self, u, sm):
+        super(ProcessorInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('socket_designation', u.unpack_one("B"), self.fmtstr)
+            processor_types = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Central Processor',
+                0x04: 'Math Processor',
+                0x05: 'DSP Processor',
+                0x06: 'Video Processor',
+            }
+            self.add_field('processor_type', u.unpack_one("B"), unpack.format_table("{}", processor_types))
+            self.add_field('processor_family', u.unpack_one("B"))
+            self.add_field('processor_manufacturer', u.unpack_one("B"), self.fmtstr)
+            self.add_field('processor_id', u.unpack_one("<Q"))
+            self.add_field('processor_version', u.unpack_one("B"), self.fmtstr)
+            self.add_field('voltage', u.unpack_one("B"))
+            self.add_field('external_clock', u.unpack_one("<H"))
+            self.add_field('max_speed', u.unpack_one("<H"))
+            self.add_field('current_speed', u.unpack_one("<H"))
+            self.add_field('status', u.unpack_one("B"))
+            processor_upgrades = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Daughter Board',
+                0x04: 'ZIF Socket',
+                0x05: 'Replaceable Piggy Back',
+                0x06: 'None',
+                0x07: 'LIF Socket',
+                0x08: 'Slot 1',
+                0x09: 'Slot 2',
+                0x0A: '370-pin socket',
+                0x0B: 'Slot A',
+                0x0C: 'Slot M',
+                0x0D: 'Socket 423',
+                0x0E: 'Socket A (Socket 462)',
+                0x0F: 'Socket 478',
+                0x10: 'Socket 754',
+                0x11: 'Socket 940',
+                0x12: 'Socket 939',
+                0x13: 'Socket mPGA604',
+                0x14: 'Socket LGA771',
+                0x15: 'Socket LGA775',
+                0x16: 'Socket S1',
+                0x17: 'Socket AM2',
+                0x18: 'Socket F (1207)',
+                0x19: 'Socket LGA1366',
+                0x1A: 'Socket G34',
+                0x1B: 'Socket AM3',
+                0x1C: 'Socket C32',
+                0x1D: 'Socket LGA1156',
+                0x1E: 'Socket LGA1567',
+                0x1F: 'Socket PGA988A',
+                0x20: 'Socket BGA1288',
+                0x21: 'Socket rPGA988B',
+                0x22: 'Socket BGA1023',
+                0x23: 'Socket BGA1224',
+                0x24: 'Socket BGA1155',
+                0x25: 'Socket LGA1356',
+                0x26: 'Socket LGA2011',
+                0x27: 'Socket FS1',
+                0x28: 'Socket FS2',
+                0x29: 'Socket FM1',
+                0x2A: 'Socket FM2',
+            }
+            self.add_field('processor_upgrade', u.unpack_one("B"), unpack.format_table("{}", processor_upgrades))
+            if self.length > 0x1A:
+                self.add_field('l1_cache_handle', u.unpack_one("<H"))
+                self.add_field('l2_cache_handle', u.unpack_one("<H"))
+                self.add_field('l3_cache_handle', u.unpack_one("<H"))
+            if self.length > 0x20:
+                self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+                self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
+                self.add_field('part_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x24:
+                self.add_field('core_count', u.unpack_one("B"))
+                self.add_field('core_enabled', u.unpack_one("B"))
+                self.add_field('thread_count', u.unpack_one("B"))
+                self.add_field('processor_characteristics', u.unpack_one("<H"))
+            if self.length > 0x28:
+                self.add_field('processor_family_2', u.unpack_one("<H"))
+            if self.length > 0x2A:
+                self.add_field('core_count2', u.unpack_one("<H"))
+                self.add_field('core_enabled2', u.unpack_one("<H"))
+                self.add_field('thread_count2', u.unpack_one("<H"))
+        except:
+            self.decode_failure = True
+            print "Error parsing Processor Information"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryControllerInformation(SmbiosBaseStructure): #obsolete starting with v2.1
+    smbios_structure_type = 5
+
+    def __init__(self, u, sm):
+        super(MemoryControllerInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            _error_detecting_method = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'None',
+                0x04: '8-bit Parity',
+                0x05: '32-bit ECC',
+                0x06: '64-bit ECC',
+                0x07: '128-bit ECC',
+                0x08: 'CRC'
+                }
+            self.add_field('error_detecting_method', u.unpack_one("B"), unpack.format_table("{}", _error_detecting_method))
+            self.add_field('error_correcting_capability', u.unpack_one("B"))
+            _interleaves = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'One-Way Interleave',
+                0x04: 'Two-Way Interleave',
+                0x05: 'Four-Way Interleave',
+                0x06: 'Eight-Way Interleave',
+                0x07: 'Sixteen-Way Interleave'
+                }
+            self.add_field('supported_interleave', u.unpack_one("B"), unpack.format_table("{}", _interleaves))
+            self.add_field('current_interleave', u.unpack_one("B"), unpack.format_table("{}", _interleaves))
+            self.add_field('max_memory_module_size', u.unpack_one("B"), self.fmtstr)
+            self.add_field('supported_speeds', u.unpack_one("<H"))
+            self.add_field('supported_memory_types', u.unpack_one("<H"))
+            self.add_field('memory_module_voltage', u.unpack_one("B"))
+            self.add_field('req_voltage_b2', bitfields.getbits(self.memory_module_voltage, 2), "memory_module_voltage[2]={}")
+            self.add_field('req_voltage_b1', bitfields.getbits(self.memory_module_voltage, 1), "memory_module_voltage[1]={}")
+            self.add_field('req_voltage_b0', bitfields.getbits(self.memory_module_voltage, 0), "memory_module_voltage[0]={}")
+            self.add_field('num_associated_memory_slots', u.unpack_one("B"))
+            self.add_field('memory_module_configuration_handles', u.unpack_one("<(self.num_associated_memory_slots)H"))
+            self.add_field('enabled_error_correcting_capabilities', u.unpack_one("B"))
+        except:
+            self.decode_failure = True
+            print "Error parsing MemoryControllerInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryModuleInformation(SmbiosBaseStructure): #obsolete starting with v2.1
+    smbios_structure_type = 6
+
+    def __init__(self, u, sm):
+        super(MemoryModuleInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('socket_designation', u.unpack_one("B"), self.fmtstr)
+            self.add_field('bank_connections', u.unpack_one("B"))
+            self.add_field('current_speed', u.unpack_one("B"))
+            self.add_field('current_memory_type', u.unpack_one("<H"))
+            _mem_connection = {
+                0: 'single',
+                1: 'double-bank'
+                }
+            self.add_field('installed_mem', u.unpack_one("B"))
+            self.add_field('installed_size', bitfields.getbits(self.installed_mem, 6, 0), "installed_mem[6:0]={}")
+            self.add_field('installed_memory_module_connection', bitfields.getbits(self.installed_mem, 7), unpack.format_table("installed_mem[7]={}", _mem_connection))
+            self.add_field('enabled_mem', u.unpack_one("B"))
+            self.add_field('enabled_size', bitfields.getbits(self.installed_mem, 6, 0), "enabled_mem[6:0]={}")
+            self.add_field('enabled_memory_module_connection', bitfields.getbits(self.installed_mem, 7), unpack.format_table("enabled_mem[7]={}", _mem_connection))
+            self.add_field('error_status', u.unpack_one("B"))
+            self.add_field('error_status_info_obstained_from_event_log', bool(bitfields.getbits(self.error_status, 2)), unpack.format_table("error_status[2]={}", _mem_connection))
+            self.add_field('correctable_errors_received', bool(bitfields.getbits(self.error_status, 1)), unpack.format_table("error_status[1]={}", _mem_connection))
+            self.add_field('uncorrectable_errors_received', bool(bitfields.getbits(self.error_status, 0)), unpack.format_table("error_status[0]={}", _mem_connection))
+        except:
+            self.decode_failure = True
+            print "Error parsing MemoryModuleInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class CacheInformation(SmbiosBaseStructure):
+    smbios_structure_type = 7
+
+    def __init__(self, u, sm):
+        super(CacheInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('socket_designation', u.unpack_one("B"), self.fmtstr)
+            processor_types = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Central Processor',
+                0x04: 'Math Processor',
+                0x05: 'DSP Processor',
+                0x06: 'Video Processor',
+            }
+            self.add_field('cache_configuration', u.unpack_one("<H"))
+            _operational_mode = {
+                0b00: 'Write Through',
+                0b01: 'Write Back',
+                0b10: 'Varies with Memory Address',
+                0b11: 'Unknown'
+                }
+            self.add_field('operational_mode', bitfields.getbits(self.cache_configuration, 9, 8), unpack.format_table("cache_configuration[9:8]={}", _operational_mode))
+            self.add_field('enabled_at_boot_time', bool(bitfields.getbits(self.cache_configuration, 7)), "cache_configuration[7]={}")
+            _location = {
+                0b00: 'Internal',
+                0b01: 'External',
+                0b10: 'Reserved',
+                0b11: 'Unknown'
+                }
+            self.add_field('location_relative_to_cpu_module', bitfields.getbits(self.cache_configuration, 6, 5), unpack.format_table("cache_configuration[6:5]={}", _location))
+            self.add_field('cache_socketed', bool(bitfields.getbits(self.cache_configuration, 3)), "cache_configuration[3]={}")
+            self.add_field('cache_level', bitfields.getbits(self.cache_configuration, 2, 0), "cache_configuration[2:0]={}")
+            self.add_field('max_cache_size', u.unpack_one("<H"))
+            _granularity = {
+                0: '1K granularity',
+                1: '64K granularity'
+                }
+            self.add_field('max_granularity', bitfields.getbits(self.cache_configuration, 15), unpack.format_table("max_cache_size[15]={}", _granularity))
+            self.add_field('max_size_in_granularity', bitfields.getbits(self.cache_configuration, 14, 0), "max_cache_size[14, 0]={}")
+            self.add_field('installed_size', u.unpack_one("<H"))
+            if self.installed_size != 0:
+                self.add_field('installed_granularity', bitfields.getbits(self.cache_configuration, 15), unpack.format_table("installed_size[15]={}", _granularity))
+                self.add_field('installed_size_in_granularity', bitfields.getbits(self.cache_configuration, 14, 0), "installed_size[14, 0]={}")
+            self.add_field('supported_sram_type', u.unpack_one("<H"))
+            self.add_field('current_sram_type', u.unpack_one("<H"))
+            if self.length > 0x0F:
+                self.add_field('cache_speed', u.unpack_one("B"))
+            if self.length > 0x10:
+                _error_correction = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'None',
+                    0x04: 'Parity',
+                    0x05: 'Single-bit ECC',
+                    0x06: 'Multi-bit ECC'
+                    }
+                self.add_field('error_correction', u.unpack_one("B"), unpack.format_table("{}", _error_correction))
+            if self.length > 0x10:
+                _system_cache_type = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Instruction',
+                    0x04: 'Data',
+                    0x05: 'Unified'
+                    }
+                self.add_field('system_cache_type', u.unpack_one("B"), unpack.format_table("{}", _system_cache_type))
+            if self.length > 0x12:
+                _associativity = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Direct Mapped',
+                    0x04: '2-way Set-Associative',
+                    0x05: '4-way Set-Associative',
+                    0x06: 'Fully Associative',
+                    0x07: '8-way Set-Associative',
+                    0x08: '16-way Set-Associative',
+                    0x09: '12-way Set-Associative',
+                    0x0A: '24-way Set-Associative',
+                    0x0B: '32-way Set-Associative',
+                    0x0C: '48-way Set-Associative',
+                    0x0D: '64-way Set-Associative',
+                    0x0E: '20-way Set-Associative'
+                    }
+                self.add_field('associativity', u.unpack_one("B"), unpack.format_table("{}", _associativity))
+
+        except:
+            self.decode_failure = True
+            print "Error parsing CacheInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class PortConnectorInfo(SmbiosBaseStructure):
+    smbios_structure_type = 8
+
+    def __init__(self, u, sm):
+        super(PortConnectorInfo, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('internal_reference_designator', u.unpack_one("B"), self.fmtstr)
+            connector_types = {
+                0x00: 'None',
+                0x01: 'Centronics',
+                0x02: 'Mini Centronics',
+                0x03: 'Proprietary',
+                0x04: 'DB-25 pin male',
+                0x05: 'DB-25 pin female',
+                0x06: 'DB-15 pin male',
+                0x07: 'DB-15 pin female',
+                0x08: 'DB-9 pin male',
+                0x09: 'DB-9 pin female',
+                0x0A: 'RJ-11',
+                0x0B: 'RJ-45',
+                0x0C: '50-pin MiniSCSI',
+                0x0D: 'Mini-DIN',
+                0x0E: 'Micro-DIN',
+                0x0F: 'PS/2',
+                0x10: 'Infrared',
+                0x11: 'HP-HIL',
+                0x12: 'Access Bus (USB)',
+                0x13: 'SSA SCSI',
+                0x14: 'Circular DIN-8 male',
+                0x15: 'Circular DIN-8 female',
+                0x16: 'On Board IDE',
+                0x17: 'On Board Floppy',
+                0x18: '9-pin Dual Inline (pin 10 cut)',
+                0x19: '25-pin Dual Inline (pin 26 cut)',
+                0x1A: '50-pin Dual Inline',
+                0x1B: '68-pin Dual Inline',
+                0x1C: 'On Board Sound Input from CD-ROM',
+                0x1D: 'Mini-Centronics Type-14',
+                0x1E: 'Mini-Centronics Type-26',
+                0x1F: 'Mini-jack (headphones)',
+                0x20: 'BNC',
+                0x21: '1394',
+                0x22: 'SAS/SATA Plug Receptacle',
+                0xA0: 'PC-98',
+                0xA1: 'PC-98Hireso',
+                0xA2: 'PC-H98',
+                0xA3: 'PC-98Note',
+                0xA4: 'PC-98Full',
+                0xFF: 'Other',
+            }
+            self.add_field('internal_connector_type', u.unpack_one("B"), unpack.format_table("{}", connector_types))
+            self.add_field('external_reference_designator', u.unpack_one("B"), self.fmtstr)
+            self.add_field('external_connector_type', u.unpack_one("B"), unpack.format_table("{}", connector_types))
+            port_types = {
+                0x00: 'None',
+                0x01: 'Parallel Port XT/AT Compatible',
+                0x02: 'Parallel Port PS/2',
+                0x03: 'Parallel Port ECP',
+                0x04: 'Parallel Port EPP',
+                0x05: 'Parallel Port ECP/EPP',
+                0x06: 'Serial Port XT/AT Compatible',
+                0x07: 'Serial Port 16450 Compatible',
+                0x08: 'Serial Port 16550 Compatible',
+                0x09: 'Serial Port 16550A Compatible',
+                0x0A: 'SCSI Port',
+                0x0B: 'MIDI Port',
+                0x0C: 'Joy Stick Port',
+                0x0D: 'Keyboard Port',
+                0x0E: 'Mouse Port',
+                0x0F: 'SSA SCSI',
+                0x10: 'USB',
+                0x11: 'FireWire (IEEE P1394)',
+                0x12: 'PCMCIA Type I2',
+                0x13: 'PCMCIA Type II',
+                0x14: 'PCMCIA Type III',
+                0x15: 'Cardbus',
+                0x16: 'Access Bus Port',
+                0x17: 'SCSI II',
+                0x18: 'SCSI Wide',
+                0x19: 'PC-98',
+                0x1A: 'PC-98-Hireso',
+                0x1B: 'PC-H98',
+                0x1C: 'Video Port',
+                0x1D: 'Audio Port',
+                0x1E: 'Modem Port',
+                0x1F: 'Network Port',
+                0x20: 'SATA',
+                0x21: 'SAS',
+                0xA0: '8251 Compatible',
+                0xA1: '8251 FIFO Compatible',
+                0xFF: 'Other',
+            }
+            self.add_field('port_type', u.unpack_one("B"), unpack.format_table("{}", port_types))
+        except:
+            self.decodeFailure = True
+            print "Error parsing PortConnectorInfo"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemSlots(SmbiosBaseStructure):
+    smbios_structure_type = 9
+
+    def __init__(self, u, sm):
+        super(SystemSlots, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('designation', u.unpack_one("B"), self.fmtstr)
+            _slot_types = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'ISA',
+                0x04: 'MCA',
+                0x05: 'EISA',
+                0x06: 'PCI',
+                0x07: 'PC Card (PCMCIA)',
+                0x08: 'VL-VESA',
+                0x09: 'Proprietary',
+                0x0A: 'Processor Card Slot',
+                0x0B: 'Proprietary Memory Card Slot',
+                0x0C: 'I/O Riser Card Slot',
+                0x0D: 'NuBus',
+                0x0E: 'PCI 66MHz Capable',
+                0x0F: 'AGP',
+                0x10: 'AGP 2X',
+                0x11: 'AGP 4X',
+                0x12: 'PCI-X',
+                0x13: 'AGP 8X',
+                0xA0: 'PC-98/C20',
+                0xA1: 'PC-98/C24',
+                0xA2: 'PC-98/E',
+                0xA3: 'PC-98/Local Bus',
+                0xA4: 'PC-98/Card',
+                0xA5: 'PCI Express',
+                0xA6: 'PCI Express x1',
+                0xA7: 'PCI Express x2',
+                0xA8: 'PCI Express x4',
+                0xA9: 'PCI Express x8',
+                0xAA: 'PCI Express x16',
+                0xAB: 'PCI Express Gen 2',
+                0xAC: 'PCI Express Gen 2 x1',
+                0xAD: 'PCI Express Gen 2 x2',
+                0xAE: 'PCI Express Gen 2 x4',
+                0xAF: 'PCI Express Gen 2 x8',
+                0xB0: 'PCI Express Gen 2 x16',
+                0xB1: 'PCI Express Gen 3',
+                0xB2: 'PCI Express Gen 3 x1',
+                0xB3: 'PCI Express Gen 3 x2',
+                0xB4: 'PCI Express Gen 3 x4',
+                0xB5: 'PCI Express Gen 3 x8',
+                0xB6: 'PCI Express Gen 3 x16',
+            }
+            self.add_field('slot_type', u.unpack_one("B"), unpack.format_table("{}", _slot_types))
+            _slot_data_bus_widths = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: '8 bit',
+                0x04: '16 bit',
+                0x05: '32 bit',
+                0x06: '64 bit',
+                0x07: '128 bit',
+                0x08: '1x or x1',
+                0x09: '2x or x2',
+                0x0A: '4x or x4',
+                0x0B: '8x or x8',
+                0x0C: '12x or x12',
+                0x0D: '16x or x16',
+                0x0E: '32x or x32',
+            }
+            self.add_field('slot_data_bus_width', u.unpack_one('B'), unpack.format_table("{}", _slot_data_bus_widths))
+            _current_usages = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Available',
+                0x04: 'In use',
+            }
+            self.add_field('current_usage', u.unpack_one('B'), unpack.format_table("{}", _current_usages))
+            _slot_lengths = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Short Length',
+                0x04: 'Long Length',
+            }
+            self.add_field('slot_length', u.unpack_one('B'), unpack.format_table("{}", _slot_lengths))
+            self.add_field('slot_id', u.unpack_one('<H'))
+            self.add_field('characteristics1', u.unpack_one('B'))
+            self.add_field('characteristics_unknown', bool(bitfields.getbits(self.characteristics1, 0)), "characteristics1[0]={}")
+            self.add_field('provides_5_0_volts', bool(bitfields.getbits(self.characteristics1, 1)), "characteristics1[1]={}")
+            self.add_field('provides_3_3_volts', bool(bitfields.getbits(self.characteristics1, 2)), "characteristics1[2]={}")
+            self.add_field('shared_slot', bool(bitfields.getbits(self.characteristics1, 3)), "characteristics1[3]={}")
+            self.add_field('supports_pc_card_16', bool(bitfields.getbits(self.characteristics1, 4)), "characteristics1[4]={}")
+            self.add_field('supports_cardbus', bool(bitfields.getbits(self.characteristics1, 5)), "characteristics1[5]={}")
+            self.add_field('supports_zoom_video', bool(bitfields.getbits(self.characteristics1, 6)), "characteristics1[6]={}")
+            self.add_field('supports_modem_ring_resume', bool(bitfields.getbits(self.characteristics1, 7)), "characteristics1[7]={}")
+            if self.length > 0x0C:
+                self.add_field('characteristics2', u.unpack_one('B'))
+                self.add_field('supports_PME', bool(bitfields.getbits(self.characteristics2, 0)), "characteristics2[0]={}")
+                self.add_field('supports_hot_plug', bool(bitfields.getbits(self.characteristics2, 1)), "characteristics2[1]={}")
+                self.add_field('supports_smbus', bool(bitfields.getbits(self.characteristics2, 2)), "characteristics2[2]={}")
+            if self.length > 0x0D:
+                self.add_field('segment_group_number', u.unpack_one('<H'))
+                self.add_field('bus_number', u.unpack_one('B'))
+                self.add_field('device_function_number', u.unpack_one('B'))
+                self.add_field('device_number', bitfields.getbits(self.device_function_number, 7, 3), "device_function_number[7:3]={}")
+                self.add_field('function_number', bitfields.getbits(self.device_function_number, 2, 0), "device_function_number[2:0]={}")
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemSlots"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class OnBoardDevicesInformation(SmbiosBaseStructure):
+    smbios_structure_type = 10
+
+    def __init__(self, u, sm):
+        super(OnBoardDevicesInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('device_type', u.unpack_one("B"))
+            self.add_field('device_enabled', bool(bitfields.getbits(self.device_type, 7)), "device_type[7]={}")
+            _device_types = {
+                0x01: 'Other',
+                0x02: 'Unknown',
+                0x03: 'Video',
+                0x04: 'SCSI Controller',
+                0x05: 'Ethernet',
+                0x06: 'Token Ring',
+                0x07: 'Sound',
+                0x08: 'PATA Controller',
+                0x09: 'SATA Controller',
+                0x0A: 'SAS Controller'
+            }
+            self.add_field('type_of_device', bitfields.getbits(self.device_type, 6, 0), unpack.format_table("device_type[6:0]={}", _device_types))
+            self.add_field('description_string', u.unpack_one("B"), self.fmtstr)
+        except:
+            self.decodeFailure = True
+            print "Error parsing OnBoardDevicesInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class OEMStrings(SmbiosBaseStructure):
+    smbios_structure_type = 11
+
+    def __init__(self, u, sm):
+        super(OEMStrings, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('count', u.unpack_one("B"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing OEMStrings"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemConfigOptions(SmbiosBaseStructure):
+    smbios_structure_type = 12
+
+    def __init__(self, u, sm):
+        super(SystemConfigOptions, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('count', u.unpack_one("B"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemConfigOptions"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class BIOSLanguageInformation(SmbiosBaseStructure):
+    smbios_structure_type = 13
+
+    def __init__(self, u, sm):
+        super(BIOSLanguageInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('installable_languages', u.unpack_one("B"))
+            if self.length > 0x05:
+                self.add_field('flags', u.unpack_one('B'))
+                self.add_field('abbreviated_format', bool(bitfields.getbits(self.flags, 0)), "flags[0]={}")
+            if self.length > 0x6:
+                u.skip(15)
+                self.add_field('current_language', u.unpack_one('B'), self.fmtstr)
+        except:
+            self.decodeFailure = True
+            print "Error parsing BIOSLanguageInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class GroupAssociations(SmbiosBaseStructure):
+    smbios_structure_type = 14
+
+    def __init__(self, u, sm):
+        super(GroupAssociations, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('group_name', u.unpack_one("B"), self.fmtstr)
+            self.add_field('item_type', u.unpack_one('B'))
+            self.add_field('item_handle', u.unpack_one('<H'))
+        except:
+            self.decodeFailure = True
+            print "Error parsing GroupAssociations"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemEventLog(SmbiosBaseStructure):
+    smbios_structure_type = 15
+
+    def __init__(self, u, sm):
+        super(SystemEventLog, self).__init__(u, sm)
+        u = self.u
+        try:
+            self.add_field('log_area_length', u.unpack_one("<H"))
+            self.add_field('log_header_start_offset', u.unpack_one('<H'))
+            self.add_field('log_data_start_offset', u.unpack_one('<H'))
+            _access_method = {
+                0x00: 'Indexed I/O: 1 8-bit index port, 1 8-bit data port',
+                0x01: 'Indexed I/O: 2 8-bit index ports, 1 8-bit data port',
+                0x02: 'Indexed I/O: 1 16-bit index port, 1 8-bit data port',
+                0x03: 'Memory-mapped physical 32-bit address',
+                0x04: 'Available through General-Purpose NonVolatile Data functions',
+                xrange(0x05, 0x07F): 'Available for future assignment',
+                xrange(0x80, 0xFF): 'BIOS Vendor/OEM-specific'
+                }
+            self.add_field('access_method', u.unpack_one('B'), unpack.format_table("{}", _access_method))
+            self.add_field('log_status', u.unpack_one('B'))
+            self.add_field('log_area_full', bool(bitfields.getbits(self.log_status, 1)), "log_status[1]={}")
+            self.add_field('log_area_valid', bool(bitfields.getbits(self.log_status, 0)), "log_status[0]={}")
+            self.add_field('log_change_token', u.unpack_one('<I'))
+            self.add_field('access_method_address', u.unpack_one('<I'))
+            if self.length > 0x14:
+                _log_header_formats = {
+                    0: 'No header',
+                    1: 'Type 1 log header',
+                    xrange(2, 0x7f): 'Available for future assignment',
+                    xrange(0x80, 0xff): 'BIOS vendor or OEM-specific format'
+                    }
+                self.add_field('log_header_format', u.unpack_one("B"), unpack.format_table("{}", _log_header_formats))
+            if self.length > 0x15:
+                self.add_field('num_supported_log_type_descriptors', u.unpack_one('B'))
+            if self.length > 0x16:
+                self.add_field('length_log_type_descriptor', u.unpack_one('B'))
+            if self.length != (0x17 + (self.num_supported_log_type_descriptors * self.length_log_type_descriptor)):
+                print "Error: structure length ({}) != 0x17 + (num_supported_log_type_descriptors ({}) * length_log_type_descriptor({}))".format(self.length, self.num_supported_log_type_descriptors, self.length_log_type_descriptor)
+                print "structure length = {}".format(self.length)
+                print "num_supported_log_type_descriptors = {}".format(self.num_supported_log_type_descriptors)
+                print "length_log_type_descriptor = {}".format(self.length_log_type_descriptor)
+                self.decodeFailure = True
+            self.add_field('descriptors', tuple(EventLogDescriptor.unpack(u) for i in range(self.num_supported_log_type_descriptors)), unpack.format_each("\n{!r}"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemEventLog"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class EventLogDescriptor(unpack.Struct):
+    @staticmethod
+    def _unpack(u):
+        _event_log_type_descriptors = {
+            0x00: 'Reserved',
+            0x01: 'Single-bit ECC memory error',
+            0x02: 'Multi-bit ECC memory error',
+            0x03: 'Parity memory error',
+            0x04: 'Bus time-out',
+            0x05: 'I/O Channel Check',
+            0x06: 'Software NMI',
+            0x07: 'POST Memory Resize',
+            0x08: 'POST Error',
+            0x09: 'PCI Parity Error',
+            0x0A: 'PCI System Error',
+            0x0B: 'CPU Failure',
+            0x0C: 'EISA FailSafe Timer time-out',
+            0x0D: 'Correctable memory log disabled',
+            0x0E: 'Logging disabled for a specific Event Type - too many errors of the same type received in a short amount of time',
+            0x0F: 'Reserved',
+            0x10: 'System Limit Exceeded',
+            0x11: 'Asynchronous hardware timer expired and issued a system reset',
+            0x12: 'System configuration information',
+            0x13: 'Hard-disk information',
+            0x14: 'System reconfigured',
+            0x15: 'Uncorrectable CPU-complex error',
+            0x16: 'Log Area Reset/Cleared',
+            0x17: 'System boot',
+            xrange(0x18, 0x7F): 'Unused, available for assignment',
+            xrange(0x80, 0xFE): 'Available for system- and OEM-specific assignments',
+            0xFF: 'End of log'
+        }
+        yield 'log_type', u.unpack_one('B'), unpack.format_table("{}", _event_log_type_descriptors)
+        _event_log_format = {
+            0x00: 'None',
+            0x01: 'Handle',
+            0x02: 'Multiple-Event',
+            0x03: 'Multiple-Event Handle',
+            0x04: 'POST Results Bitmap',
+            0x05: 'System Management Type',
+            0x06: 'Multiple-Event System Management Type',
+            xrange(0x80, 0xFF): 'OEM assigned'
+        }
+        yield 'variable_data_format_type', u.unpack_one('B'), unpack.format_table("{}", _event_log_format)
+
+class PhysicalMemoryArray(SmbiosBaseStructure):
+    smbios_structure_type = 16
+
+    def __init__(self, u, sm):
+        super(PhysicalMemoryArray, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                _location_field = {
+                    0x01: "Other",
+                    0x02: "Unknown",
+                    0x03: "System board or motherboard",
+                    0x04: "ISA add-on card",
+                    0x05: "EISA add-on card",
+                    0x06: "PCI add-on card",
+                    0x07: "MCA add-on card",
+                    0x08: "PCMCIA add-on card",
+                    0x09: "Proprietary add-on card",
+                    0x0A: "NuBus",
+                    0xA0: "PC-98/C20 add-on card",
+                    0xA1: "PC-98/C24 add-on card",
+                    0xA2: "PC-98/E add-on card",
+                    0xA3: "PC-98/Local bus add-on card"
+                    }
+                self.add_field('location', u.unpack_one("B"), unpack.format_table("{}", _location_field))
+            if self.length > 0x05:
+                _use = {
+                    0x01: "Other",
+                    0x02: "Unknown",
+                    0x03: "System memory",
+                    0x04: "Video memory",
+                    0x05: "Flash memory",
+                    0x06: "Non-volatile RAM",
+                    0x07: "Cache memory"
+                    }
+                self.add_field('use', u.unpack_one('B'), unpack.format_table("{}", _use))
+            if self.length > 0x06:
+                _error_correction = {
+                    0x01: "Other",
+                    0x02: "Unknown",
+                    0x03: "None",
+                    0x04: "Parity",
+                    0x05: "Single-bit ECC",
+                    0x06: "Multi-bit ECC",
+                    0x07: "CRC"
+                    }
+                self.add_field('memory_error_correction', u.unpack_one('B'), unpack.format_table("{}", _error_correction))
+            if self.length > 0x07:
+                self.add_field('maximum_capacity', u.unpack_one('<I'))
+            if self.length > 0x0B:
+                self.add_field('memory_error_information_handle', u.unpack_one('<H'))
+            if self.length > 0x0D:
+                self.add_field('num_memory_devices', u.unpack_one('<H'))
+            if self.length > 0x0F:
+                self.add_field('extended_maximum_capacity', u.unpack_one('<Q'))
+        except:
+            self.decodeFailure = True
+            print "Error parsing PhysicalMemoryArray"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryDevice(SmbiosBaseStructure):
+    smbios_structure_type = 17
+
+    def __init__(self, u, sm):
+        super(MemoryDevice, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('physical_memory_array_handle', u.unpack_one("<H"))
+            if self.length > 0x6:
+                self.add_field('memory_error_information_handle', u.unpack_one("<H"))
+            if self.length > 0x8:
+                self.add_field('total_width', u.unpack_one("<H"))
+            if self.length > 0xA:
+                self.add_field('data_width', u.unpack_one("<H"))
+            if self.length > 0xC:
+                self.add_field('size', u.unpack_one("<H"))
+            if self.length > 0xE:
+                _form_factors = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'SIMM',
+                    0x04: 'SIP',
+                    0x05: 'Chip',
+                    0x06: 'DIP',
+                    0x07: 'ZIP',
+                    0x08: 'Proprietary Card',
+                    0x09: 'DIMM',
+                    0x0A: 'TSOP',
+                    0x0B: 'Row of chips',
+                    0x0C: 'RIMM',
+                    0x0D: 'SODIMM',
+                    0x0E: 'SRIMM',
+                    0x0F: 'FB-DIMM'
+                    }
+                self.add_field('form_factor', u.unpack_one("B"), unpack.format_table("{}", _form_factors))
+            if self.length > 0xF:
+                self.add_field('device_set', u.unpack_one("B"))
+            if self.length > 0x10:
+                self.add_field('device_locator', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x11:
+                self.add_field('bank_locator', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x12:
+                _memory_types = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'DRAM',
+                    0x04: 'EDRAM',
+                    0x05: 'VRAM',
+                    0x06: 'SRAM',
+                    0x07: 'RAM',
+                    0x08: 'ROM',
+                    0x09: 'FLASH',
+                    0x0A: 'EEPROM',
+                    0x0B: 'FEPROM',
+                    0x0C: 'EPROM',
+                    0x0D: 'CDRAM',
+                    0x0E: '3DRAM',
+                    0x0F: 'SDRAM',
+                    0x10: 'SGRAM',
+                    0x11: 'RDRAM',
+                    0x12: 'DDR',
+                    0x13: 'DDR2',
+                    0x14: 'DDR2 FB-DIMM',
+                    xrange(0x15, 0x17): 'Reserved',
+                    0x18: 'DDR3',
+                    0x19: 'FBD2'
+                    }
+                self.add_field('memory_type', u.unpack_one("B"), unpack.format_table("{}", _memory_types))
+            if self.length > 0x13:
+                self.add_field('type_detail', u.unpack_one('<H'))
+            if self.length > 0x15:
+                self.add_field('speed', u.unpack_one("<H"))
+            if self.length > 0x17:
+                self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x18:
+                self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x19:
+                self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x1A:
+                self.add_field('part_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x1B:
+                self.add_field('attributes', u.unpack_one("B"))
+                self.add_field('rank', bitfields.getbits(self.attributes, 3, 0), "attributes[3:0]={}")
+            if self.length > 0x1C:
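+                # Per the SMBIOS spec, a Size of 0x7FFF means the actual size
+                # is reported in the 32-bit Extended Size field read below.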
+                if self.size == 0x7FFF:
+                    self.add_field('extended_size', u.unpack_one('<I'))
+                    self.add_field('mem_size', bitfields.getbits(self.type_detail, 30, 0), "type_detail[30:0]={}")
+                else:
+                    u.skip(4)
+            if self.length > 0x20:
+                self.add_field('configured_memory_clock_speed', u.unpack_one("<H"))
+            if self.length > 0x22:
+                self.add_field('minimum_voltage', u.unpack_one("<H"))
+            if self.length > 0x24:
+                self.add_field('maximum_voltage', u.unpack_one("<H"))
+            if self.length > 0x26:
+                self.add_field('configured_voltage', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing MemoryDevice"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryErrorInfo32Bit(SmbiosBaseStructure):
+    smbios_structure_type = 18
+
+    def __init__(self, u, sm):
+        super(MemoryErrorInfo32Bit, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                _error_types = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'OK',
+                    0x04: 'Bad read',
+                    0x05: 'Parity error',
+                    0x06: 'Single-bit error',
+                    0x07: 'Double-bit error',
+                    0x08: 'Multi-bit error',
+                    0x09: 'Nibble error',
+                    0x0A: 'Checksum error',
+                    0x0B: 'CRC error',
+                    0x0C: 'Corrected single-bit error',
+                    0x0D: 'Corrected error',
+                    0x0E: 'Uncorrectable error'
+                    }
+                self.add_field('error_type', u.unpack_one("B"), unpack.format_table("{}", _error_types))
+            if self.length > 0x5:
+                _error_granularity_field = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Device level',
+                    0x04: 'Memory partition level'
+                    }
+                self.add_field('error_granularity', u.unpack_one("B"), unpack.format_table("{}", _error_granularity_field))
+            if self.length > 0x6:
+                _error_operation_field = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Read',
+                    0x04: 'Write',
+                    0x05: 'Partial write'
+                    }
+                self.add_field('error_operation', u.unpack_one("B"), unpack.format_table("{}", _error_operation_field))
+            if self.length > 0x7:
+                self.add_field('vendor_syndrome', u.unpack_one("<I"))
+            if self.length > 0xB:
+                self.add_field('memory_array_error_address', u.unpack_one("<I"))
+            if self.length > 0xF:
+                self.add_field('device_error_address', u.unpack_one("<I"))
+            if self.length > 0x13:
+                self.add_field('error_resolution', u.unpack_one("<I"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing MemoryErrorInfo32Bit"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryArrayMappedAddress(SmbiosBaseStructure):
+    smbios_structure_type = 19
+
+    def __init__(self, u, sm):
+        super(MemoryArrayMappedAddress, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('starting_address', u.unpack_one("<I"))
+                # if FFFF FFFF: address stored in Extended Starting Address
+            if self.length > 0x8:
+                self.add_field('ending_address', u.unpack_one("<I"))
+            if self.length > 0xC:
+                self.add_field('memory_array_handle', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('partition_width', u.unpack_one("B"))
+            if self.length > 0xF:
+                # valid if starting_address = FFFF FFFF
+                if self.starting_address == 0xFFFFFFFF:
+                    self.add_field('extended_starting_address', u.unpack_one("<Q"))
+                    if self.length > 0x17:
+                        self.add_field('extended_ending_address', u.unpack_one("<Q"))
+                else:
+                    u.skip(16)
+
+        except:
+            self.decodeFailure = True
+            print "Error parsing MemoryArrayMappedAddress"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryDeviceMappedAddress(SmbiosBaseStructure):
+    smbios_structure_type = 20
+
+    def __init__(self, u, sm):
+        super(MemoryDeviceMappedAddress, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('starting_address', u.unpack_one("<I"))
+                # if FFFF FFFF: address stored in Extended Starting Address
+            if self.length > 0x8:
+                self.add_field('ending_address', u.unpack_one("<I"))
+            if self.length > 0xC:
+                self.add_field('memory_device_handle', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('memory_array_mapped_address_handle', u.unpack_one("<H"))
+            if self.length > 0x10:
+                self.add_field('partition_row_position', u.unpack_one("B"))
+            if self.length > 0x11:
+                self.add_field('interleave_position', u.unpack_one("B"))
+            if self.length > 0x12:
+                self.add_field('interleave_data_depth', u.unpack_one("B"))
+            if self.length > 0x13:
+                # valid if starting_address = FFFF FFFF
+                if self.starting_address == 0xFFFFFFFF:
+                    self.add_field('extended_starting_address', u.unpack_one("<Q"))
+                    if self.length > 0x1B:
+                        self.add_field('extended_ending_address', u.unpack_one("<Q"))
+                else:
+                    u.skip(16)
+        except:
+            self.decodeFailure = True
+            print "Error parsing MemoryDeviceMappedAddress"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class BuiltInPointingDevice(SmbiosBaseStructure):
+    smbios_structure_type = 21
+
+    def __init__(self, u, sm):
+        super(BuiltInPointingDevice, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                _pointing_device_types = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Mouse',
+                    0x04: 'Track Ball',
+                    0x05: 'Track Point',
+                    0x06: 'Glide Point',
+                    0x07: 'Touch Pad',
+                    0x08: 'Touch Screen',
+                    0x09: 'Optical Sensor'
+                    }
+                self.add_field('pointing_device_type', u.unpack_one("B"), unpack.format_table("{}", _pointing_device_types))
+            if self.length > 0x5:
+                _interfaces = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Serial',
+                    0x04: 'PS/2',
+                    0x05: 'Infrared',
+                    0x06: 'HP-HIL',
+                    0x07: 'Bus mouse',
+                    0x08: 'ADB (Apple Desktop Bus)',
+                    0x09: 'Bus mouse DB-9',
+                    0x0A: 'Bus mouse micro-DIN',
+                    0x0B: 'USB'
+                    }
+                self.add_field('interface', u.unpack_one("B"), unpack.format_table("{}", _interfaces))
+            if self.length > 0x6:
+                self.add_field('num_buttons', u.unpack_one("B"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing BuiltInPointingDevice"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class PortableBattery(SmbiosBaseStructure):
+    smbios_structure_type = 22
+
+    def __init__(self, u, sm):
+        super(PortableBattery, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('location', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x6:
+                self.add_field('manufacturer_date', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x7:
+                self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x8:
+                self.add_field('device_name', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x9:
+                _device_chemistry = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Lead Acid',
+                    0x04: 'Nickel Cadmium',
+                    0x05: 'Nickel metal hydride',
+                    0x06: 'Lithium-ion',
+                    0x07: 'Zinc air',
+                    0x08: 'Lithium Polymer'
+                    }
+                self.add_field('device_chemistry', u.unpack_one("B"), unpack.format_table("{}", _device_chemistry))
+            if self.length > 0xA:
+                self.add_field('design_capacity', u.unpack_one("<H"))
+            if self.length > 0xC:
+                self.add_field('design_voltage', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('sbds_version_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0xF:
+                self.add_field('max_error_battery_data', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x10:
+                if self.serial_number == 0:
+                    self.add_field('sbds_serial_number', u.unpack_one("<H"))
+                else:
+                    u.skip(2)
+            if self.length > 0x12:
+                if self.manufacturer_date == 0:
+                    self.add_field('sbds_manufacture_date', u.unpack_one("<H"))
+                    self.add_field('year_biased_by_1980', bitfields.getbits(self.sbds_manufacture_date, 15, 9), "sbds_manufacture_date[15:9]={}")
+                    self.add_field('month', bitfields.getbits(self.sbds_manufacture_date, 8, 5), "sbds_manufacture_date[8:5]={}")
+                    self.add_field('date', bitfields.getbits(self.sbds_manufacture_date, 4, 0), "sbds_manufacture_date[4:0]={}")
+                else:
+                    u.skip(2)
+            if self.length > 0x14:
+                if self.device_chemistry == 0x02:
+                    self.add_field('sbds_device_chemistry', u.unpack_one("B"), self.fmtstr)
+                else:
+                    u.skip(1)
+            if self.length > 0x15:
+                self.add_field('design_capacity_multiplier', u.unpack_one("B"))
+            if self.length > 0x16:
+                self.add_field('oem_specific', u.unpack_one("<I"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing PortableBattery"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemReset(SmbiosBaseStructure):
+    smbios_structure_type = 23
+
+    def __init__(self, u, sm):
+        super(SystemReset, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('capabilities', u.unpack_one("B"))
+                self.add_field('contains_watchdog_timer', bool(bitfields.getbits(self.capabilities, 5)), "capabilities[5]={}")
+                _boot_option = {
+                    0b00: 'Reserved, do not use',
+                    0b01: 'Operating System',
+                    0b10: 'System utilities',
+                    0b11: 'Do not reboot'
+                    }
+                self.add_field('boot_option_on_limit', bitfields.getbits(self.capabilities, 4, 3), unpack.format_table("capabilities[4:3]={}", _boot_option))
+                self.add_field('boot_option_after_watchdog_reset', bitfields.getbits(self.capabilities, 2, 1), unpack.format_table("capabilities[2:1]={}", _boot_option))
+                self.add_field('system_reset_enabled_by_user', bool(bitfields.getbits(self.capabilities, 0)), "capabilities[0]={}")
+            if self.length > 0x5:
+                self.add_field('reset_count', u.unpack_one("<H"))
+            if self.length > 0x7:
+                self.add_field('reset_limit', u.unpack_one("<H"))
+            if self.length > 0x9:
+                self.add_field('timer_interval', u.unpack_one("<H"))
+            if self.length > 0xB:
+                self.add_field('timeout', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemReset"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class HardwareSecurity(SmbiosBaseStructure):
+    smbios_structure_type = 24
+
+    def __init__(self, u, sm):
+        super(HardwareSecurity, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('hardware_security_settings', u.unpack_one("B"))
+                _status = {
+                    0x00: 'Disabled',
+                    0x01: 'Enabled',
+                    0x02: 'Not Implemented',
+                    0x03: 'Unknown'
+                    }
+                self.add_field('power_on_password_status', bitfields.getbits(self.hardware_security_settings, 7, 6), unpack.format_table("hardware_security_settings[7:6]={}", _status))
+                self.add_field('keyboard_password_status', bitfields.getbits(self.hardware_security_settings, 5, 4), unpack.format_table("hardware_security_settings[5:4]={}", _status))
+                self.add_field('admin_password_status', bitfields.getbits(self.hardware_security_settings, 3, 2), unpack.format_table("hardware_security_settings[3:2]={}", _status))
+                self.add_field('front_panel_reset_status', bitfields.getbits(self.hardware_security_settings, 1, 0), unpack.format_table("hardware_security_settings[1:0]={}", _status))
+        except:
+            self.decodeFailure = True
+            print "Error parsing HardwareSecurity"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemPowerControls(SmbiosBaseStructure):
+    smbios_structure_type = 25
+
+    def __init__(self, u, sm):
+        super(SystemPowerControls, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('next_scheduled_poweron_month', u.unpack_one("B"))
+                self.add_field('next_scheduled_poweron_day_of_month', u.unpack_one("B"))
+                self.add_field('next_scheduled_poweron_hour', u.unpack_one("B"))
+                self.add_field('next_scheduled_poweron_minute', u.unpack_one("B"))
+                self.add_field('next_scheduled_poweron_second', u.unpack_one("B"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemPowerControls"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
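+# The probe structures that follow (types 26, 28 and 29) share one encoding of
+# location_and_status: bits 7:5 carry the probe status, bits 4:0 its location.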
+class VoltageProbe(SmbiosBaseStructure):
+    smbios_structure_type = 26
+
+    def __init__(self, u, sm):
+        super(VoltageProbe, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('description', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('location_and_status', u.unpack_one("B"))
+                _status = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'OK',
+                    0b100: 'Non-critical',
+                    0b101: 'Critical',
+                    0b110: 'Non-recoverable'
+                    }
+                _location = {
+                    0b00001: 'Other',
+                    0b00010: 'Unknown',
+                    0b00011: 'Processor',
+                    0b00100: 'Disk',
+                    0b00101: 'Peripheral Bay',
+                    0b00110: 'System Management Module',
+                    0b00111: 'Motherboard',
+                    0b01000: 'Memory Module',
+                    0b01001: 'Processor Module',
+                    0b01010: 'Power Unit',
+                    0b01011: 'Add-in Card'
+                    }
+                self.add_field('status', bitfields.getbits(self.location_and_status, 7, 5), unpack.format_table("location_and_status[7:5]={}", _status))
+                self.add_field('location', bitfields.getbits(self.location_and_status, 4, 0), unpack.format_table("location_and_status[4:0]={}", _location))
+            if self.length > 0x6:
+                self.add_field('max_value', u.unpack_one("<H"))
+            if self.length > 0x8:
+                self.add_field('min_value', u.unpack_one("<H"))
+            if self.length > 0xA:
+                self.add_field('resolution', u.unpack_one("<H"))
+            if self.length > 0xC:
+                self.add_field('tolerance', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('accuracy', u.unpack_one("<H"))
+            if self.length > 0x10:
+                self.add_field('oem_defined', u.unpack_one("<I"))
+            if self.length > 0x14:
+                self.add_field('nominal_value', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing VoltageProbe"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class CoolingDevice(SmbiosBaseStructure):
+    smbios_structure_type = 27
+
+    def __init__(self, u, sm):
+        super(CoolingDevice, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('temperature_probe_handle', u.unpack_one("<H"))
+            if self.length > 0x6:
+                self.add_field('device_type_and_status', u.unpack_one("B"))
+                _status = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'OK',
+                    0b100: 'Non-critical',
+                    0b101: 'Critical',
+                    0b110: 'Non-recoverable'
+                    }
+                _type = {
+                    0b00001: 'Other',
+                    0b00010: 'Unknown',
+                    0b00011: 'Fan',
+                    0b00100: 'Centrifugal Blower',
+                    0b00101: 'Chip Fan',
+                    0b00110: 'Cabinet Fan',
+                    0b00111: 'Power Supply Fan',
+                    0b01000: 'Heat Pipe',
+                    0b01001: 'Integrated Refrigeration',
+                    0b10000: 'Active Cooling',
+                    0b10001: 'Passive Cooling'
+                    }
+                self.add_field('status', bitfields.getbits(self.device_type_and_status, 7, 5), unpack.format_table("device_type_and_status[7:5]={}", _status))
+                self.add_field('device_type', bitfields.getbits(self.device_type_and_status, 4, 0), unpack.format_table("device_type_and_status[4:0]={}", _type))
+            if self.length > 0x7:
+                self.add_field('cooling_unit_group', u.unpack_one("B"))
+            if self.length > 0x8:
+                self.add_field('OEM_defined', u.unpack_one("<I"))
+            if self.length > 0xC:
+                self.add_field('nominal_speed', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('description', u.unpack_one("B"), self.fmtstr)
+        except:
+            self.decodeFailure = True
+            print "Error parsing CoolingDevice"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class TemperatureProbe(SmbiosBaseStructure):
+    smbios_structure_type = 28
+
+    def __init__(self, u, sm):
+        super(TemperatureProbe, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('description', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('location_and_status', u.unpack_one("B"))
+                _status = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'OK',
+                    0b100: 'Non-critical',
+                    0b101: 'Critical',
+                    0b110: 'Non-recoverable'
+                    }
+                _location = {
+                    0b00001: 'Other',
+                    0b00010: 'Unknown',
+                    0b00011: 'Processor',
+                    0b00100: 'Disk',
+                    0b00101: 'Peripheral Bay',
+                    0b00110: 'System Management Module',
+                    0b00111: 'Motherboard',
+                    0b01000: 'Memory Module',
+                    0b01001: 'Processor Module',
+                    0b01010: 'Power Unit',
+                    0b01011: 'Add-in Card',
+                    0b01100: 'Front Panel Board',
+                    0b01101: 'Back Panel Board',
+                    0b01110: 'Power System Board',
+                    0b01111: 'Drive Back Plane'
+                    }
+                self.add_field('status', bitfields.getbits(self.location_and_status, 7, 5), unpack.format_table("location_and_status[7:5]={}", _status))
+                self.add_field('location', bitfields.getbits(self.location_and_status, 4, 0), unpack.format_table("location_and_status[4:0]={}", _location))
+            if self.length > 0x6:
+                self.add_field('maximum_value', u.unpack_one("<H"))
+            if self.length > 0x8:
+                self.add_field('minimum_value', u.unpack_one("<H"))
+            if self.length > 0xA:
+                self.add_field('resolution', u.unpack_one("<H"))
+            if self.length > 0xC:
+                self.add_field('tolerance', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('accuracy', u.unpack_one("<H"))
+            if self.length > 0x10:
+                self.add_field('OEM_defined', u.unpack_one("<I"))
+            if self.length > 0x14:
+                self.add_field('nominal_value', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing TemperatureProbe"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class ElectricalCurrentProbe(SmbiosBaseStructure):
+    smbios_structure_type = 29
+
+    def __init__(self, u, sm):
+        super(ElectricalCurrentProbe, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('description', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('location_and_status', u.unpack_one("B"))
+                _status = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'OK',
+                    0b100: 'Non-critical',
+                    0b101: 'Critical',
+                    0b110: 'Non-recoverable'
+                    }
+                _location = {
+                    0b00001: 'Other',
+                    0b00010: 'Unknown',
+                    0b00011: 'Processor',
+                    0b00100: 'Disk',
+                    0b00101: 'Peripheral Bay',
+                    0b00110: 'System Management Module',
+                    0b00111: 'Motherboard',
+                    0b01000: 'Memory Module',
+                    0b01001: 'Processor Module',
+                    0b01010: 'Power Unit',
+                    0b01011: 'Add-in Card',
+                    0b01100: 'Front Panel Board',
+                    0b01101: 'Back Panel Board',
+                    0b01110: 'Power System Board',
+                    0b01111: 'Drive Back Plane'
+                    }
+                self.add_field('status', bitfields.getbits(self.location_and_status, 7, 5), unpack.format_table("location_and_status[7:5]={}", _status))
+                self.add_field('location', bitfields.getbits(self.location_and_status, 4, 0), unpack.format_table("location_and_status[4:0]={}", _location))
+            if self.length > 0x6:
+                self.add_field('maximum_value', u.unpack_one("<H"))
+            if self.length > 0x8:
+                self.add_field('minimum_value', u.unpack_one("<H"))
+            if self.length > 0xA:
+                self.add_field('resolution', u.unpack_one("<H"))
+            if self.length > 0xC:
+                self.add_field('tolerance', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('accuracy', u.unpack_one("<H"))
+            if self.length > 0x10:
+                self.add_field('OEM_defined', u.unpack_one("<I"))
+            if self.length > 0x14:
+                self.add_field('nominal_value', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing ElectricalCurrentProbe"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class OutOfBandRemoteAccess(SmbiosBaseStructure):
+    smbios_structure_type = 30
+
+    def __init__(self, u, sm):
+        super(OutOfBandRemoteAccess, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('manufacturer_name', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('connections', u.unpack_one("B"))
+                self.add_field('outbound_connection_enabled', bool(bitfields.getbits(self.connections, 1)), "connections[1]={}")
+                self.add_field('inbound_connection_enabled', bool(bitfields.getbits(self.connections, 0)), "connections[0]={}")
+        except:
+            self.decodeFailure = True
+            print "Error parsing OutOfBandRemoteAccess"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class BootIntegrityServicesEntryPoint(SmbiosBaseStructure):
+    smbios_structure_type = 31
+
+class SystemBootInformation(SmbiosBaseStructure):
+    smbios_structure_type = 32
+
+    def __init__(self, u, sm):
+        super(SystemBootInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0xA:
+                u.skip(6)
+                _boot_status = {
+                    0: 'No errors detected',
+                    1: 'No bootable media',
+                    2: '"normal" operating system failed to load',
+                    3: 'Firmware-detected hardware failure, including "unknown" failure types',
+                    4: 'Operating system-detected hardware failure',
+                    5: 'User-requested boot, usually through a keystroke',
+                    6: 'System security violation',
+                    7: 'Previously-requested image',
+                    8: 'System watchdog timer expired, causing the system to reboot',
+                    xrange(9,127): 'Reserved for future assignment',
+                    xrange(128, 191): 'Vendor/OEM-specific implementations',
+                    xrange(192, 255): 'Product-specific implementations'
+                    }
+                self.add_field('boot_status', u.unpack_one("B"), unpack.format_table("{}", _boot_status))
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemBootInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryErrorInfo64Bit(SmbiosBaseStructure):
+    smbios_structure_type = 33
+
+    def __init__(self, u, sm):
+        super(MemoryErrorInfo64Bit, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                _error_types = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'OK',
+                    0x04: 'Bad read',
+                    0x05: 'Parity error',
+                    0x06: 'Single-bit error',
+                    0x07: 'Double-bit error',
+                    0x08: 'Multi-bit error',
+                    0x09: 'Nibble error',
+                    0x0A: 'Checksum error',
+                    0x0B: 'CRC error',
+                    0x0C: 'Corrected single-bit error',
+                    0x0D: 'Corrected error',
+                    0x0E: 'Uncorrectable error'
+                    }
+                self.add_field('error_type', u.unpack_one("B"), unpack.format_table("{}", _error_types))
+            if self.length > 0x5:
+                _error_granularity_field = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Device level',
+                    0x04: 'Memory partition level'
+                    }
+                self.add_field('error_granularity', u.unpack_one("B"), unpack.format_table("{}", _error_granularity_field))
+            if self.length > 0x6:
+                _error_operation_field = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Read',
+                    0x04: 'Write',
+                    0x05: 'Partial write'
+                    }
+                self.add_field('error_operation', u.unpack_one("B"), unpack.format_table("{}", _error_operation_field))
+            if self.length > 0x7:
+                self.add_field('vendor_syndrome', u.unpack_one("<I"))
+            if self.length > 0xB:
+                self.add_field('memory_array_error_address', u.unpack_one("<Q"))
+            if self.length > 0xF:
+                self.add_field('device_error_address', u.unpack_one("<Q"))
+            if self.length > 0x13:
+                self.add_field('error_resolution', u.unpack_one("<Q"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing MemoryErrorInfo64Bit"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class ManagementDevice(SmbiosBaseStructure):
+    smbios_structure_type = 34
+
+    def __init__(self, u, sm):
+        super(ManagementDevice, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('description', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                _type = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'National Semiconductor LM75',
+                    0x04: 'National Semiconductor LM78',
+                    0x05: 'National Semiconductor LM79',
+                    0x06: 'National Semiconductor LM80',
+                    0x07: 'National Semiconductor LM81',
+                    0x08: 'Analog Devices ADM9240',
+                    0x09: 'Dallas Semiconductor DS1780',
+                    0x0A: 'Maxim 1617',
+                    0x0B: 'Genesys GL518SM',
+                    0x0C: 'Winbond W83781D',
+                    0x0D: 'Holtek HT82H791'
+                    }
+                self.add_field('device_type', u.unpack_one("B"), unpack.format_table("{}", _type))
+            if self.length > 0x6:
+                self.add_field('address', u.unpack_one("<I"))
+            if self.length > 0xA:
+                _address_type = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'I/O Port',
+                    0x04: 'Memory',
+                    0x05: 'SM Bus'
+                    }
+                self.add_field('address_type', u.unpack_one("B"), unpack.format_table("{}", _address_type))
+        except:
+            self.decodeFailure = True
+            print "Error parsing ManagementDevice"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class ManagementDeviceComponent(SmbiosBaseStructure):
+    smbios_structure_type = 35
+
+    def __init__(self, u, sm):
+        super(ManagementDeviceComponent, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('description', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('management_device_handle', u.unpack_one("<H"))
+            if self.length > 0x7:
+                self.add_field('component_handle', u.unpack_one("<H"))
+            if self.length > 0x9:
+                self.add_field('threshold_handle', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing ManagementDeviceComponent"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class ManagementDeviceThresholdData(SmbiosBaseStructure):
+    smbios_structure_type = 36
+
+    def __init__(self, u, sm):
+        super(ManagementDeviceThresholdData, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('lower_threshold_noncritical', u.unpack_one("<H"))
+            if self.length > 0x6:
+                self.add_field('upper_threshold_noncritical', u.unpack_one("<H"))
+            if self.length > 0x8:
+                self.add_field('lower_threshold_critical', u.unpack_one("<H"))
+            if self.length > 0xA:
+                self.add_field('upper_threshold_critical', u.unpack_one("<H"))
+            if self.length > 0xC:
+                self.add_field('lower_threshold_nonrecoverable', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('upper_threshold_nonrecoverable', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing ManagementDeviceThresholdData"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class MemoryChannel(SmbiosBaseStructure):
+    smbios_structure_type = 37
+
+    def __init__(self, u, sm):
+        super(MemoryChannel, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                _channel_type = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'RamBus',
+                    0x04: 'SyncLink'
+                    }
+                self.add_field('channel_type', u.unpack_one("B"), unpack.format_table("{}", _channel_type))
+            if self.length > 0x6:
+                self.add_field('max_channel_load', u.unpack_one("B"))
+            if self.length > 0x8:
+                self.add_field('memory_device_count', u.unpack_one("B"))
+            if self.length > 0xA:
+                self.add_field('memory_device_load', u.unpack_one("B"))
+            if self.length > 0xC:
+                self.add_field('memory_device_handle', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing MemoryChannel"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class IPMIDeviceInformation(SmbiosBaseStructure):
+    smbios_structure_type = 38
+
+    def __init__(self, u, sm):
+        super(IPMIDeviceInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            _interface_type = {
+                0x00: 'Unknown',
+                0x01: 'KCS: Keyboard Controller Style',
+                0x02: 'SMIC: Server Management Interface Chip',
+                0x03: 'BT: Block Transfer',
+                xrange(0x04, 0xFF): 'Reserved'
+                }
+            self.add_field('interface_type', u.unpack_one("B"), unpack.format_table("{}", _interface_type))
+            self.add_field('ipmi_specification_revision', u.unpack_one("B"))
+            self.add_field('msd_revision', bitfields.getbits(self.ipmi_specification_revision, 7, 4), "ipmi_specification_revision[7:4]={}")
+            self.add_field('lsd_revision', bitfields.getbits(self.ipmi_specification_revision, 3, 0), "ipmi_specification_revision[3:0]={}")
+
+            self.add_field('i2c_slave_address', u.unpack_one("B"))
+            self.add_field('nv_storage_device_address', u.unpack_one("B"))
+            self.add_field('base_address', u.unpack_one("<Q"))
+            # if lsb is 1, address is in IO space. otherwise, memory-mapped
+            self.add_field('base_address_modifier_interrupt_info', u.unpack_one("B"))
+            _reg_spacing = {
+                0b00: 'Interface registers are on successive byte boundaries',
+                0b01: 'Interface registers are on 32-bit boundaries',
+                0b10: 'Interface registers are on 16-byte boundaries',
+                0b11: 'Reserved'
+                }
+            self.add_field('register_spacing', bitfields.getbits(self.base_address_modifier_interrupt_info, 7, 6), unpack.format_table("base_address_modifier_interrupt_info[7:6]={}", _reg_spacing))
+            self.add_field('ls_bit_for_addresses', bitfields.getbits(self.base_address_modifier_interrupt_info, 4), "base_address_modifier_interrupt_info[4]={}")
+            self.add_field('interrupt_info_specified', bool(bitfields.getbits(self.base_address_modifier_interrupt_info, 3)), "base_address_modifier_interrupt_info[3]={}")
+            _polarity = {
+                0: 'active low',
+                1: 'active high'
+                }
+            self.add_field('interrupt_polarity', bitfields.getbits(self.base_address_modifier_interrupt_info, 1), unpack.format_table("base_address_modifier_interrupt_info[1]={}", _polarity))
+            _interrupt_trigger = {
+                0: 'edge',
+                1: 'level'
+                }
+            self.add_field('interrupt_trigger_mode', bitfields.getbits(self.base_address_modifier_interrupt_info, 0), unpack.format_table("base_address_modifier_interrupt_info[0]={}", _interrupt_trigger))
+            self.add_field('interrupt_number', u.unpack_one("B"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing IPMIDeviceInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class SystemPowerSupply(SmbiosBaseStructure):
+    smbios_structure_type = 39
+
+    def __init__(self, u, sm):
+        super(SystemPowerSupply, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('power_unit_group', u.unpack_one("B"))
+            if self.length > 0x5:
+                self.add_field('location', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x6:
+                self.add_field('device_name', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x7:
+                self.add_field('manufacturer', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x8:
+                self.add_field('serial_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x9:
+                self.add_field('asset_tag', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0xA:
+                self.add_field('model_part_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0xB:
+                self.add_field('revision_level', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0xC:
+                self.add_field('max_power_capacity', u.unpack_one("<H"))
+            if self.length > 0xE:
+                self.add_field('power_supply_characteristics', u.unpack_one("<H"))
+                _dmtf_power_supply_type = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'Linear',
+                    0b100: 'Switching',
+                    0b101: 'Battery',
+                    0b110: 'UPS',
+                    0b111: 'Converter',
+                    0b1000: 'Regulator',
+                    xrange(0b1001, 0b1111): 'Reserved'
+                    }
+                self.add_field('dmtf_power_supply_type', bitfields.getbits(self.power_supply_characteristics, 13, 10), unpack.format_table("power_supply_characteristics[13:10]={}", _dmtf_power_supply_type))
+                _status = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'OK',
+                    0b100: 'Non-critical',
+                    0b101: 'Critical; power supply has failed and has been taken off-line'
+                    }
+                self.add_field('status', bitfields.getbits(self.power_supply_characteristics, 9, 7), unpack.format_table("power_supply_characteristics[9:7]={}", _status))
+                _dmtf_input_voltage_range_switching = {
+                    0b001: 'Other',
+                    0b010: 'Unknown',
+                    0b011: 'Manual',
+                    0b100: 'Auto-switch',
+                    0b101: 'Wide range',
+                    0b110: 'Not applicable',
+                    xrange(0b0111, 0b1111): 'Reserved'
+                    }
+                self.add_field('dmtf_input_voltage_range_switching', bitfields.getbits(self.power_supply_characteristics, 6, 3), unpack.format_table("power_supply_characteristics[6:3]={}", _dmtf_input_voltage_range_switching))
+                self.add_field('power_supply_unplugged', bool(bitfields.getbits(self.power_supply_characteristics, 2)), "power_supply_characteristics[2]={}")
+                self.add_field('power_supply_present', bool(bitfields.getbits(self.power_supply_characteristics, 1)), "power_supply_characteristics[1]={}")
+                self.add_field('power_supply_hot_replaceable', bool(bitfields.getbits(self.power_supply_characteristics, 0)), "power_supply_characteristics[0]={}")
+            if self.length > 0x10:
+                self.add_field('input_voltage_probe_handle', u.unpack_one("<H"))
+            if self.length > 0x12:
+                self.add_field('cooling_device_handle', u.unpack_one("<H"))
+            if self.length > 0x14:
+                self.add_field('input_current_probe_handle', u.unpack_one("<H"))
+        except:
+            self.decodeFailure = True
+            print "Error parsing SystemPowerSupply"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class AdditionalInformation(SmbiosBaseStructure):
+    smbios_structure_type = 40
+
+    def __init__(self, u, sm):
+        super(AdditionalInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('num_additional_information_entries', u.unpack_one("B"))
+            if self.length > 0x5:
+                self.add_field('additional_information_entry_length', u.unpack_one("B"))
+                self.add_field('referenced_handle', u.unpack_one("<H"))
+                self.add_field('referenced_offset', u.unpack_one("B"))
+                self.add_field('string', u.unpack_one("B"), self.fmtstr)
+                self.add_field('value', u.unpack_rest())
+        except:
+            self.decodeFailure = True
+            print "Error parsing AdditionalInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class OnboardDevicesExtendedInformation(SmbiosBaseStructure):
+    smbios_structure_type = 41
+
+    def __init__(self, u, sm):
+        super(OnboardDevicesExtendedInformation, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                self.add_field('reference_designation', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0x5:
+                self.add_field('device_type', u.unpack_one("B"))
+                self.add_field('device_enabled', bool(bitfields.getbits(self.device_type, 7)), "device_type[7]={}")
+                _device_types = {
+                    0x01: 'Other',
+                    0x02: 'Unknown',
+                    0x03: 'Video',
+                    0x04: 'SCSI Controller',
+                    0x05: 'Ethernet',
+                    0x06: 'Token Ring',
+                    0x07: 'Sound',
+                    0x08: 'PATA Controller',
+                    0x09: 'SATA Controller',
+                    0x0A: 'SAS Controller'
+                    }
+                self.add_field('type_of_device', bitfields.getbits(self.device_type, 6, 0), unpack.format_table("device_type[6:0]={}", _device_types))
+            if self.length > 0x6:
+                self.add_field('device_type_instance', u.unpack_one("B"))
+            if self.length > 0x7:
+                self.add_field('segment_group_number', u.unpack_one("<H"))
+            if self.length > 0x9:
+                self.add_field('bus_number', u.unpack_one("B"), self.fmtstr)
+            if self.length > 0xA:
+                self.add_field('device_and_function_number', u.unpack_one("B"))
+                self.add_field('device_number', bitfields.getbits(self.device_and_function_number, 7, 3), "device_and_function_number[7:3]={}")
+                self.add_field('function_number', bitfields.getbits(self.device_and_function_number, 2, 0), "device_and_function_number[2:0]={}")
+        except:
+            self.decodeFailure = True
+            print "Error parsing OnboardDevicesExtendedInformation"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class ManagementControllerHostInterface(SmbiosBaseStructure):
+    smbios_structure_type = 42
+
+    def __init__(self, u, sm):
+        super(ManagementControllerHostInterface, self).__init__(u, sm)
+        u = self.u
+        try:
+            if self.length > 0x4:
+                _interface_types = {
+                    0x00: 'Reserved',
+                    0x01: 'Reserved',
+                    0x02: 'KCS: Keyboard Controller Style',
+                    0x03: '8250 UART Register Compatible',
+                    0x04: '16450 UART Register Compatible',
+                    0x05: '16550/16550A UART Register Compatible',
+                    0x06: '16650/16650A UART Register Compatible',
+                    0x07: '16750/16750A UART Register Compatible',
+                    0x08: '16850/16850A UART Register Compatible',
+                    0xF0: 'OEM'
+                    }
+                self.add_field('interface_type', u.unpack_one("B"), unpack.format_table("{}", _interface_types))
+            if self.length > 0x5:
+                self.add_field('mc_host_interface_data', u.unpack_rest(), self.fmtstr)
+        except:
+            self.decodeFailure = True
+            print "Error parsing ManagementControllerHostInterface"
+            import traceback
+            traceback.print_exc()
+        self.fini()
+
+class Inactive(SmbiosBaseStructure):
+    smbios_structure_type = 126
+
+    def __init__(self, u, sm):
+        super(Inactive, self).__init__(u, sm)
+        self.fini()
+
+class EndOfTable(SmbiosBaseStructure):
+    smbios_structure_type = 127
+
+    def __init__(self, u, sm):
+        super(EndOfTable, self).__init__(u, sm)
+        self.fini()
+
+class SmbiosStructureUnknown(SmbiosBaseStructure):
+    smbios_structure_type = None
+
+    def __init__(self, u, sm):
+        super(SmbiosStructureUnknown, self).__init__(u, sm)
+        self.fini()
+
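+# Dispatch table for structure decoding: the class whose smbios_structure_type
+# matches a structure's type byte is instantiated, with SmbiosStructureUnknown
+# (type None) acting as the catch-all fallback.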
+_smbios_structures = [
+    BIOSInformation,
+    SystemInformation,
+    BaseboardInformation,
+    SystemEnclosure,
+    ProcessorInformation,
+    MemoryControllerInformation,
+    MemoryModuleInformation,
+    CacheInformation,
+    PortConnectorInfo,
+    SystemSlots,
+    OnBoardDevicesInformation,
+    OEMStrings,
+    SystemConfigOptions,
+    BIOSLanguageInformation,
+    GroupAssociations,
+    SystemEventLog,
+    PhysicalMemoryArray,
+    MemoryDevice,
+    MemoryErrorInfo32Bit,
+    MemoryArrayMappedAddress,
+    MemoryDeviceMappedAddress,
+    BuiltInPointingDevice,
+    PortableBattery,
+    SystemReset,
+    HardwareSecurity,
+    SystemPowerControls,
+    VoltageProbe,
+    CoolingDevice,
+    TemperatureProbe,
+    ElectricalCurrentProbe,
+    OutOfBandRemoteAccess,
+    BootIntegrityServicesEntryPoint,
+    SystemBootInformation,
+    MemoryErrorInfo64Bit,
+    ManagementDevice,
+    ManagementDeviceComponent,
+    ManagementDeviceThresholdData,
+    MemoryChannel,
+    IPMIDeviceInformation,
+    SystemPowerSupply,
+    AdditionalInformation,
+    OnboardDevicesExtendedInformation,
+    ManagementControllerHostInterface,
+    Inactive,
+    EndOfTable,
+    SmbiosStructureUnknown, # Must always come last
+]
+
+def log_smbios_info():
+    with redirect.logonly():
+        try:
+            sm = SMBIOS()
+            print
+            if sm is None:
+                print "No SMBIOS structures found"
+                return
+            output = {}
+            known_types = (0, 1)
+            for sm_struct in sm.structures:
+                if sm_struct.type in known_types:
+                    output.setdefault(sm_struct.type, []).append(sm_struct)
+                    if len(output) == len(known_types):
+                        break
+
+            print "SMBIOS information:"
+            for key in sorted(known_types):
+                for s in output.get(key, ["No structure of type {} found".format(key)]):
+                    print ttypager._wrap("{}: {}".format(key, s))
+        except:
+            print "Error parsing SMBIOS information:"
+            import traceback
+            traceback.print_exc()
+
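+# dump_raw() pages both the decoded fields and a hex dump of the raw bytes and
+# string-set of every structure; dump() pages the decoded view only.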
+def dump_raw():
+    try:
+        sm = SMBIOS()
+        if sm:
+            s = "SMBIOS -- Raw bytes and structure decode.\n\n"
+
+            s += str(sm.header) + '\n'
+            s += bits.dumpmem(sm._header_memory) + '\n'
+
+            s += "Raw bytes for the SMBIOS structures\n"
+            s += bits.dumpmem(sm._structure_memory) + '\n'
+
+            for sm_struct in sm.structures:
+                s += str(sm_struct) + '\n'
+                s += bits.dumpmem(sm_struct.raw_data)
+
+                s += "Strings:\n"
+                for n in range(1, len(getattr(sm_struct, "strings", [])) + 1):
+                    s += str(sm_struct.fmtstr(n)) + '\n'
+                s += bits.dumpmem(sm_struct.raw_strings) + '\n'
+        else:
+            s = "No SMBIOS structures found"
+        ttypager.ttypager_wrap(s, indent=False)
+    except:
+        print "Error parsing SMBIOS information:"
+        import traceback
+        traceback.print_exc()
+
+def dump():
+    try:
+        sm = SMBIOS()
+        if sm:
+            s = str(sm)
+        else:
+            s = "No SMBIOS structures found"
+        ttypager.ttypager_wrap(s, indent=False)
+    except:
+        print "Error parsing SMBIOS information:"
+        import traceback
+        traceback.print_exc()
+
+def annex_a_conformance():
+    try:
+        sm = SMBIOS()
+
+        # check: 1. The table anchor string "_SM_" is present in the address range 0xF0000 to 0xFFFFF on a 16-byte boundary
+
+        def table_entry_point_verification():
+            ''' Verify table entry-point'''
+            if (sm.header.length < 0x1F):
+                print "Failure: Table entry-point - The entry-point Length must be at least 0x1F"
+            if sm.header.checksum != 0:
+                print "Failure: Table entry-point - The entry-point checksum must evaluate to 0"
+            if (sm.header.major_version, sm.header.minor_version) < (2, 4):
+                print "Failure: Table entry-point - SMBIOS version must be at least 2.4"
+            if (sm.header.intermediate_anchor_string != '_DMI_'):
+                print "Failure: Table entry-point - The Intermediate Anchor String must be '_DMI_'"
+            if (sm.header.intermediate_checksum != 0):
+                print "Failure: Table entry-point - The Intermediate checksum must evaluate to 0"
+
+        # check: 3. The structure-table is traversable and conforms to the entry-point specifications:
+
+        def req_structures():
+            '''Checks for required structures and corresponding data'''
+            types_present = [sm.structures[x].smbios_structure_type for x in range(len(sm.structures))]
+            required = [0, 1, 4, 7, 9, 16, 17, 19, 31, 32]
+            for s in required:
+                if s not in set(types_present):
+                    print "Failure: Type {} required but not found".format(s)
+
+                else:
+                    if s == 0:
+                        if types_present.count(s) > 1:
+                            print "Failure: Type {} - One and only one structure of this type must be present.".format(s)
+                        if sm.structure_type(s).length < 0x18:
+                            print "Failure: Type {} - The structure Length field must be at least 0x18".format(s)
+                        if sm.structure_type(s).version is None:
+                            print "Failure: Type {} - BIOS Version string must be present and non-null.".format(s)
+                        if sm.structure_type(s).release_date is None:
+                            print "Failure: Type {} - BIOS Release Date string must be present, non-null, and include a 4-digit year".format(s)
+                        if bitfields.getbits(sm.structure_type(s).characteristics, 3, 0) != 0 or bitfields.getbits(sm.structure_type(s).characteristics, 31, 4) == 0:
+                            print "Failure: Type {} - BIOS Characteristics: bits 3:0 must all be 0, and at least one of bits 31:4 must be set to 1.".format(s)
+                    elif s == 1:
+                        if types_present.count(s) > 1:
+                            print "Failure: Type {} - One and only one structure of this type must be present.".format(s)
+                        if sm.structure_type(s).length < 0x1B:
+                            print "Failure: Type {} - The structure Length field must be at least 0x1B".format(s)
+                        if sm.structure_type(s).manufacturer is None:
+                            print "Failure: Type {} - Manufacturer string must be present and non-null.".format(s)
+                        if sm.structure_type(s).product_name is None:
+                            print "Failure: Type {} - Product Name string must be present and non-null.".format(s)
+                        if sm.structure_type(s).uuid == '00000000 00000000' or sm.structure_type(s).uuid == 'FFFFFFFF FFFFFFFF':
+                            print "Failure: Type {} - UUID field must be neither 00000000 00000000 nor FFFFFFFF FFFFFFFF.".format(s)
+                        if sm.structure_type(s).wakeup_type == 0x00 or sm.structure_type(s).wakeup_type == 0x02:
+                            print "Failure: Type {} - Wake-up Type field must be neither 00h (Reserved) nor 02h (Unknown).".format(s)
+                    # continue for remaining required types
+
+        # check remaining conformance guidelines
+
+        table_entry_point_verification()
+        req_structures()
+    except:
+        print "Error checking ANNEX A conformance guidelines"
+        import traceback
+        traceback.print_exc()
diff --git a/tests/functional/acpi-bits/bits-tests/smilatency.py2 b/tests/functional/acpi-bits/bits-tests/smilatency.py2
new file mode 100644
index 0000000000..405af67e19
--- /dev/null
+++ b/tests/functional/acpi-bits/bits-tests/smilatency.py2
@@ -0,0 +1,107 @@
+# Copyright (c) 2015, Intel Corporation
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright notice,
+#       this list of conditions and the following disclaimer in the documentation
+#       and/or other materials provided with the distribution.
+#     * Neither the name of Intel Corporation nor the names of its contributors
+#       may be used to endorse or promote products derived from this software
+#       without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script runs only from the biosbits VM.
+
+"""SMI latency test."""
+
+import bits
+from collections import namedtuple
+import testsuite
+import time
+import usb
+
+def register_tests():
+    pass
+#    testsuite.add_test("SMI latency test", smi_latency);
+#    testsuite.add_test("SMI latency test with USB disabled via BIOS handoff", test_with_usb_disabled, runall=False);
+
+def smi_latency():
+    MSR_SMI_COUNT = 0x34
+
+    print "Warning: touching the keyboard can affect the results of this test."
+
+    tsc_per_sec = bits.tsc_per_sec()
+    tsc_per_usec = tsc_per_sec / (1000 * 1000)
+    bins = [long(tsc_per_usec * 10**i) for i in range(9)]
+    bin_descs = [
+        "0     < t <=   1us",
+        "1us   < t <=  10us",
+        "10us  < t <= 100us",
+        "100us < t <=   1ms",
+        "1ms   < t <=  10ms",
+        "10ms  < t <= 100ms",
+        "100ms < t <=   1s ",
+        "1s    < t <=  10s ",
+        "10s   < t <= 100s ",
+        "100s  < t         ",
+    ]
+
+    print "Starting test. Wait here, I will be back in 15 seconds."
+    (max_latency, smi_count_delta, bins) = bits.smi_latency(long(15 * tsc_per_sec), bins)
+    BinType = namedtuple('BinType', ("max", "total", "count", "times"))
+    bins = [BinType(*b) for b in bins]
+
+    testsuite.test("SMI latency < 150us to minimize risk of OS timeouts", max_latency / tsc_per_usec <= 150)
+    if not testsuite.show_detail():
+        return
+
+    for bin, desc in zip(bins, bin_descs):
+        if bin.count == 0:
+            continue
+        testsuite.print_detail("{}; average = {}; count = {}".format(desc, bits.format_tsc(bin.total/bin.count), bin.count))
+        deltas = (bits.format_tsc(t2 - t1) for t1,t2 in zip(bin.times, bin.times[1:]))
+        testsuite.print_detail(" Times between first few observations: {}".format(" ".join("{:>6}".format(delta) for delta in deltas)))
+
+    if smi_count_delta is not None:
+        testsuite.print_detail("{} SMI detected using MSR_SMI_COUNT (MSR {:#x})".format(smi_count_delta, MSR_SMI_COUNT))
+
+    testsuite.print_detail("Summary of impact: observed maximum latency = {}".format(bits.format_tsc(max_latency)))
+
+def test_with_usb_disabled():
+    if usb.handoff_to_os():
+        smi_latency()
+
+def average_io_smi(port, value, count):
+    def f():
+        tsc_start = bits.rdtsc()
+        bits.outb(port, value)
+        return bits.rdtsc() - tsc_start
+    counts = [f() for i in range(count)]
+    return sum(counts)/len(counts)
+
+def time_io_smi(port=0xb2, value=0, count=1000):
+    count_for_estimate = 10
+    start = time.time()
+    average_io_smi(port, value, count_for_estimate)
+    avg10 = time.time() - start
+    estimate = avg10 * count/count_for_estimate
+    if estimate > 1:
+        print "Running test, estimated time: {}s".format(int(estimate))
+    average = average_io_smi(port, value, count)
+    print "Average of {} SMIs (via outb, port={:#x}, value={:#x}): {}".format(count, port, value, bits.format_tsc(average))
diff --git a/tests/functional/acpi-bits/bits-tests/testacpi.py2 b/tests/functional/acpi-bits/bits-tests/testacpi.py2
new file mode 100644
index 0000000000..7bf9075c1b
--- /dev/null
+++ b/tests/functional/acpi-bits/bits-tests/testacpi.py2
@@ -0,0 +1,287 @@
+# Copyright (c) 2015, Intel Corporation
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright notice,
+#       this list of conditions and the following disclaimer in the documentation
+#       and/or other materials provided with the distribution.
+#     * Neither the name of Intel Corporation nor the names of its contributors
+#       may be used to endorse or promote products derived from this software
+#       without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script runs only from the biosbits VM.
+
+"""Tests for ACPI"""
+
+import acpi
+import bits
+import bits.mwait
+import struct
+import testutil
+import testsuite
+import time
+
+def register_tests():
+    testsuite.add_test("ACPI _MAT (Multiple APIC Table Entry) under Processor objects", test_mat, submenu="ACPI Tests")
+#    testsuite.add_test("ACPI _PSS (Pstate) table conformance tests", test_pss, submenu="ACPI Tests")
+#    testsuite.add_test("ACPI _PSS (Pstate) runtime tests", test_pstates, submenu="ACPI Tests")
+    testsuite.add_test("ACPI DSDT (Differentiated System Description Table)", test_dsdt, submenu="ACPI Tests")
+    testsuite.add_test("ACPI FACP (Fixed ACPI Description Table)", test_facp, submenu="ACPI Tests")
+    testsuite.add_test("ACPI HPET (High Precision Event Timer Table)", test_hpet, submenu="ACPI Tests")
+    testsuite.add_test("ACPI MADT (Multiple APIC Description Table)", test_apic, submenu="ACPI Tests")
+    testsuite.add_test("ACPI MPST (Memory Power State Table)", test_mpst, submenu="ACPI Tests")
+    testsuite.add_test("ACPI RSDP (Root System Description Pointer Structure)", test_rsdp, submenu="ACPI Tests")
+    testsuite.add_test("ACPI XSDT (Extended System Description Table)", test_xsdt, submenu="ACPI Tests")
+
+def test_mat():
+    cpupaths = acpi.get_cpupaths()
+    apic = acpi.parse_apic()
+    procid_apicid = apic.procid_apicid
+    uid_x2apicid = apic.uid_x2apicid
+    for cpupath in cpupaths:
+        # Find the ProcId defined by the processor object
+        processor = acpi.evaluate(cpupath)
+        # Find the UID defined by the processor object's _UID method
+        uid = acpi.evaluate(cpupath + "._UID")
+        mat_buffer = acpi.evaluate(cpupath + "._MAT")
+        if mat_buffer is None:
+            continue
+        # Process each _MAT subtable
+        mat = acpi._MAT(mat_buffer)
+        for index, subtable in enumerate(mat):
+            if subtable.subtype == acpi.MADT_TYPE_LOCAL_APIC:
+                if subtable.flags.bits.enabled:
+                    testsuite.test("{} Processor declaration ProcId = _MAT ProcId".format(cpupath), processor.ProcId == subtable.proc_id)
+                    testsuite.print_detail("{} ProcId ({:#02x}) != _MAT ProcId ({:#02x})".format(cpupath, processor.ProcId, subtable.proc_id))
+                    testsuite.print_detail("Processor Declaration: {}".format(processor))
+                    testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
+                    if testsuite.test("{} with local APIC in _MAT has local APIC in MADT".format(cpupath), processor.ProcId in procid_apicid):
+                        testsuite.test("{} ApicId derived using Processor declaration ProcId = _MAT ApicId".format(cpupath), procid_apicid[processor.ProcId] == subtable.apic_id)
+                        testsuite.print_detail("{} ApicId derived from MADT ({:#02x}) != _MAT ApicId ({:#02x})".format(cpupath, procid_apicid[processor.ProcId], subtable.apic_id))
+                        testsuite.print_detail("Processor Declaration: {}".format(processor))
+                        testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
+            if subtable.subtype == acpi.MADT_TYPE_LOCAL_X2APIC:
+                if subtable.flags.bits.enabled:
+                    if testsuite.test("{} with x2Apic in _MAT has _UID".format(cpupath), uid is not None):
+                        testsuite.test("{}._UID = _MAT UID".format(cpupath), uid == subtable.uid)
+                        testsuite.print_detail("{}._UID ({:#x}) != _MAT UID ({:#x})".format(cpupath, uid, subtable.uid))
+                        testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
+                    if testsuite.test("{} with _MAT x2Apic has x2Apic in MADT".format(cpupath), subtable.uid in uid_x2apicid):
+                        testsuite.test("{} x2ApicId derived from MADT using UID = _MAT x2ApicId".format(cpupath), uid_x2apicid[subtable.uid] == subtable.x2apicid)
+                        testsuite.print_detail("{} x2ApicId derived from MADT ({:#02x}) != _MAT x2ApicId ({:#02x})".format(cpupath, uid_x2apicid[subtable.uid], subtable.x2apicid))
+                        testsuite.print_detail("_MAT entry[{}]: {}".format(index, subtable))
+
+def test_pss():
+    uniques = acpi.parse_cpu_method("_PSS")
+    # We special-case None here to avoid a double-failure for CPUs without a _PSS
+    testsuite.test("_PSS must be identical for all CPUs", len(uniques) <= 1 or (len(uniques) == 2 and None in uniques))
+    for pss, cpupaths in uniques.iteritems():
+        if not testsuite.test("_PSS must exist", pss is not None):
+            testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
+            testsuite.print_detail('No _PSS exists')
+            continue
+
+        if not testsuite.test("_PSS must not be empty", pss.pstates):
+            testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
+            testsuite.print_detail('_PSS is empty')
+            continue
+
+        testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
+        for index, pstate in enumerate(pss.pstates):
+            testsuite.print_detail("P[{}]: {}".format(index, pstate))
+
+        testsuite.test("_PSS must contain at most 16 Pstates", len(pss.pstates) <= 16)
+        testsuite.test("_PSS must have no duplicate Pstates", len(pss.pstates) == len(set(pss.pstates)))
+
+        frequencies = [p.core_frequency for p in pss.pstates]
+        testsuite.test("_PSS must list Pstates in descending order of frequency", frequencies == sorted(frequencies, reverse=True))
+
+        testsuite.test("_PSS must have Pstates with no duplicate frequencies", len(frequencies) == len(set(frequencies)))
+
+        dissipations = [p.power for p in pss.pstates]
+        testsuite.test("_PSS must list Pstates in descending order of power dissipation", dissipations == sorted(dissipations, reverse=True))
+
+def test_pstates():
+    """Execute and verify frequency for each Pstate in the _PSS"""
+    IA32_PERF_CTL = 0x199
+    with bits.mwait.use_hint(), bits.preserve_msr(IA32_PERF_CTL):
+        cpupath_procid = acpi.find_procid()
+        cpupath_uid = acpi.find_uid()
+        apic = acpi.parse_apic()
+        procid_apicid = apic.procid_apicid
+        uid_x2apicid = apic.uid_x2apicid
+        def cpupath_apicid(cpupath):
+            if procid_apicid is not None:
+                procid = cpupath_procid.get(cpupath, None)
+                if procid is not None:
+                    apicid = procid_apicid.get(procid, None)
+                    if apicid is not None:
+                        return apicid
+            if uid_x2apicid is not None:
+                uid = cpupath_uid.get(cpupath, None)
+                if uid is not None:
+                    apicid = uid_x2apicid.get(uid, None)
+                    if apicid is not None:
+                        return apicid
+            return bits.cpus()[0]
+
+        bclk = testutil.adjust_to_nearest(bits.bclk(), 100.0/12) * 1000000
+
+        uniques = acpi.parse_cpu_method("_PSS")
+        for pss, cpupaths in uniques.iteritems():
+            if not testsuite.test("_PSS must exist", pss is not None):
+                testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
+                testsuite.print_detail('No _PSS exists')
+                continue
+
+            for n, pstate in enumerate(pss.pstates):
+                for cpupath in cpupaths:
+                    apicid = cpupath_apicid(cpupath)
+                    if apicid is None:
+                        print 'Failed to find apicid for cpupath {}'.format(cpupath)
+                        continue
+                    bits.wrmsr(apicid, IA32_PERF_CTL, pstate.control)
+
+                # Detecting Turbo frequency requires at least 2 pstates
+                # since turbo frequency = max non-turbo frequency + 1
+                turbo = False
+                if len(pss.pstates) >= 2:
+                    turbo = (n == 0 and pstate.core_frequency == (pss.pstates[1].core_frequency + 1))
+                    if turbo:
+                        # Needs to busywait, not sleep
+                        start = time.time()
+                        while (time.time() - start < 2):
+                            pass
+
+                for duration in (0.1, 1.0):
+                    frequency_data = bits.cpu_frequency(duration)
+                    # Abort the test if the cpu frequency is not available
+                    if frequency_data is None:
+                        continue
+                    aperf = frequency_data[1]
+                    aperf = testutil.adjust_to_nearest(aperf, bclk/2)
+                    aperf = int(aperf / 1000000)
+                    if turbo:
+                        if aperf >= pstate.core_frequency:
+                            break
+                    else:
+                        if aperf == pstate.core_frequency:
+                            break
+
+                if turbo:
+                    testsuite.test("P{}: Turbo measured frequency {} >= expected {} MHz".format(n, aperf, pstate.core_frequency), aperf >= pstate.core_frequency)
+                else:
+                    testsuite.test("P{}: measured frequency {} MHz == expected {} MHz".format(n, aperf, pstate.core_frequency), aperf == pstate.core_frequency)
+
+def test_psd_thread_scope():
+    uniques = acpi.parse_cpu_method("_PSD")
+    if not testsuite.test("_PSD (P-State Dependency) must exist for each processor", None not in uniques):
+        testsuite.print_detail(acpi.factor_commonprefix(uniques[None]))
+        testsuite.print_detail('No _PSD exists')
+        return
+    unique_num_dependencies = {}
+    unique_num_entries = {}
+    unique_revision = {}
+    unique_domain = {}
+    unique_coordination_type = {}
+    unique_num_processors = {}
+    for value, cpupaths in uniques.iteritems():
+        unique_num_dependencies.setdefault(len(value.dependencies), []).extend(cpupaths)
+        unique_num_entries.setdefault(value.dependencies[0].num_entries, []).extend(cpupaths)
+        unique_revision.setdefault(value.dependencies[0].revision, []).extend(cpupaths)
+        unique_domain.setdefault(value.dependencies[0].domain, []).extend(cpupaths)
+        unique_coordination_type.setdefault(value.dependencies[0].coordination_type, []).extend(cpupaths)
+        unique_num_processors.setdefault(value.dependencies[0].num_processors, []).extend(cpupaths)
+    def detail(d, fmt):
+        for value, cpupaths in sorted(d.iteritems(), key=(lambda (k,v): v)):
+            testsuite.print_detail(acpi.factor_commonprefix(cpupaths))
+            testsuite.print_detail(fmt.format(value))
+
+    testsuite.test('Dependency count for each processor must be 1', unique_num_dependencies.keys() == [1])
+    detail(unique_num_dependencies, 'Dependency count for each processor = {} (Expected 1)')
+    testsuite.test('_PSD.num_entries must be 5', unique_num_entries.keys() == [5])
+    detail(unique_num_entries, 'num_entries = {} (Expected 5)')
+    testsuite.test('_PSD.revision must be 0', unique_revision.keys() == [0])
+    detail(unique_revision, 'revision = {}')
+    testsuite.test('_PSD.coordination_type must be 0xFE (HW_ALL)', unique_coordination_type.keys() == [0xfe])
+    detail(unique_coordination_type, 'coordination_type = {:#x} (Expected 0xFE HW_ALL)')
+    testsuite.test('_PSD.domain must be unique (thread-scoped) for each processor', len(unique_domain) == len(acpi.get_cpupaths()))
+    detail(unique_domain, 'domain = {:#x} (Expected a unique value for each processor)')
+    testsuite.test('_PSD.num_processors must be 1', unique_num_processors.keys() == [1])
+    detail(unique_num_processors, 'num_processors = {} (Expected 1)')
+
+def test_table_checksum(data):
+    csum = sum(ord(c) for c in data) % 0x100
+    testsuite.test('ACPI table cumulative checksum must equal 0', csum == 0)
+    testsuite.print_detail("Cumulative checksum = {} (Expected 0)".format(csum))
+
+def test_apic():
+    data = acpi.get_table("APIC")
+    if data is None:
+        return
+    test_table_checksum(data)
+    apic = acpi.parse_apic()
+
+def test_dsdt():
+    data = acpi.get_table("DSDT")
+    if data is None:
+        return
+    test_table_checksum(data)
+
+def test_facp():
+    data = acpi.get_table("FACP")
+    if data is None:
+        return
+    test_table_checksum(data)
+    facp = acpi.parse_facp()
+
+def test_hpet():
+    data = acpi.get_table("HPET")
+    if data is None:
+        return
+    test_table_checksum(data)
+    hpet = acpi.parse_hpet()
+
+def test_mpst():
+    data = acpi.get_table("MPST")
+    if data is None:
+        return
+    test_table_checksum(data)
+    mpst = acpi.MPST(data)
+
+def test_rsdp():
+    data = acpi.get_table("RSD PTR ")
+    if data is None:
+        return
+
+    # Checksum the first 20 bytes per ACPI 1.0
+    csum = sum(ord(c) for c in data[:20]) % 0x100
+    testsuite.test('ACPI 1.0 table first 20 bytes cumulative checksum must equal 0', csum == 0)
+    testsuite.print_detail("Cumulative checksum = {} (Expected 0)".format(csum))
+
+    test_table_checksum(data)
+    rsdp = acpi.parse_rsdp()
+
+def test_xsdt():
+    data = acpi.get_table("XSDT")
+    if data is None:
+        return
+    test_table_checksum(data)
+    xsdt = acpi.parse_xsdt()
diff --git a/tests/functional/acpi-bits/bits-tests/testcpuid.py2 b/tests/functional/acpi-bits/bits-tests/testcpuid.py2
new file mode 100644
index 0000000000..7adefbe355
--- /dev/null
+++ b/tests/functional/acpi-bits/bits-tests/testcpuid.py2
@@ -0,0 +1,87 @@
+# Copyright (c) 2012, Intel Corporation
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright notice,
+#       this list of conditions and the following disclaimer in the documentation
+#       and/or other materials provided with the distribution.
+#     * Neither the name of Intel Corporation nor the names of its contributors
+#       may be used to endorse or promote products derived from this software
+#       without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script runs only from the biosbits VM.
+
+"""Tests and helpers for CPUID."""
+
+import bits
+import testsuite
+import testutil
+
+def cpuid_helper(function, index=None, shift=0, mask=~0, eax_mask=~0, ebx_mask=~0, ecx_mask=~0, edx_mask=~0):
+    if index is None:
+        index = 0
+        indexdesc = ""
+    else:
+        indexdesc = " index {0:#x}".format(index)
+
+    def find_mask(m):
+        if m == ~0:
+            return mask
+        return m
+    masks = map(find_mask, [eax_mask, ebx_mask, ecx_mask, edx_mask])
+
+    uniques = {}
+    for cpu in bits.cpus():
+        regs = bits.cpuid_result(*[(r >> shift) & m for r, m in zip(bits.cpuid(cpu, function, index), masks)])
+        uniques.setdefault(regs, []).append(cpu)
+
+    desc = ["CPUID function {:#x}{}".format(function, indexdesc)]
+
+    if shift != 0:
+        desc.append("Register values have been shifted by {}".format(shift))
+    if mask != ~0 or eax_mask != ~0 or ebx_mask != ~0 or ecx_mask != ~0 or edx_mask != ~0:
+        desc.append("Register values have been masked:")
+        shifted_masks = bits.cpuid_result(*[m << shift for m in masks])
+        desc.append("Masks:           eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**shifted_masks._asdict()))
+
+    if len(uniques) > 1:
+        regvalues = zip(*uniques.iterkeys())
+        common_masks = bits.cpuid_result(*map(testutil.find_common_mask, regvalues))
+        common_values = bits.cpuid_result(*[v[0] & m for v, m in zip(regvalues, common_masks)])
+        desc.append('Register values are not unique across all logical processors')
+        desc.append("Common bits:     eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**common_values._asdict()))
+        desc.append("Mask of common bits: {eax:#010x}     {ebx:#010x}     {ecx:#010x}     {edx:#010x}".format(**common_masks._asdict()))
+
+    for regs in sorted(uniques.iterkeys()):
+        cpus = uniques[regs]
+        desc.append("Register value:  eax={eax:#010x} ebx={ebx:#010x} ecx={ecx:#010x} edx={edx:#010x}".format(**regs._asdict()))
+        desc.append("On {0} CPUs: {1}".format(len(cpus), testutil.apicid_list(cpus)))
+
+    return uniques, desc
+
+def test_cpuid_consistency(text, function, index=None, shift=0, mask=~0, eax_mask=~0, ebx_mask=~0, ecx_mask=~0, edx_mask=~0):
+    uniques, desc = cpuid_helper(function, index, shift, mask, eax_mask, ebx_mask, ecx_mask, edx_mask)
+    desc[0] += " Consistency Check"
+    if text:
+        desc.insert(0, text)
+    status = testsuite.test(desc[0], len(uniques) == 1)
+    for line in desc[1:]:
+        testsuite.print_detail(line)
+    return status
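+
+# A hypothetical caller (not part of this file) might register a consistency
+# check with the testsuite module along these lines:
+#
+#     def register_tests():
+#         testsuite.add_test("CPUID brand string consistency",
+#                            lambda: test_cpuid_consistency("Brand string", 0x80000002))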
diff --git a/tests/functional/meson.build b/tests/functional/meson.build
new file mode 100644
index 0000000000..cda89c4b0c
--- /dev/null
+++ b/tests/functional/meson.build
@@ -0,0 +1,205 @@
+# QEMU functional tests:
+# Tests that are put in the 'quick' category are run by default during
+# 'make check'. Everything that should not be run during 'make check'
+# (e.g. tests that fetch assets from the internet) should be put into
+# the 'thorough' category instead.
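+#
+# For example (an illustrative test name, not one that exists here): a fast,
+# self-contained x86_64 test in test_foo.py would be added to the
+# 'tests_x86_64_system_quick' list below, while a test that downloads a guest
+# image would be listed in 'tests_x86_64_system_thorough' instead.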
+
+# Most tests run too slow with TCI enabled, so skip the functional tests there
+if get_option('tcg_interpreter')
+  subdir_done()
+endif
+
+# Timeouts for individual tests that can be slow e.g. with debugging enabled
+test_timeouts = {
+  'aarch64_sbsaref' : 600,
+  'aarch64_virt' : 360,
+  'acpi_bits' : 240,
+  'netdev_ethtool' : 180,
+  'ppc_40p' : 240,
+  'ppc64_hv' : 1000,
+  'ppc64_powernv' : 120,
+  'ppc64_pseries' : 120,
+  's390x_ccw_virtio' : 180,
+}
+
+tests_generic_system = [
+  'empty_cpu_model',
+  'info_usernet',
+  'version',
+]
+
+tests_generic_linuxuser = [
+]
+
+tests_generic_bsduser = [
+]
+
+tests_aarch64_system_thorough = [
+  'aarch64_sbsaref',
+  'aarch64_virt',
+]
+
+tests_arm_system_thorough = [
+  'arm_canona1100',
+  'arm_integratorcp',
+]
+
+tests_arm_linuxuser_thorough = [
+  'arm_bflt',
+]
+
+tests_avr_system_thorough = [
+  'avr_mega2560',
+]
+
+tests_loongarch64_system_thorough = [
+  'loongarch64_virt',
+]
+
+tests_m68k_system_thorough = [
+  'm68k_nextcube'
+]
+
+tests_microblaze_system_thorough = [
+  'microblaze_s3adsp1800'
+]
+
+tests_microblazeel_system_thorough = [
+  'microblazeel_s3adsp1800'
+]
+
+tests_mips64el_system_quick = [
+  'mips64el_fuloong2e',
+]
+
+tests_mips64el_system_thorough = [
+  'mips64el_loongson3v',
+]
+
+tests_ppc_system_quick = [
+  'ppc_74xx',
+]
+
+tests_ppc_system_thorough = [
+  'ppc_405',
+  'ppc_40p',
+  'ppc_amiga',
+  'ppc_bamboo',
+  'ppc_mpc8544ds',
+  'ppc_virtex_ml507',
+]
+
+tests_ppc64_system_thorough = [
+  'ppc64_hv',
+  'ppc64_powernv',
+  'ppc64_pseries',
+]
+
+tests_rx_system_thorough = [
+  'rx_gdbsim',
+]
+
+tests_s390x_system_thorough = [
+  's390x_ccw_virtio',
+  's390x_topology',
+]
+
+tests_sparc64_system_thorough = [
+  'sparc64_sun4u',
+]
+
+tests_x86_64_system_quick = [
+  'cpu_queries',
+  'mem_addr_space',
+  'pc_cpu_hotplug_props',
+  'virtio_version',
+  'x86_cpu_model_versions',
+]
+
+tests_x86_64_system_thorough = [
+  'acpi_bits',
+  'linux_initrd',
+  'netdev_ethtool',
+  'virtio_gpu',
+]
+
+precache_all = []
+foreach speed : ['quick', 'thorough']
+  foreach dir : target_dirs
+
+    target_base = dir.split('-')[0]
+
+    if dir.endswith('-softmmu')
+      sysmode = 'system'
+      test_emulator = emulators['qemu-system-' + target_base]
+    elif dir.endswith('-linux-user')
+      sysmode = 'linuxuser'
+      test_emulator = emulators['qemu-' + target_base]
+    elif dir.endswith('-bsd-user')
+      sysmode = 'bsduser'
+      test_emulator = emulators['qemu-' + target_base]
+    else
+      continue
+    endif
+
+    if speed == 'quick'
+      suites = ['func-quick', 'func-' + target_base]
+      target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_quick', []) \
+                     + get_variable('tests_generic_' + sysmode)
+    else
+      suites = ['func-' + speed, 'func-' + target_base + '-' + speed, speed]
+      target_tests = get_variable('tests_' + target_base + '_' + sysmode + '_' + speed, [])
+    endif
+
+    test_deps = roms
+    test_env = environment()
+    if have_tools
+      test_env.set('QEMU_TEST_QEMU_IMG', meson.global_build_root() / 'qemu-img')
+      test_deps += [qemu_img]
+    endif
+    test_env.set('QEMU_TEST_QEMU_BINARY', test_emulator.full_path())
+    test_env.set('QEMU_BUILD_ROOT', meson.project_build_root())
+    test_env.set('PYTHONPATH', meson.project_source_root() / 'python:' +
+                               meson.current_source_dir())
+
+    foreach test : target_tests
+      testname = '@0@-@1@'.format(target_base, test)
+      testfile = 'test_' + test + '.py'
+      testpath = meson.current_source_dir() / testfile
+      teststamp = testname + '.tstamp'
+      test_precache_env = environment()
+      test_precache_env.set('QEMU_TEST_PRECACHE', meson.current_build_dir() / teststamp)
+      test_precache_env.set('PYTHONPATH', meson.project_source_root() / 'python:' +
+                                          meson.current_source_dir())
+      precache = custom_target('func-precache-' + testname,
+                               output: teststamp,
+                               command: [python, testpath],
+                               depend_files: files(testpath),
+                               build_by_default: false,
+                               env: test_precache_env)
+      precache_all += precache
+
+      # Ideally we would add 'precache' to 'depends' here, such that
+      # 'build_by_default: false' lets the pre-caching automatically
+      # run immediately before the test runs. In practice this is
+      # broken in meson, with it running the pre-caching in the normal
+      # compile phase https://github.com/mesonbuild/meson/issues/2518
+      # If the above bug ever gets fixed, when QEMU changes the min
+      # meson version, add the 'depends' and remove the custom
+      # 'run_target' logic below & in Makefile.include
+      test('func-' + testname,
+           python,
+           depends: [test_deps, test_emulator, emulator_modules],
+           env: test_env,
+           args: [testpath],
+           protocol: 'tap',
+           timeout: test_timeouts.get(test, 60),
+           priority: test_timeouts.get(test, 60),
+           suite: suites)
+    endforeach
+  endforeach
+endforeach
+
+run_target('precache-functional',
+           depends: precache_all,
+           command: ['true'])
diff --git a/tests/functional/qemu_test/__init__.py b/tests/functional/qemu_test/__init__.py
new file mode 100644
index 0000000000..f33282efe8
--- /dev/null
+++ b/tests/functional/qemu_test/__init__.py
@@ -0,0 +1,14 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+
+from .asset import Asset
+from .config import BUILD_DIR
+from .cmd import has_cmd, has_cmds, run_cmd, is_readable_executable_file, \
+    interrupt_interactive_console_until_pattern, wait_for_console_pattern, \
+    exec_command, exec_command_and_wait_for_pattern, get_qemu_img
+from .testcase import QemuBaseTest, QemuUserTest, QemuSystemTest
diff --git a/tests/functional/qemu_test/asset.py b/tests/functional/qemu_test/asset.py
new file mode 100644
index 0000000000..d3be2aff82
--- /dev/null
+++ b/tests/functional/qemu_test/asset.py
@@ -0,0 +1,171 @@
+# Test utilities for fetching & caching assets
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import hashlib
+import logging
+import os
+import subprocess
+import sys
+import unittest
+import urllib.request
+from time import sleep
+from pathlib import Path
+from shutil import copyfileobj
+
+
+# Instances of this class must be declared as class level variables
+# starting with a name "ASSET_". This enables the pre-caching logic
+# to easily find all referenced assets and download them prior to
+# execution of the tests.
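+#
+# A minimal sketch (the URL and digest below are placeholders, not a real
+# asset):
+#
+#     class MyKernelTest(QemuSystemTest):
+#         ASSET_KERNEL = Asset(
+#             'https://example.org/path/to/vmlinuz',
+#             '<sha256-or-sha512-hex-digest>')
+#
+#         def test_boot(self):
+#             kernel_path = self.ASSET_KERNEL.fetch()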
+class Asset:
+
+    def __init__(self, url, hashsum):
+        self.url = url
+        self.hash = hashsum
+        cache_dir_env = os.getenv('QEMU_TEST_CACHE_DIR')
+        if cache_dir_env:
+            self.cache_dir = Path(cache_dir_env, "download")
+        else:
+            self.cache_dir = Path(Path("~").expanduser(),
+                                  ".cache", "qemu", "download")
+        self.cache_file = Path(self.cache_dir, hashsum)
+        self.log = logging.getLogger('qemu-test')
+
+    def __repr__(self):
+        return "Asset: url=%s hash=%s cache=%s" % (
+            self.url, self.hash, self.cache_file)
+
+    def _check(self, cache_file):
+        if self.hash is None:
+            return True
+        if len(self.hash) == 64:
+            sum_prog = 'sha256sum'
+        elif len(self.hash) == 128:
+            sum_prog = 'sha512sum'
+        else:
+            raise Exception("unknown hash type")
+
+        checksum = subprocess.check_output(
+            [sum_prog, str(cache_file)]).split()[0]
+        return self.hash == checksum.decode("utf-8")
+
+    def valid(self):
+        return self.cache_file.exists() and self._check(self.cache_file)
+
+    def _wait_for_other_download(self, tmp_cache_file):
+        # Another thread already seems to download the asset, so wait until
+        # it is done, while also checking the size to see whether it is stuck
+        try:
+            current_size = tmp_cache_file.stat().st_size
+            new_size = current_size
+        except:
+            if os.path.exists(self.cache_file):
+                return True
+            raise
+        waittime = lastchange = 600
+        while waittime > 0:
+            sleep(1)
+            waittime -= 1
+            try:
+                new_size = tmp_cache_file.stat().st_size
+            except:
+                if os.path.exists(self.cache_file):
+                    return True
+                raise
+            if new_size != current_size:
+                lastchange = waittime
+                current_size = new_size
+            elif lastchange - waittime > 90:
+                return False
+
+        self.log.debug("Time out while waiting for %s!", tmp_cache_file)
+        raise
+
+    def fetch(self):
+        if not self.cache_dir.exists():
+            self.cache_dir.mkdir(parents=True, exist_ok=True)
+
+        if self.valid():
+            self.log.debug("Using cached asset %s for %s",
+                           self.cache_file, self.url)
+            return str(self.cache_file)
+
+        if os.environ.get("QEMU_TEST_NO_DOWNLOAD", False):
+            raise Exception("Asset cache is invalid and downloads disabled")
+
+        self.log.info("Downloading %s to %s...", self.url, self.cache_file)
+        tmp_cache_file = self.cache_file.with_suffix(".download")
+
+        for retries in range(3):
+            try:
+                with tmp_cache_file.open("xb") as dst:
+                    with urllib.request.urlopen(self.url) as resp:
+                        copyfileobj(resp, dst)
+                break
+            except FileExistsError:
+                self.log.debug("%s already exists, "
+                               "waiting for other thread to finish...",
+                               tmp_cache_file)
+                if self._wait_for_other_download(tmp_cache_file):
+                    return str(self.cache_file)
+                self.log.debug("%s seems to be stale, "
+                               "deleting and retrying download...",
+                               tmp_cache_file)
+                tmp_cache_file.unlink()
+                continue
+            except Exception as e:
+                self.log.error("Unable to download %s: %s", self.url, e)
+                tmp_cache_file.unlink()
+                raise
+
+        try:
+            # Set these just for informational purposes
+            os.setxattr(str(tmp_cache_file), "user.qemu-asset-url",
+                        self.url.encode('utf8'))
+            os.setxattr(str(tmp_cache_file), "user.qemu-asset-hash",
+                        self.hash.encode('utf8'))
+        except Exception as e:
+            self.log.debug("Unable to set xattr on %s: %s", tmp_cache_file, e)
+            pass
+
+        if not self._check(tmp_cache_file):
+            tmp_cache_file.unlink()
+            raise Exception("Hash of %s does not match %s" %
+                            (self.url, self.hash))
+        tmp_cache_file.replace(self.cache_file)
+
+        self.log.info("Cached %s at %s" % (self.url, self.cache_file))
+        return str(self.cache_file)
+
+    def precache_test(test):
+        log = logging.getLogger('qemu-test')
+        log.setLevel(logging.DEBUG)
+        handler = logging.StreamHandler(sys.stdout)
+        handler.setLevel(logging.DEBUG)
+        formatter = logging.Formatter(
+            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+        handler.setFormatter(formatter)
+        log.addHandler(handler)
+        for name, asset in vars(test.__class__).items():
+            if name.startswith("ASSET_") and type(asset) == Asset:
+                log.info("Attempting to cache '%s'" % asset)
+                asset.fetch()
+        log.removeHandler(handler)
+
+    def precache_suite(suite):
+        for test in suite:
+            if isinstance(test, unittest.TestSuite):
+                Asset.precache_suite(test)
+            elif isinstance(test, unittest.TestCase):
+                Asset.precache_test(test)
+
+    def precache_suites(path, cacheTstamp):
+        loader = unittest.loader.defaultTestLoader
+        tests = loader.loadTestsFromNames([path], None)
+
+        with open(cacheTstamp, "w") as fh:
+            Asset.precache_suite(tests)
diff --git a/tests/functional/qemu_test/cmd.py b/tests/functional/qemu_test/cmd.py
new file mode 100644
index 0000000000..3acd617324
--- /dev/null
+++ b/tests/functional/qemu_test/cmd.py
@@ -0,0 +1,193 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# Original Author (Avocado-based tests):
+#  Cleber Rosa <crosa@redhat.com>
+#
+# Adaption for standalone version:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import logging
+import os
+import os.path
+import subprocess
+
+from .config import BUILD_DIR
+
+
+def has_cmd(name, args=None):
+    """
+    This function is for use in a @skipUnless decorator, e.g.:
+
+        @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true')))
+        def test_something_that_needs_sudo(self):
+            ...
+    """
+
+    if args is None:
+        args = ('which', name)
+
+    try:
+        _, stderr, exitcode = run_cmd(args)
+    except Exception as e:
+        exitcode = -1
+        stderr = str(e)
+
+    if exitcode != 0:
+        cmd_line = ' '.join(args)
+        err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}'
+        return (False, err)
+    else:
+        return (True, '')
+
+def has_cmds(*cmds):
+    """
+    This function is for use in a @skipUnless decorator and
+    allows checking for the availability of multiple commands, e.g.:
+
+        @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')),
+                              'cmd2', 'cmd3'))
+        def test_something_that_needs_cmd1_and_cmd2(self):
+            ...
+    """
+
+    for cmd in cmds:
+        if isinstance(cmd, str):
+            cmd = (cmd,)
+
+        ok, errstr = has_cmd(*cmd)
+        if not ok:
+            return (False, errstr)
+
+    return (True, '')
+
+def run_cmd(args):
+    subp = subprocess.Popen(args,
+                            stdout=subprocess.PIPE,
+                            stderr=subprocess.PIPE,
+                            universal_newlines=True)
+    stdout, stderr = subp.communicate()
+    ret = subp.returncode
+
+    return (stdout, stderr, ret)
+
+def is_readable_executable_file(path):
+    return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK)
+
+def _console_interaction(test, success_message, failure_message,
+                         send_string, keep_sending=False, vm=None):
+    assert not keep_sending or send_string
+    if vm is None:
+        vm = test.vm
+    console = vm.console_file
+    console_logger = logging.getLogger('console')
+    while True:
+        if send_string:
+            vm.console_socket.sendall(send_string.encode())
+            if not keep_sending:
+                send_string = None # send only once
+
+        # Only consume console output if waiting for something
+        if success_message is None and failure_message is None:
+            if send_string is None:
+                break
+            continue
+
+        try:
+            msg = console.readline().decode().strip()
+        except UnicodeDecodeError:
+            msg = None
+        if not msg:
+            continue
+        console_logger.debug(msg)
+        if success_message is None or success_message in msg:
+            break
+        if failure_message and failure_message in msg:
+            console.close()
+            fail = 'Failure message found in console: "%s". Expected: "%s"' % \
+                    (failure_message, success_message)
+            test.fail(fail)
+
+def interrupt_interactive_console_until_pattern(test, success_message,
+                                                failure_message=None,
+                                                interrupt_string='\r'):
+    """
+    Keep sending a string to interrupt a console prompt, while logging the
+    console output. Typical use case is to break a boot loader prompt, such:
+
+        Press a key within 5 seconds to interrupt boot process.
+        5
+        4
+        3
+        2
+        1
+        Booting default image...
+
+    :param test: a test containing a VM that will have its console
+                 read and probed for a success or failure message
+    :type test: :class:`qemu_test.QemuSystemTest`
+    :param success_message: if this message appears, test succeeds
+    :param failure_message: if this message appears, test fails
+    :param interrupt_string: a string to send to the console before trying
+                             to read a new line
+    """
+    _console_interaction(test, success_message, failure_message,
+                         interrupt_string, True)
+
+def wait_for_console_pattern(test, success_message, failure_message=None,
+                             vm=None):
+    """
+    Waits for messages to appear on the console, while logging the content
+
+    :param test: a test containing a VM that will have its console
+                 read and probed for a success or failure message
+    :type test: :class:`qemu_test.QemuSystemTest`
+    :param success_message: if this message appears, test succeeds
+    :param failure_message: if this message appears, test fails
+    """
+    _console_interaction(test, success_message, failure_message, None, vm=vm)
+
+def exec_command(test, command):
+    """
+    Send a command to a console (appending a carriage return), while logging
+    the content.
+
+    :param test: a test containing a VM.
+    :type test: :class:`qemu_test.QemuSystemTest`
+    :param command: the command to send
+    :type command: str
+    """
+    _console_interaction(test, None, None, command + '\r')
+
+def exec_command_and_wait_for_pattern(test, command,
+                                      success_message, failure_message=None):
+    """
+    Send a command to a console (appending a carriage return), then wait
+    for success_message to appear on the console, while logging the
+    content. Mark the test as failed if failure_message is found instead.
+
+    :param test: a test containing a VM that will have its console
+                 read and probed for a success or failure message
+    :type test: :class:`qemu_test.QemuSystemTest`
+    :param command: the command to send
+    :param success_message: if this message appears, test succeeds
+    :param failure_message: if this message appears, test fails
+    """
+    _console_interaction(test, success_message, failure_message, command + '\r')
+
+def get_qemu_img(test):
+    test.log.debug('Looking for and selecting a qemu-img binary')
+
+    # If qemu-img has been built, use it, otherwise the system wide one
+    # will be used.
+    qemu_img = os.path.join(BUILD_DIR, 'qemu-img')
+    if os.path.exists(qemu_img):
+        return qemu_img
+    if has_cmd('qemu-img')[0]:
+        return 'qemu-img'
+    test.skipTest('Could not find "qemu-img", which is required to '
+                  'create temporary images')
diff --git a/tests/functional/qemu_test/config.py b/tests/functional/qemu_test/config.py
new file mode 100644
index 0000000000..edd75b7fd0
--- /dev/null
+++ b/tests/functional/qemu_test/config.py
@@ -0,0 +1,36 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# Original Author (Avocado-based tests):
+#  Cleber Rosa <crosa@redhat.com>
+#
+# Adaption for standalone version:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import os
+from pathlib import Path
+
+
+def _source_dir():
+    # Determine top-level directory of the QEMU sources
+    return Path(__file__).parent.parent.parent.parent
+
+def _build_dir():
+    root = os.getenv('QEMU_BUILD_ROOT')
+    if root is not None:
+        return Path(root)
+    # Makefile.mtest only exists in build dir, so if it is available, use CWD
+    if os.path.exists('Makefile.mtest'):
+        return Path(os.getcwd())
+
+    root = os.path.join(_source_dir(), 'build')
+    if os.path.exists(root):
+        return Path(root)
+
+    raise Exception("Cannot identify build dir, set QEMU_BUILD_ROOT")
+
+BUILD_DIR = _build_dir()
diff --git a/tests/functional/qemu_test/tesseract.py b/tests/functional/qemu_test/tesseract.py
new file mode 100644
index 0000000000..c4087b7c11
--- /dev/null
+++ b/tests/functional/qemu_test/tesseract.py
@@ -0,0 +1,35 @@
+# ...
+#
+# Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import re
+import logging
+
+from . import has_cmd, run_cmd
+
+def tesseract_available(expected_version):
+    if not has_cmd('tesseract')[0]:
+        return False
+    (stdout, stderr, ret) = run_cmd(['tesseract', '--version'])
+    if ret:
+        return False
+    version = stdout.split()[1]
+    return int(version.split('.')[0]) >= expected_version
+
+def tesseract_ocr(image_path, tesseract_args=''):
+    console_logger = logging.getLogger('console')
+    console_logger.debug(image_path)
+    (stdout, stderr, ret) = run_cmd(['tesseract', image_path,
+                                     'stdout'])
+    if ret:
+        return None
+    lines = []
+    for line in stdout.split('\n'):
+        sline = line.strip()
+        if len(sline):
+            console_logger.debug(sline)
+            lines += [sline]
+    return lines
diff --git a/tests/functional/qemu_test/testcase.py b/tests/functional/qemu_test/testcase.py
new file mode 100644
index 0000000000..aa0146265a
--- /dev/null
+++ b/tests/functional/qemu_test/testcase.py
@@ -0,0 +1,202 @@
+# Test class and utilities for functional tests
+#
+# Copyright 2018, 2024 Red Hat, Inc.
+#
+# Original Author (Avocado-based tests):
+#  Cleber Rosa <crosa@redhat.com>
+#
+# Adaption for standalone version:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import logging
+import os
+import subprocess
+import pycotap
+import sys
+import unittest
+import uuid
+
+from qemu.machine import QEMUMachine
+from qemu.utils import kvm_available, tcg_available
+
+from .asset import Asset
+from .cmd import run_cmd
+from .config import BUILD_DIR
+
+
+class QemuBaseTest(unittest.TestCase):
+
+    qemu_bin = os.getenv('QEMU_TEST_QEMU_BINARY')
+    arch = None
+
+    workdir = None
+    log = None
+    logdir = None
+
+    def setUp(self, bin_prefix):
+        self.assertIsNotNone(self.qemu_bin, 'QEMU_TEST_QEMU_BINARY must be set')
+        self.arch = self.qemu_bin.split('-')[-1]
+
+        self.workdir = os.path.join(BUILD_DIR, 'tests/functional', self.arch,
+                                    self.id())
+        os.makedirs(self.workdir, exist_ok=True)
+
+        self.logdir = self.workdir
+        self.log = logging.getLogger('qemu-test')
+        self.log.setLevel(logging.DEBUG)
+        self._log_fh = logging.FileHandler(os.path.join(self.logdir,
+                                                        'base.log'), mode='w')
+        self._log_fh.setLevel(logging.DEBUG)
+        fileFormatter = logging.Formatter(
+            '%(asctime)s - %(levelname)s: %(message)s')
+        self._log_fh.setFormatter(fileFormatter)
+        self.log.addHandler(self._log_fh)
+
+    def tearDown(self):
+        self.log.removeHandler(self._log_fh)
+
+    def main():
+        path = os.path.basename(sys.argv[0])[:-3]
+
+        cache = os.environ.get("QEMU_TEST_PRECACHE", None)
+        if cache is not None:
+            Asset.precache_suites(path, cache)
+            return
+
+        tr = pycotap.TAPTestRunner(message_log = pycotap.LogMode.LogToError,
+                                   test_output_log = pycotap.LogMode.LogToError)
+        unittest.main(module = None, testRunner = tr, argv=["__dummy__", path])
+
+
+class QemuUserTest(QemuBaseTest):
+
+    def setUp(self):
+        super().setUp('qemu-')
+        self._ldpath = []
+
+    def add_ldpath(self, ldpath):
+        self._ldpath.append(os.path.abspath(ldpath))
+
+    def run_cmd(self, bin_path, args=[]):
+        return subprocess.run([self.qemu_bin]
+                              + ["-L %s" % ldpath for ldpath in self._ldpath]
+                              + [bin_path]
+                              + args,
+                              text=True, capture_output=True)
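+
+    # Rough usage sketch from a subclass (the binary path is hypothetical):
+    #
+    #     self.add_ldpath('/path/to/guest/sysroot')
+    #     res = self.run_cmd('/path/to/guest/binary', ['--version'])
+    #     self.assertEqual(res.returncode, 0)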
+
+class QemuSystemTest(QemuBaseTest):
+    """Facilitates system emulation tests."""
+
+    cpu = None
+    machine = None
+    _machinehelp = None
+
+    def setUp(self):
+        self._vms = {}
+
+        super().setUp('qemu-system-')
+
+        console_log = logging.getLogger('console')
+        console_log.setLevel(logging.DEBUG)
+        self._console_log_fh = logging.FileHandler(os.path.join(self.workdir,
+                                                   'console.log'), mode='w')
+        self._console_log_fh.setLevel(logging.DEBUG)
+        fileFormatter = logging.Formatter('%(asctime)s: %(message)s')
+        self._console_log_fh.setFormatter(fileFormatter)
+        console_log.addHandler(self._console_log_fh)
+
+    def set_machine(self, machinename):
+        # TODO: We should use QMP to get the list of available machines
+        if not self._machinehelp:
+            self._machinehelp = run_cmd([self.qemu_bin, '-M', 'help'])[0]
+        if self._machinehelp.find(machinename) < 0:
+            self.skipTest('no support for machine ' + machinename)
+        self.machine = machinename
+
+    def require_accelerator(self, accelerator):
+        """
+        Requires an accelerator to be available for the test to continue
+
+        It takes into account the currently set qemu binary.
+
+        If the check fails, the test is canceled.  If the check itself
+        for the given accelerator is not available, the test is also
+        canceled.
+
+        :param accelerator: name of the accelerator, such as "kvm" or "tcg"
+        :type accelerator: str
+        """
+        checker = {'tcg': tcg_available,
+                   'kvm': kvm_available}.get(accelerator)
+        if checker is None:
+            self.skipTest("Don't know how to check for the presence "
+                          "of accelerator %s" % accelerator)
+        if not checker(qemu_bin=self.qemu_bin):
+            self.skipTest("%s accelerator does not seem to be "
+                          "available" % accelerator)
+
+    def require_netdev(self, netdevname):
+        netdevhelp = run_cmd([self.qemu_bin,
+                             '-M', 'none', '-netdev', 'help'])[0]
+        if netdevhelp.find('\n' + netdevname + '\n') < 0:
+            self.skipTest('no support for ' + netdevname + ' networking')
+
+    def require_device(self, devicename):
+        devhelp = run_cmd([self.qemu_bin,
+                           '-M', 'none', '-device', 'help'])[0]
+        if devhelp.find(devicename) < 0:
+            self.skipTest('no support for device ' + devicename)
+
+    def _new_vm(self, name, *args):
+        vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir)
+        self.log.debug('QEMUMachine "%s" created', name)
+        self.log.debug('QEMUMachine "%s" temp_dir: %s', name, vm.temp_dir)
+        self.log.debug('QEMUMachine "%s" log_dir: %s', name, vm.log_dir)
+        if args:
+            vm.add_args(*args)
+        return vm
+
+    @property
+    def vm(self):
+        return self.get_vm(name='default')
+
+    def get_vm(self, *args, name=None):
+        if not name:
+            name = str(uuid.uuid4())
+        if self._vms.get(name) is None:
+            self._vms[name] = self._new_vm(name, *args)
+            if self.cpu is not None:
+                self._vms[name].add_args('-cpu', self.cpu)
+            if self.machine is not None:
+                self._vms[name].set_machine(self.machine)
+        return self._vms[name]
+
+    def set_vm_arg(self, arg, value):
+        """
+        Set an argument in the list of extra arguments to be given to the
+        QEMU binary. If the argument already exists then its value is
+        replaced.
+
+        :param arg: the QEMU argument, such as "-cpu" in "-cpu host"
+        :type arg: str
+        :param value: the argument value, such as "host" in "-cpu host"
+        :type value: str
+        """
+        if not arg or not value:
+            return
+        if arg not in self.vm.args:
+            self.vm.args.extend([arg, value])
+        else:
+            idx = self.vm.args.index(arg) + 1
+            if idx < len(self.vm.args):
+                self.vm.args[idx] = value
+            else:
+                self.vm.args.append(value)
+
+    def tearDown(self):
+        for vm in self._vms.values():
+            vm.shutdown()
+        logging.getLogger('console').removeHandler(self._console_log_fh)
+        super().tearDown()
diff --git a/tests/functional/qemu_test/utils.py b/tests/functional/qemu_test/utils.py
new file mode 100644
index 0000000000..2a1cb60d38
--- /dev/null
+++ b/tests/functional/qemu_test/utils.py
@@ -0,0 +1,56 @@
+# Utilities for python-based QEMU tests
+#
+# Copyright 2024 Red Hat, Inc.
+#
+# Authors:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import gzip
+import lzma
+import os
+import shutil
+import subprocess
+import tarfile
+
+def archive_extract(archive, dest_dir, member=None):
+    with tarfile.open(archive) as tf:
+        if hasattr(tarfile, 'data_filter'):
+            tf.extraction_filter = getattr(tarfile, 'data_filter',
+                                           (lambda member, path: member))
+        if member:
+            tf.extract(member=member, path=dest_dir)
+        else:
+            tf.extractall(path=dest_dir)
+
+def gzip_uncompress(gz_path, output_path):
+    if os.path.exists(output_path):
+        return
+    with gzip.open(gz_path, 'rb') as gz_in:
+        try:
+            with open(output_path, 'wb') as raw_out:
+                shutil.copyfileobj(gz_in, raw_out)
+        except:
+            os.remove(output_path)
+            raise
+
+def lzma_uncompress(xz_path, output_path):
+    if os.path.exists(output_path):
+        return
+    with lzma.open(xz_path, 'rb') as lzma_in:
+        try:
+            with open(output_path, 'wb') as raw_out:
+                shutil.copyfileobj(lzma_in, raw_out)
+        except:
+            os.remove(output_path)
+            raise
+
+def cpio_extract(cpio_handle, output_path):
+    cwd = os.getcwd()
+    os.chdir(output_path)
+    try:
+        subprocess.run(['cpio', '-i'],
+                       input=cpio_handle.read(),
+                       stderr=subprocess.DEVNULL)
+    finally:
+        # restore the original working directory even if cpio fails
+        os.chdir(cwd)
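+
+# Illustrative usage of these helpers from a test (mirrors the tests added
+# later in this series; asset names and member paths are examples only):
+#
+#     file_path = self.ASSET_BIOS.fetch()
+#     archive_extract(file_path, dest_dir=self.workdir,
+#                     member="day18/barebox.canon-a1100.bin")
+#
+#     with bz2.open(rootfs_path_bz2, 'rb') as cpio_handle:
+#         cpio_extract(cpio_handle, self.workdir)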
diff --git a/tests/functional/test_aarch64_sbsaref.py b/tests/functional/test_aarch64_sbsaref.py
new file mode 100755
index 0000000000..f31c2a60b6
--- /dev/null
+++ b/tests/functional/test_aarch64_sbsaref.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# SPDX-FileCopyrightText: 2023-2024 Linaro Ltd.
+# SPDX-FileContributor: Philippe Mathieu-Daudé <philmd@linaro.org>
+# SPDX-FileContributor: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import interrupt_interactive_console_until_pattern
+from qemu_test.utils import lzma_uncompress
+from unittest import skipUnless
+
+
+class Aarch64SbsarefMachine(QemuSystemTest):
+    """
+    As firmware runs at a higher privilege level than the hypervisor we
+    can only run these tests under TCG emulation.
+    """
+
+    timeout = 180
+
+    ASSET_FLASH0 = Asset(
+        ('https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/'
+         '20240619-148232/edk2/SBSA_FLASH0.fd.xz'),
+        '0c954842a590988f526984de22e21ae0ab9cb351a0c99a8a58e928f0c7359cf7')
+
+    ASSET_FLASH1 = Asset(
+        ('https://artifacts.codelinaro.org/artifactory/linaro-419-sbsa-ref/'
+         '20240619-148232/edk2/SBSA_FLASH1.fd.xz'),
+        'c6ec39374c4d79bb9e9cdeeb6db44732d90bb4a334cec92002b3f4b9cac4b5ee')
+
+    def fetch_firmware(self):
+        """
+        Flash volumes generated using:
+
+        Toolchain from Debian:
+        aarch64-linux-gnu-gcc (Debian 12.2.0-14) 12.2.0
+
+        Used components:
+
+        - Trusted Firmware         v2.11.0
+        - Tianocore EDK2           4d4f569924
+        - Tianocore EDK2-platforms 3f08401
+
+        """
+
+        # Secure BootRom (TF-A code)
+        fs0_xz_path = self.ASSET_FLASH0.fetch()
+        fs0_path = os.path.join(self.workdir, "SBSA_FLASH0.fd")
+        lzma_uncompress(fs0_xz_path, fs0_path)
+
+        # Non-secure rom (UEFI and EFI variables)
+        fs1_xz_path = self.ASSET_FLASH1.fetch()
+        fs1_path = os.path.join(self.workdir, "SBSA_FLASH1.fd")
+        lzma_uncompress(fs1_xz_path, fs1_path)
+
+        for path in [fs0_path, fs1_path]:
+            with open(path, "ab+") as fd:
+                fd.truncate(256 << 20)  # Expand volumes to 256MiB
+
+        self.set_machine('sbsa-ref')
+        self.vm.set_console()
+        self.vm.add_args(
+            "-drive", f"if=pflash,file={fs0_path},format=raw",
+            "-drive", f"if=pflash,file={fs1_path},format=raw",
+        )
+
+    def test_sbsaref_edk2_firmware(self):
+
+        self.fetch_firmware()
+
+        self.vm.add_args('-cpu', 'cortex-a57')
+        self.vm.launch()
+
+        # TF-A boot sequence:
+        #
+        # https://github.com/ARM-software/arm-trusted-firmware/blob/v2.8.0/\
+        #     docs/design/trusted-board-boot.rst#trusted-board-boot-sequence
+        # https://trustedfirmware-a.readthedocs.io/en/v2.8/\
+        #     design/firmware-design.html#cold-boot
+
+        # AP Trusted ROM
+        wait_for_console_pattern(self, "Booting Trusted Firmware")
+        wait_for_console_pattern(self, "BL1: v2.11.0(release):")
+        wait_for_console_pattern(self, "BL1: Booting BL2")
+
+        # Trusted Boot Firmware
+        wait_for_console_pattern(self, "BL2: v2.11.0(release)")
+        wait_for_console_pattern(self, "Booting BL31")
+
+        # EL3 Runtime Software
+        wait_for_console_pattern(self, "BL31: v2.11.0(release)")
+
+        # Non-trusted Firmware
+        wait_for_console_pattern(self, "UEFI firmware (version 1.0")
+        interrupt_interactive_console_until_pattern(self, "QEMU SBSA-REF Machine")
+
+
+    ASSET_ALPINE_ISO = Asset(
+        ('https://dl-cdn.alpinelinux.org/'
+         'alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso'),
+        '5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027')
+
+    # This tests the whole boot chain from EFI to Userspace
+    # We only boot a whole OS for the current top level CPU and GIC
+    # Other test profiles should use more minimal boots
+    def boot_alpine_linux(self, cpu):
+        self.fetch_firmware()
+
+        iso_path = self.ASSET_ALPINE_ISO.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args(
+            "-cpu", cpu,
+            "-drive", f"file={iso_path},media=cdrom,format=raw",
+        )
+
+        self.vm.launch()
+        wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17")
+
+    def test_sbsaref_alpine_linux_cortex_a57(self):
+        self.boot_alpine_linux("cortex-a57")
+
+    def test_sbsaref_alpine_linux_neoverse_n1(self):
+        self.boot_alpine_linux("neoverse-n1")
+
+    def test_sbsaref_alpine_linux_max_pauth_off(self):
+        self.boot_alpine_linux("max,pauth=off")
+
+    def test_sbsaref_alpine_linux_max_pauth_impdef(self):
+        self.boot_alpine_linux("max,pauth-impdef=on")
+
+    @skipUnless(os.getenv('QEMU_TEST_TIMEOUT_EXPECTED'), 'Test might timeout')
+    def test_sbsaref_alpine_linux_max(self):
+        self.boot_alpine_linux("max")
+
+
+    ASSET_OPENBSD_ISO = Asset(
+        ('https://cdn.openbsd.org/pub/OpenBSD/7.3/arm64/miniroot73.img'),
+        '7fc2c75401d6f01fbfa25f4953f72ad7d7c18650056d30755c44b9c129b707e5')
+
+    # This tests the whole boot chain from EFI to Userspace
+    # We only boot a whole OS for the current top level CPU and GIC
+    # Other test profiles should use more minimal boots
+    def boot_openbsd73(self, cpu):
+        self.fetch_firmware()
+
+        img_path = self.ASSET_OPENBSD_ISO.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args(
+            "-cpu", cpu,
+            "-drive", f"file={img_path},format=raw,snapshot=on",
+        )
+
+        self.vm.launch()
+        wait_for_console_pattern(self,
+                                 "Welcome to the OpenBSD/arm64"
+                                 " 7.3 installation program.")
+
+    def test_sbsaref_openbsd73_cortex_a57(self):
+        self.boot_openbsd73("cortex-a57")
+
+    def test_sbsaref_openbsd73_neoverse_n1(self):
+        self.boot_openbsd73("neoverse-n1")
+
+    def test_sbsaref_openbsd73_max_pauth_off(self):
+        self.boot_openbsd73("max,pauth=off")
+
+    @skipUnless(os.getenv('QEMU_TEST_TIMEOUT_EXPECTED'), 'Test might timeout')
+    def test_sbsaref_openbsd73_max_pauth_impdef(self):
+        self.boot_openbsd73("max,pauth-impdef=on")
+
+    @skipUnless(os.getenv('QEMU_TEST_TIMEOUT_EXPECTED'), 'Test might timeout')
+    def test_sbsaref_openbsd73_max(self):
+        self.boot_openbsd73("max")
+
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
new file mode 100755
index 0000000000..c967da41b4
--- /dev/null
+++ b/tests/functional/test_aarch64_virt.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots various Linux systems and checks the
+# console output.
+#
+# Copyright (c) 2022 Linaro Ltd.
+#
+# Author:
+#  Alex Bennée <alex.bennee@linaro.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import time
+import os
+import logging
+
+from qemu_test import BUILD_DIR
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command, wait_for_console_pattern
+from qemu_test import get_qemu_img, run_cmd
+
+
+class Aarch64VirtMachine(QemuSystemTest):
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+    timeout = 360
+
+    def wait_for_console_pattern(self, success_message, vm=None):
+        wait_for_console_pattern(self, success_message,
+                                 failure_message='Kernel panic - not syncing',
+                                 vm=vm)
+
+    ASSET_ALPINE_ISO = Asset(
+        ('https://dl-cdn.alpinelinux.org/'
+         'alpine/v3.17/releases/aarch64/alpine-standard-3.17.2-aarch64.iso'),
+        '5a36304ecf039292082d92b48152a9ec21009d3a62f459de623e19c4bd9dc027')
+
+    # This tests the whole boot chain from EFI to Userspace
+    # We only boot a whole OS for the current top level CPU and GIC
+    # Other test profiles should use more minimal boots
+    def test_alpine_virt_tcg_gic_max(self):
+        iso_path = self.ASSET_ALPINE_ISO.fetch()
+
+        self.set_machine('virt')
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0')
+        self.require_accelerator("tcg")
+
+        self.vm.add_args("-accel", "tcg")
+        self.vm.add_args("-cpu", "max,pauth-impdef=on")
+        self.vm.add_args("-machine",
+                         "virt,acpi=on,"
+                         "virtualization=on,"
+                         "mte=on,"
+                         "gic-version=max,iommu=smmuv3")
+        self.vm.add_args("-smp", "2", "-m", "1024")
+        self.vm.add_args('-bios', os.path.join(BUILD_DIR, 'pc-bios',
+                                               'edk2-aarch64-code.fd'))
+        self.vm.add_args("-drive", f"file={iso_path},media=cdrom,format=raw")
+        self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
+        self.vm.add_args('-object', 'rng-random,id=rng0,filename=/dev/urandom')
+
+        self.vm.launch()
+        self.wait_for_console_pattern('Welcome to Alpine Linux 3.17')
+
+
+    ASSET_KERNEL = Asset(
+        ('https://fileserver.linaro.org/s/'
+         'z6B2ARM7DQT3HWN/download'),
+        '12a54d4805cda6ab647cb7c7bbdb16fafb3df400e0d6f16445c1a0436100ef8d')
+
+    def common_aarch64_virt(self, machine):
+        """
+        Common code to launch basic virt machine with kernel+initrd
+        and a scratch disk.
+        """
+        logger = logging.getLogger('aarch64_virt')
+
+        kernel_path = self.ASSET_KERNEL.fetch()
+
+        self.set_machine('virt')
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0')
+        self.require_accelerator("tcg")
+        self.vm.add_args('-cpu', 'max,pauth-impdef=on',
+                         '-machine', machine,
+                         '-accel', 'tcg',
+                         '-kernel', kernel_path,
+                         '-append', kernel_command_line)
+
+        # A RNG offers an easy way to generate a few IRQs
+        self.vm.add_args('-device', 'virtio-rng-pci,rng=rng0')
+        self.vm.add_args('-object',
+                         'rng-random,id=rng0,filename=/dev/urandom')
+
+        # Also add a scratch block device
+        logger.info('creating scratch qcow2 image')
+        image_path = os.path.join(self.workdir, 'scratch.qcow2')
+        qemu_img = get_qemu_img(self)
+        run_cmd([qemu_img, 'create', '-f', 'qcow2', image_path, '8M'])
+
+        # Add the device
+        self.vm.add_args('-blockdev',
+                         f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch")
+        self.vm.add_args('-device',
+                         'virtio-blk-device,drive=scratch')
+
+        self.vm.launch()
+        self.wait_for_console_pattern('Welcome to Buildroot')
+        time.sleep(0.1)
+        exec_command(self, 'root')
+        time.sleep(0.1)
+        exec_command(self, 'dd if=/dev/hwrng of=/dev/vda bs=512 count=4')
+        time.sleep(0.1)
+        exec_command(self, 'md5sum /dev/vda')
+        time.sleep(0.1)
+        exec_command(self, 'cat /proc/interrupts')
+        time.sleep(0.1)
+        exec_command(self, 'cat /proc/self/maps')
+        time.sleep(0.1)
+
+    def test_aarch64_virt_gicv3(self):
+        self.common_aarch64_virt("virt,gic_version=3")
+
+    def test_aarch64_virt_gicv2(self):
+        self.common_aarch64_virt("virt,gic-version=2")
+
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_acpi_bits.py b/tests/functional/test_acpi_bits.py
new file mode 100755
index 0000000000..ee40647d5b
--- /dev/null
+++ b/tests/functional/test_acpi_bits.py
@@ -0,0 +1,410 @@
+#!/usr/bin/env python3
+#
+# Exercise QEMU generated ACPI/SMBIOS tables using biosbits,
+# https://biosbits.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+#
+# Author:
+#  Ani Sinha <anisinha@redhat.com>
+
+# pylint: disable=invalid-name
+# pylint: disable=consider-using-f-string
+
+"""
+This is QEMU ACPI/SMBIOS functional tests using biosbits.
+Biosbits is available originally at https://biosbits.org/.
+This test uses a fork of the upstream bits and has numerous fixes
+including an upgraded acpica. The fork is located here:
+https://gitlab.com/qemu-project/biosbits-bits .
+"""
+
+import logging
+import os
+import platform
+import re
+import shutil
+import subprocess
+import tarfile
+import tempfile
+import time
+import zipfile
+
+from pathlib import Path
+from typing import (
+    List,
+    Optional,
+    Sequence,
+)
+from qemu.machine import QEMUMachine
+from unittest import skipIf
+from qemu_test import QemuBaseTest, Asset
+
+deps = ["xorriso", "mformat"] # dependent tools needed in the test setup/box.
+supported_platforms = ['x86_64'] # supported test platforms.
+
+# default timeout of 120 secs is sometimes not enough for bits test.
+BITS_TIMEOUT = 200
+
+def which(tool):
+    """ looks up the full path for @tool, returns None if not found
+        or if @tool does not have executable permissions.
+    """
+    paths=os.getenv('PATH')
+    for p in paths.split(os.path.pathsep):
+        p = os.path.join(p, tool)
+        if os.path.exists(p) and os.access(p, os.X_OK):
+            return p
+    return None
+
+def missing_deps():
+    """ returns True if any of the test dependent tools are absent.
+    """
+    for dep in deps:
+        if which(dep) is None:
+            return True
+    return False
+
+def supported_platform():
+    """ checks if the test is running on a supported platform.
+    """
+    return platform.machine() in supported_platforms
+
+class QEMUBitsMachine(QEMUMachine): # pylint: disable=too-few-public-methods
+    """
+    A QEMU VM with isa-debugcon enabled and the bits ISO passed to the
+    QEMU command line via -cdrom.
+
+    """
+    def __init__(self,
+                 binary: str,
+                 args: Sequence[str] = (),
+                 wrapper: Sequence[str] = (),
+                 name: Optional[str] = None,
+                 base_temp_dir: str = "/var/tmp",
+                 debugcon_log: str = "debugcon-log.txt",
+                 debugcon_addr: str = "0x403",
+                 qmp_timer: Optional[float] = None):
+        # pylint: disable=too-many-arguments
+
+        if name is None:
+            name = "qemu-bits-%d" % os.getpid()
+        super().__init__(binary, args, wrapper=wrapper, name=name,
+                         base_temp_dir=base_temp_dir,
+                         qmp_timer=qmp_timer)
+        self.debugcon_log = debugcon_log
+        self.debugcon_addr = debugcon_addr
+        self.base_temp_dir = base_temp_dir
+
+    @property
+    def _base_args(self) -> List[str]:
+        args = super()._base_args
+        args.extend([
+            '-chardev',
+            'file,path=%s,id=debugcon' %os.path.join(self.base_temp_dir,
+                                                     self.debugcon_log),
+            '-device',
+            'isa-debugcon,iobase=%s,chardev=debugcon' %self.debugcon_addr,
+        ])
+        return args
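+
+    # With the defaults above, the extra arguments appended to the QEMU
+    # command line look roughly like this (base_temp_dir shown with its
+    # default value):
+    #     -chardev file,path=/var/tmp/debugcon-log.txt,id=debugcon
+    #     -device isa-debugcon,iobase=0x403,chardev=debugcon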
+
+    def base_args(self):
+        """return the base argument to QEMU binary"""
+        return self._base_args
+
+@skipIf(not supported_platform() or missing_deps(),
+        'unsupported platform or dependencies (%s) not installed' \
+        % ','.join(deps))
+class AcpiBitsTest(QemuBaseTest): #pylint: disable=too-many-instance-attributes
+    """
+    ACPI and SMBIOS tests using biosbits.
+    """
+    # on slower systems the test can take as long as 3 minutes to complete.
+    timeout = BITS_TIMEOUT
+
+    # following are some standard configuration constants
+    # gitlab CI does shallow clones of depth 20
+    BITS_INTERNAL_VER = 2020
+    # commit hash must match the artifact tag below
+    BITS_COMMIT_HASH = 'c7920d2b'
+    # this is the latest bits release as of today.
+    BITS_TAG = "qemu-bits-10262023"
+
+    ASSET_BITS = Asset(("https://gitlab.com/qemu-project/"
+                        "biosbits-bits/-/jobs/artifacts/%s/"
+                        "download?job=qemu-bits-build" % BITS_TAG),
+                       '1b8dd612c6831a6b491716a77acc486666aaa867051cdc34f7ce169c2e25f487')
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._vm = None
+        self._workDir = None
+        self._baseDir = None
+
+        self._debugcon_addr = '0x403'
+        self._debugcon_log = 'debugcon-log.txt'
+        self.logger = self.log
+
+    def _print_log(self, log):
+        self.logger.info('\nlogs from biosbits follow:')
+        self.logger.info('==========================================\n')
+        self.logger.info(log)
+        self.logger.info('==========================================\n')
+
+    def copy_bits_config(self):
+        """ copies the bios bits config file into bits.
+        """
+        config_file = 'bits-cfg.txt'
+        bits_config_dir = os.path.join(self._baseDir, 'acpi-bits',
+                                       'bits-config')
+        target_config_dir = os.path.join(self._workDir,
+                                         'bits-%d' %self.BITS_INTERNAL_VER,
+                                         'boot')
+        self.assertTrue(os.path.exists(bits_config_dir))
+        self.assertTrue(os.path.exists(target_config_dir))
+        self.assertTrue(os.access(os.path.join(bits_config_dir,
+                                               config_file), os.R_OK))
+        shutil.copy2(os.path.join(bits_config_dir, config_file),
+                     target_config_dir)
+        self.logger.info('copied config file %s to %s',
+                         config_file, target_config_dir)
+
+    def copy_test_scripts(self):
+        """copies the python test scripts into bits. """
+
+        bits_test_dir = os.path.join(self._baseDir, 'acpi-bits',
+                                     'bits-tests')
+        target_test_dir = os.path.join(self._workDir,
+                                       'bits-%d' %self.BITS_INTERNAL_VER,
+                                       'boot', 'python')
+
+        self.assertTrue(os.path.exists(bits_test_dir))
+        self.assertTrue(os.path.exists(target_test_dir))
+
+        for filename in os.listdir(bits_test_dir):
+            if os.path.isfile(os.path.join(bits_test_dir, filename)) and \
+               filename.endswith('.py2'):
+                # all test scripts are named with extension .py2 so that
+                # the test framework does not try to load them. These
+                # scripts are written for python 2.7, not python 3, so
+                # loading them directly would fail on their python 2
+                # specific syntax.
+                newfilename = os.path.splitext(filename)[0] + '.py'
+                shutil.copy2(os.path.join(bits_test_dir, filename),
+                             os.path.join(target_test_dir, newfilename))
+                self.logger.info('copied test file %s to %s',
+                                 filename, target_test_dir)
+
+                # now remove the pyc test file if it exists, otherwise the
+                # changes in the python test script won't be executed.
+                testfile_pyc = os.path.splitext(filename)[0] + '.pyc'
+                if os.access(os.path.join(target_test_dir, testfile_pyc),
+                             os.F_OK):
+                    os.remove(os.path.join(target_test_dir, testfile_pyc))
+                    self.logger.info('removed compiled file %s',
+                                     os.path.join(target_test_dir,
+                                     testfile_pyc))
+
+    def fix_mkrescue(self, mkrescue):
+        """ grub-mkrescue is a bash script with two variables, 'prefix' and
+            'libdir'. They must be pointed to the right location so that the
+            iso can be generated appropriately. We point the two variables to
+            the directory where we have extracted our pre-built bits grub
+            tarball.
+        """
+        grub_x86_64_mods = os.path.join(self._workDir, 'grub-inst-x86_64-efi')
+        grub_i386_mods = os.path.join(self._workDir, 'grub-inst')
+
+        self.assertTrue(os.path.exists(grub_x86_64_mods))
+        self.assertTrue(os.path.exists(grub_i386_mods))
+
+        new_script = ""
+        with open(mkrescue, 'r', encoding='utf-8') as filehandle:
+            orig_script = filehandle.read()
+            new_script = re.sub('(^prefix=)(.*)',
+                                r'\1"%s"' %grub_x86_64_mods,
+                                orig_script, flags=re.M)
+            new_script = re.sub('(^libdir=)(.*)', r'\1"%s/lib"' %grub_i386_mods,
+                                new_script, flags=re.M)
+
+        with open(mkrescue, 'w', encoding='utf-8') as filehandle:
+            filehandle.write(new_script)
+
+    def generate_bits_iso(self):
+        """ Uses grub-mkrescue to generate a fresh bits iso with the python
+            test scripts
+        """
+        bits_dir = os.path.join(self._workDir,
+                                'bits-%d' %self.BITS_INTERNAL_VER)
+        iso_file = os.path.join(self._workDir,
+                                'bits-%d.iso' %self.BITS_INTERNAL_VER)
+        mkrescue_script = os.path.join(self._workDir,
+                                       'grub-inst-x86_64-efi', 'bin',
+                                       'grub-mkrescue')
+
+        self.assertTrue(os.access(mkrescue_script,
+                                  os.R_OK | os.W_OK | os.X_OK))
+
+        self.fix_mkrescue(mkrescue_script)
+
+        self.logger.info('using grub-mkrescue for generating biosbits iso ...')
+
+        try:
+            if os.getenv('V') or os.getenv('BITS_DEBUG'):
+                proc = subprocess.run([mkrescue_script, '-o', iso_file,
+                                       bits_dir],
+                                      stdout=subprocess.PIPE,
+                                      stderr=subprocess.STDOUT,
+                                      check=True)
+                self.logger.info("grub-mkrescue output %s" % proc.stdout)
+            else:
+                subprocess.check_call([mkrescue_script, '-o',
+                                      iso_file, bits_dir],
+                                      stderr=subprocess.DEVNULL,
+                                      stdout=subprocess.DEVNULL)
+        except Exception as e: # pylint: disable=broad-except
+            self.skipTest("Error while generating the bits iso. "
+                          "Pass V=1 in the environment to get more details. "
+                          + str(e))
+
+        self.assertTrue(os.access(iso_file, os.R_OK))
+
+        self.logger.info('iso file %s successfully generated.', iso_file)
+
+    def setUp(self): # pylint: disable=arguments-differ
+        super().setUp('qemu-system-')
+        self.logger = self.log
+
+        self._baseDir = Path(__file__).parent
+
+        # The test framework also provides a working directory in
+        # self.workdir, but at present we maintain our own temporary
+        # working directory. It gives us more control over the generated
+        # bits log files, and for debugging we may choose not to remove
+        # this working directory so that the logs and iso can be
+        # inspected manually and archived if needed.
+        self._workDir = tempfile.mkdtemp(prefix='acpi-bits-',
+                                         suffix='.tmp')
+        self.logger.info('working dir: %s', self._workDir)
+
+        prebuiltDir = os.path.join(self._workDir, 'prebuilt')
+        if not os.path.isdir(prebuiltDir):
+            os.mkdir(prebuiltDir, mode=0o775)
+
+        bits_zip_file = os.path.join(prebuiltDir, 'bits-%d-%s.zip'
+                                     %(self.BITS_INTERNAL_VER,
+                                       self.BITS_COMMIT_HASH))
+        grub_tar_file = os.path.join(prebuiltDir,
+                                     'bits-%d-%s-grub.tar.gz'
+                                     %(self.BITS_INTERNAL_VER,
+                                       self.BITS_COMMIT_HASH))
+
+        bitsLocalArtLoc = self.ASSET_BITS.fetch()
+        self.logger.info("downloaded bits artifacts to %s", bitsLocalArtLoc)
+
+        # extract the bits artifact in the temp working directory
+        with zipfile.ZipFile(bitsLocalArtLoc, 'r') as zref:
+            zref.extractall(prebuiltDir)
+
+        # extract the bits software in the temp working directory
+        with zipfile.ZipFile(bits_zip_file, 'r') as zref:
+            zref.extractall(self._workDir)
+
+        with tarfile.open(grub_tar_file, 'r', encoding='utf-8') as tarball:
+            tarball.extractall(self._workDir)
+
+        self.copy_test_scripts()
+        self.copy_bits_config()
+        self.generate_bits_iso()
+
+    def parse_log(self):
+        """parse the log generated by running bits tests and
+           check for failures.
+        """
+        debugconf = os.path.join(self._workDir, self._debugcon_log)
+        log = ""
+        with open(debugconf, 'r', encoding='utf-8') as filehandle:
+            log = filehandle.read()
+
+        matchiter = re.finditer(r'(.*Summary: )(\d+ passed), (\d+ failed).*',
+                                log)
+        for match in matchiter:
+            # verify that no test cases failed.
+            try:
+                self.assertEqual(match.group(3).split()[0], '0',
+                                 'Some bits tests seem to have failed. '
+                                 'Please check the test logs for more info.')
+            except AssertionError as e:
+                self._print_log(log)
+                raise e
+            else:
+                if os.getenv('V') or os.getenv('BITS_DEBUG'):
+                    self._print_log(log)
+
+    def tearDown(self):
+        """
+           Lets do some cleanups.
+        """
+        if self._vm:
+            # the guest initiates a shutdown and the test waits for it, so
+            # by the time we get here the VM should no longer be running
+            self.assertFalse(self._vm.is_running())
+        if not os.getenv('BITS_DEBUG') and self._workDir:
+            self.logger.info('removing the work directory %s', self._workDir)
+            shutil.rmtree(self._workDir)
+        else:
+            self.logger.info('not removing the work directory %s ' \
+                             'as BITS_DEBUG is ' \
+                             'passed in the environment', self._workDir)
+        super().tearDown()
+
+    def test_acpi_smbios_bits(self):
+        """The main test case implementation."""
+
+        iso_file = os.path.join(self._workDir,
+                                'bits-%d.iso' %self.BITS_INTERNAL_VER)
+
+        self.assertTrue(os.access(iso_file, os.R_OK))
+
+        self._vm = QEMUBitsMachine(binary=self.qemu_bin,
+                                   base_temp_dir=self._workDir,
+                                   debugcon_log=self._debugcon_log,
+                                   debugcon_addr=self._debugcon_addr)
+
+        self._vm.add_args('-cdrom', '%s' %iso_file)
+        # the vm needs to be run under icount so that TCG emulation is
+        # consistent in terms of timing. smilatency tests have consistent
+        # timing requirements.
+        self._vm.add_args('-icount', 'auto')
+        # currently there is no support in bits for recognizing 64-bit SMBIOS
+        # entry points. QEMU defaults to 64-bit entry points since the
+        # upstream commit bf376f3020 ("hw/i386/pc: Default to use SMBIOS 3.0
+        # for newer machine models"). Therefore, enforce 32-bit entry point.
+        self._vm.add_args('-machine', 'smbios-entry-point-type=32')
+
+        # enable console logging
+        self._vm.set_console()
+        self._vm.launch()
+
+
+        # biosbits has been configured to run all the specified test suites
+        # in batch mode and then automatically initiate a vm shutdown.
+        # Set the timeout for the SHUTDOWN event from the bits VM to
+        # BITS_TIMEOUT, on par with the overall test timeout.
+        self._vm.event_wait('SHUTDOWN', timeout=BITS_TIMEOUT)
+        self._vm.wait(timeout=None)
+        self.logger.debug("Checking console output ...")
+        self.parse_log()
+
+if __name__ == '__main__':
+    QemuBaseTest.main()
diff --git a/tests/functional/test_arm_bflt.py b/tests/functional/test_arm_bflt.py
new file mode 100755
index 0000000000..281925d11a
--- /dev/null
+++ b/tests/functional/test_arm_bflt.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+#
+# Test the bFLT loader format
+#
+# Copyright (C) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import bz2
+
+from qemu_test import QemuUserTest, Asset
+from qemu_test import has_cmd
+from qemu_test.utils import cpio_extract
+from unittest import skipUnless
+
+
+class LoadBFLT(QemuUserTest):
+
+    ASSET_ROOTFS = Asset(
+        ('https://elinux.org/images/5/51/Stm32_mini_rootfs.cpio.bz2'),
+         'eefb788e4980c9e8d6c9d60ce7d15d4da6bf4fbc6a80f487673824600d5ba9cc')
+
+    @skipUnless(*has_cmd('cpio'))
+    @skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
+    def test_stm32(self):
+        # See https://elinux.org/STM32#User_Space
+        rootfs_path_bz2 = self.ASSET_ROOTFS.fetch()
+        busybox_path = os.path.join(self.workdir, "bin/busybox")
+
+        with bz2.open(rootfs_path_bz2, 'rb') as cpio_handle:
+            cpio_extract(cpio_handle, self.workdir)
+
+        res = self.run_cmd(busybox_path)
+        ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.'
+        self.assertIn(ver, res.stdout)
+
+        res = self.run_cmd(busybox_path, ['uname', '-a'])
+        unm = 'armv7l GNU/Linux'
+        self.assertIn(unm, res.stdout)
+
+
+if __name__ == '__main__':
+    QemuUserTest.main()
diff --git a/tests/functional/test_arm_canona1100.py b/tests/functional/test_arm_canona1100.py
new file mode 100755
index 0000000000..65f1228296
--- /dev/null
+++ b/tests/functional/test_arm_canona1100.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots the canon-a1100 machine with firmware
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import archive_extract
+
+class CanonA1100Machine(QemuSystemTest):
+    """Boots the barebox firmware and checks that the console is operational"""
+
+    timeout = 90
+
+    ASSET_BIOS = Asset(('https://qemu-advcal.gitlab.io'
+                        '/qac-best-of-multiarch/download/day18.tar.xz'),
+                       '28e71874ce985be66b7fd1345ed88cb2523b982f899c8d2900d6353054a1be49')
+
+    def test_arm_canona1100(self):
+        self.set_machine('canon-a1100')
+
+        file_path = self.ASSET_BIOS.fetch()
+        archive_extract(file_path, dest_dir=self.workdir,
+                        member="day18/barebox.canon-a1100.bin")
+        self.vm.set_console()
+        self.vm.add_args('-bios',
+                         self.workdir + '/day18/barebox.canon-a1100.bin')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'running /env/bin/init')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_arm_integratorcp.py b/tests/functional/test_arm_integratorcp.py
new file mode 100755
index 0000000000..0fe083f661
--- /dev/null
+++ b/tests/functional/test_arm_integratorcp.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import logging
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from unittest import skipUnless
+
+
+NUMPY_AVAILABLE = True
+try:
+    import numpy as np
+except ImportError:
+    NUMPY_AVAILABLE = False
+
+CV2_AVAILABLE = True
+try:
+    import cv2
+except ImportError:
+    CV2_AVAILABLE = False
+
+
+class IntegratorMachine(QemuSystemTest):
+
+    timeout = 90
+
+    ASSET_KERNEL = Asset(
+        ('https://github.com/zayac/qemu-arm/raw/master/'
+         'arm-test/kernel/zImage.integrator'),
+        '26e7c7e8f943de785d95bd3c74d66451604a9b6a7a3d25dceb279e7548fd8e78')
+
+    ASSET_INITRD = Asset(
+        ('https://github.com/zayac/qemu-arm/raw/master/'
+         'arm-test/kernel/arm_root.img'),
+        'e187c27fb342ad148c7f33475fbed124933e0b3f4be8c74bc4f3426a4793373a')
+
+    ASSET_TUXLOGO = Asset(
+        ('https://github.com/torvalds/linux/raw/v2.6.12/'
+         'drivers/video/logo/logo_linux_vga16.ppm'),
+        'b762f0d91ec018887ad1b334543c2fdf9be9fdfc87672b409211efaa3ea0ef79')
+
+    def boot_integratorcp(self):
+        kernel_path = self.ASSET_KERNEL.fetch()
+        initrd_path = self.ASSET_INITRD.fetch()
+
+        self.set_machine('integratorcp')
+        self.vm.set_console()
+        self.vm.add_args('-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-append', 'printk.time=0 console=ttyAMA0')
+        self.vm.launch()
+
+    @skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
+    def test_integratorcp_console(self):
+        """
+        Boots the Linux kernel and checks that the console is operational
+        """
+        self.boot_integratorcp()
+        wait_for_console_pattern(self, 'Log in as root')
+
+    @skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed')
+    @skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed')
+    @skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
+    def test_framebuffer_tux_logo(self):
+        """
+        Boot Linux and verify the Tux logo is displayed on the framebuffer.
+        """
+        screendump_path = os.path.join(self.workdir, "screendump.pbm")
+        tuxlogo_path = self.ASSET_TUXLOGO.fetch()
+
+        self.boot_integratorcp()
+        framebuffer_ready = 'Console: switching to colour frame buffer device'
+        wait_for_console_pattern(self, framebuffer_ready)
+        self.vm.cmd('human-monitor-command', command_line='stop')
+        self.vm.cmd('human-monitor-command',
+                    command_line='screendump %s' % screendump_path)
+        logger = logging.getLogger('framebuffer')
+
+        cpu_count = 1
+        match_threshold = 0.92
+        screendump_bgr = cv2.imread(screendump_path)
+        screendump_gray = cv2.cvtColor(screendump_bgr, cv2.COLOR_BGR2GRAY)
+        result = cv2.matchTemplate(screendump_gray, cv2.imread(tuxlogo_path, 0),
+                                   cv2.TM_CCOEFF_NORMED)
+        loc = np.where(result >= match_threshold)
+        tux_count = 0
+        for tux_count, pt in enumerate(zip(*loc[::-1]), start=1):
+            logger.debug('found Tux at position [x, y] = %s', pt)
+        self.assertGreaterEqual(tux_count, cpu_count)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_avr_mega2560.py b/tests/functional/test_avr_mega2560.py
new file mode 100755
index 0000000000..8e47b4200b
--- /dev/null
+++ b/tests/functional/test_avr_mega2560.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+#
+# QEMU AVR integration tests
+#
+# Copyright (c) 2019-2020 Michael Rolnik <mrolnik@gmail.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+
+from qemu_test import QemuSystemTest, Asset
+
+class AVR6Machine(QemuSystemTest):
+    timeout = 5
+
+    ASSET_ROM = Asset(('https://github.com/seharris/qemu-avr-tests'
+                       '/raw/36c3e67b8755dcf/free-rtos/Demo'
+                       '/AVR_ATMega2560_GCC/demo.elf'),
+                      'ee4833bd65fc69e84a79ed1c608affddbd499a60e63acf87d9113618401904e4')
+
+    def test_freertos(self):
+        """
+        https://github.com/seharris/qemu-avr-tests/raw/master/free-rtos/Demo/AVR_ATMega2560_GCC/demo.elf
+        constantly prints out 'ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX'
+        """
+        rom_path = self.ASSET_ROM.fetch()
+
+        self.set_machine('arduino-mega-2560-v3')
+        self.vm.add_args('-bios', rom_path)
+        self.vm.add_args('-nographic')
+        self.vm.launch()
+
+        time.sleep(2)
+        self.vm.shutdown()
+
+        self.assertIn('ABCDEFGHIJKLMNOPQRSTUVWXABCDEFGHIJKLMNOPQRSTUVWX',
+                self.vm.get_log())
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_cpu_queries.py b/tests/functional/test_cpu_queries.py
new file mode 100755
index 0000000000..b1122a0e8f
--- /dev/null
+++ b/tests/functional/test_cpu_queries.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Sanity check of query-cpu-* results
+#
+# Copyright (c) 2019 Red Hat, Inc.
+#
+# Author:
+#  Eduardo Habkost <ehabkost@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+
+class QueryCPUModelExpansion(QemuSystemTest):
+    """
+    Run query-cpu-model-expansion for each CPU model, and validate results
+    """
+
+    def test(self):
+        self.set_machine('none')
+        self.vm.add_args('-S')
+        self.vm.launch()
+
+        cpus = self.vm.cmd('query-cpu-definitions')
+        for c in cpus:
+            self.log.info("Checking CPU: %s", c)
+            self.assertNotIn('', c['unavailable-features'], c['name'])
+
+        for c in cpus:
+            model = {'name': c['name']}
+            e = self.vm.cmd('query-cpu-model-expansion', model=model,
+                            type='full')
+            self.assertEqual(e['model']['name'], c['name'])
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_empty_cpu_model.py b/tests/functional/test_empty_cpu_model.py
new file mode 100755
index 0000000000..0081b06d85
--- /dev/null
+++ b/tests/functional/test_empty_cpu_model.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python3
+#
+# Check for crash when using empty -cpu option
+#
+# Copyright (c) 2019 Red Hat, Inc.
+#
+# Author:
+#  Eduardo Habkost <ehabkost@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+from qemu_test import QemuSystemTest
+
+class EmptyCPUModel(QemuSystemTest):
+    def test(self):
+        self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'-cpu option cannot be empty')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_info_usernet.py b/tests/functional/test_info_usernet.py
new file mode 100755
index 0000000000..cd37524d94
--- /dev/null
+++ b/tests/functional/test_info_usernet.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Test for the hmp command "info usernet"
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# Author:
+#  Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+
+from qemu.utils import get_info_usernet_hostfwd_port
+
+
+class InfoUsernet(QemuSystemTest):
+
+    def test_hostfwd(self):
+        self.require_netdev('user')
+        self.set_machine('none')
+        self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22')
+        self.vm.launch()
+        res = self.vm.cmd('human-monitor-command',
+                          command_line='info usernet')
+        port = get_info_usernet_hostfwd_port(res)
+        self.assertIsNotNone(port,
+                             ('"info usernet" output content does not seem to '
+                              'contain the redirected port'))
+        self.assertGreater(port, 0,
+                           ('Found a redirected port that is not greater than'
+                            ' zero'))
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_linux_initrd.py b/tests/functional/test_linux_initrd.py
new file mode 100755
index 0000000000..c71a59d4c9
--- /dev/null
+++ b/tests/functional/test_linux_initrd.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+#
+# Linux initrd integration test.
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+#  Wainer dos Santos Moschetta <wainersm@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import os
+import logging
+import tempfile
+
+from qemu_test import QemuSystemTest, Asset
+from unittest import skipUnless
+
+
+class LinuxInitrd(QemuSystemTest):
+    """
+    Checks that QEMU correctly evaluates the initrd file passed via the
+    -initrd option.
+    """
+
+    timeout = 300
+
+    ASSET_F18_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+         'releases/18/Fedora/x86_64/os/images/pxeboot/vmlinuz'),
+        '1a27cb42559ce29237ac186699d063556ad69c8349d732bb1bd8d614e5a8cc2e')
+
+    ASSET_F28_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive/fedora/linux/'
+         'releases/28/Everything/x86_64/os/images/pxeboot/vmlinuz'),
+        'd05909c9d4a742a6fcc84dcc0361009e4611769619cc187a07107579a035f24e')
+
+    def test_with_2gib_file_should_exit_error_msg_with_linux_v3_6(self):
+        """
+        Pretends to boot QEMU with an initrd file of 2GiB in size
+        and expects it to exit with an error message.
+        Fedora-18 shipped with linux-3.6, which does not support
+        xloadflags and hence cannot handle an initrd larger than 2GiB.
+        """
+        self.set_machine('pc')
+        kernel_path = self.ASSET_F18_KERNEL.fetch()
+        max_size = 2 * (1024 ** 3) - 1
+
+        with tempfile.NamedTemporaryFile() as initrd:
+            initrd.seek(max_size)
+            initrd.write(b'\0')
+            initrd.flush()
+            self.vm.add_args('-kernel', kernel_path, '-initrd', initrd.name,
+                             '-m', '4096')
+            self.vm.set_qmp_monitor(enabled=False)
+            self.vm.launch()
+            self.vm.wait()
+            self.assertEqual(self.vm.exitcode(), 1)
+            expected_msg = r'.*initrd is too large.*max: \d+, need %s.*' % (
+                max_size + 1)
+            self.assertRegex(self.vm.get_log(), expected_msg)
+
+    @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
+    def test_with_2gib_file_should_work_with_linux_v4_16(self):
+        """
+        QEMU supports an initrd of up to 4 GiB for recent kernels.
+        Expect the guest to reach 'Unpacking initramfs...'.
+        """
+        self.set_machine('pc')
+        kernel_path = self.ASSET_F28_KERNEL.fetch()
+        max_size = 2 * (1024 ** 3) + 1
+
+        with tempfile.NamedTemporaryFile() as initrd:
+            initrd.seek(max_size)
+            initrd.write(b'\0')
+            initrd.flush()
+
+            self.vm.set_console()
+            kernel_command_line = 'console=ttyS0'
+            self.vm.add_args('-kernel', kernel_path,
+                             '-append', kernel_command_line,
+                             '-initrd', initrd.name,
+                             '-m', '5120')
+            self.vm.launch()
+            console = self.vm.console_socket.makefile()
+            console_logger = logging.getLogger('console')
+            while True:
+                msg = console.readline()
+                console_logger.debug(msg.strip())
+                if 'Unpacking initramfs...' in msg:
+                    break
+                if 'Kernel panic - not syncing' in msg:
+                    self.fail("Kernel panic reached")
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_loongarch64_virt.py b/tests/functional/test_loongarch64_virt.py
new file mode 100755
index 0000000000..2b8baa2c2a
--- /dev/null
+++ b/tests/functional/test_loongarch64_virt.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# LoongArch virt test.
+#
+# Copyright (c) 2023 Loongson Technology Corporation Limited
+#
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+
+class LoongArchMachine(QemuSystemTest):
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+    timeout = 120
+
+    ASSET_KERNEL = Asset(
+        ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+         'releases/download/2024-05-30/vmlinuz.efi'),
+        '08b88a45f48a5fd92260bae895be4e5175be2397481a6f7821b9f39b2965b79e')
+    ASSET_INITRD = Asset(
+        ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+         'releases/download/2024-05-30/ramdisk'),
+        '03d6fb6f8ee64ecac961120a0bdacf741f17b3bee2141f17fa01908c8baf176a')
+    ASSET_BIOS = Asset(
+        ('https://github.com/yangxiaojuan-loongson/qemu-binary/'
+         'releases/download/2024-05-30/QEMU_EFI.fd'),
+        '937c1e7815e2340150c194a9f8f0474259038a3d7b8845ed62cc08163c46bea1')
+
+    def wait_for_console_pattern(self, success_message, vm=None):
+        wait_for_console_pattern(self, success_message,
+                                 failure_message='Kernel panic - not syncing',
+                                 vm=vm)
+
+    def test_loongarch64_devices(self):
+
+        self.set_machine('virt')
+
+        kernel_path = self.ASSET_KERNEL.fetch()
+        initrd_path = self.ASSET_INITRD.fetch()
+        bios_path = self.ASSET_BIOS.fetch()
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'root=/dev/ram rdinit=/sbin/init console=ttyS0,115200')
+        self.vm.add_args('-nographic',
+                         '-smp', '4',
+                         '-m', '1024',
+                         '-cpu', 'la464',
+                         '-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-bios', bios_path,
+                         '-append', kernel_command_line)
+        self.vm.launch()
+        self.wait_for_console_pattern('Run /sbin/init as init process')
+        exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+                                          'processor		: 3')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_m68k_nextcube.py b/tests/functional/test_m68k_nextcube.py
new file mode 100755
index 0000000000..89385a134a
--- /dev/null
+++ b/tests/functional/test_m68k_nextcube.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a VM and run OCR on the framebuffer
+#
+# Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import os
+import time
+
+from qemu_test import QemuSystemTest, Asset
+from unittest import skipUnless
+
+from qemu_test.tesseract import tesseract_available, tesseract_ocr
+
+PIL_AVAILABLE = True
+try:
+    from PIL import Image
+except ImportError:
+    PIL_AVAILABLE = False
+
+
+class NextCubeMachine(QemuSystemTest):
+
+    timeout = 15
+
+    ASSET_ROM = Asset(('https://sourceforge.net/p/previous/code/1350/tree/'
+                       'trunk/src/Rev_2.5_v66.BIN?format=raw'),
+                      '1b753890b67095b73e104c939ddf62eca9e7d0aedde5108e3893b0ed9d8000a4')
+
+    def check_bootrom_framebuffer(self, screenshot_path):
+        rom_path = self.ASSET_ROM.fetch()
+
+        self.vm.add_args('-bios', rom_path)
+        self.vm.launch()
+
+        self.log.info('VM launched, waiting for display')
+        # TODO: Use avocado.utils.wait.wait_for to catch the
+        #       'displaysurface_create 1120x832' trace-event.
+        time.sleep(2)
+
+        self.vm.cmd('human-monitor-command',
+                    command_line='screendump %s' % screenshot_path)
+
+    @skipUnless(PIL_AVAILABLE, 'Python PIL not installed')
+    def test_bootrom_framebuffer_size(self):
+        self.set_machine('next-cube')
+        screenshot_path = os.path.join(self.workdir, "dump.ppm")
+        self.check_bootrom_framebuffer(screenshot_path)
+
+        width, height = Image.open(screenshot_path).size
+        self.assertEqual(width, 1120)
+        self.assertEqual(height, 832)
+
+    # Tesseract 4 adds a new OCR engine based on LSTM neural networks. The
+    # new version is faster and more accurate than version 3. The drawback is
+    # that it is still alpha-level software.
+    @skipUnless(tesseract_available(4), 'tesseract OCR tool not available')
+    def test_bootrom_framebuffer_ocr_with_tesseract(self):
+        self.set_machine('next-cube')
+        screenshot_path = os.path.join(self.workdir, "dump.ppm")
+        self.check_bootrom_framebuffer(screenshot_path)
+        lines = tesseract_ocr(screenshot_path)
+        text = '\n'.join(lines)
+        self.assertIn('Testing the FPU', text)
+        self.assertIn('System test failed. Error code', text)
+        self.assertIn('Boot command', text)
+        self.assertIn('Next>', text)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_mem_addr_space.py b/tests/functional/test_mem_addr_space.py
new file mode 100755
index 0000000000..bb0cf062ca
--- /dev/null
+++ b/tests/functional/test_mem_addr_space.py
@@ -0,0 +1,314 @@
+#!/usr/bin/env python3
+#
+# Check for crash when using memory beyond the available guest processor
+# address space.
+#
+# Copyright (c) 2023 Red Hat, Inc.
+#
+# Author:
+#  Ani Sinha <anisinha@redhat.com>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from qemu_test import QemuSystemTest
+import time
+
+class MemAddrCheck(QemuSystemTest):
+    # after launch, in order to generate the logs from QEMU we need to
+    # wait for some time. Launching and then immediately shutting down
+    # the VM generates empty logs. A delay of 1 second is added for
+    # this reason.
+    DELAY_Q35_BOOT_SEQUENCE = 1
+
+    # first, lets test some 32-bit processors.
+    # for all 32-bit cases, pci64_hole_size is 0.
+    def test_phybits_low_pse36(self):
+        """
+        With pse36 feature ON, a processor has 36 bits of addressing. So it can
+        access up to a maximum of 64GiB of memory. Memory hotplug region begins
+        at 4 GiB boundary when "above_4g_mem_size" is 0 (this would be true when
+        we have 0.5 GiB of VM memory, see pc_q35_init()). This means total
+        hotpluggable memory size is 60 GiB. Per slot, we reserve 1 GiB of memory
+        for dimm alignment for all machines. That leaves total hotpluggable
+        actual memory size of 59 GiB. If the VM is started with 0.5 GiB of
+        memory, maxmem should be set to a maximum value of 59.5 GiB to ensure
+        that the processor can address all memory directly.
+        Note that 64-bit pci hole size is 0 in this case. If maxmem is set to
+        59.6G, QEMU should fail to start with a "phys-bits too low" message.
+        If maxmem is set to 59.5G with all other QEMU parameters identical, QEMU
+        should start fine.
+        """
+        self.vm.add_args('-S', '-machine', 'q35', '-m',
+                         '512,slots=1,maxmem=59.6G',
+                         '-cpu', 'pentium,pse36=on', '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_low_pae(self):
+        """
+        With the pae feature ON, a processor has 36 bits of addressing, so it
+        can access up to a maximum of 64 GiB of memory. The rest is the same
+        as the case
+        with pse36 above.
+        """
+        self.vm.add_args('-S', '-machine', 'q35', '-m',
+                         '512,slots=1,maxmem=59.6G',
+                         '-cpu', 'pentium,pae=on', '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_pentium_pse36(self):
+        """
+        Setting maxmem to 59.5G and making sure that QEMU can start with the
+        same options as the failing case above with pse36 cpu feature.
+        """
+        self.vm.add_args('-machine', 'q35', '-m',
+                         '512,slots=1,maxmem=59.5G',
+                         '-cpu', 'pentium,pse36=on', '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_pentium_pae(self):
+        """
+        The test is the same as above, but now with the pae cpu feature on.
+        Setting maxmem to 59.5G and making sure that QEMU can start fine
+        with the same options as the case above.
+        """
+        self.vm.add_args('-machine', 'q35', '-m',
+                         '512,slots=1,maxmem=59.5G',
+                         '-cpu', 'pentium,pae=on', '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_pentium2(self):
+        """
+        Pentium2 has 36 bits of addressing, so it is the same as pentium
+        with pse36 ON.
+        """
+        self.vm.add_args('-machine', 'q35', '-m',
+                         '512,slots=1,maxmem=59.5G',
+                         '-cpu', 'pentium2', '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_low_nonpse36(self):
+        """
+        Pentium processor has 32 bits of addressing without pse36 or pae
+        so it can access physical address up to 4 GiB. Setting maxmem to
+        4 GiB should make QEMU fail to start with the "phys-bits too low"
+        message, because the memory hotplug region is always placed above
+        4 GiB due to the PCI hole and for simplicity.
+        """
+        self.vm.add_args('-S', '-machine', 'q35', '-m',
+                         '512,slots=1,maxmem=4G',
+                         '-cpu', 'pentium', '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    # Now let's test some 64-bit CPU cases.
+    def test_phybits_low_tcg_q35_70_amd(self):
+        """
+        For q35 7.1 machines and above, there is an HT window that starts at
+        1012 GiB and ends at 1 TiB - 1. If the max GPA falls in this range,
+        "above_4G" memory is adjusted to start at the 1 TiB boundary for AMD
+        cpus in the default case. Let's first test without that adjustment,
+        using a 7.0 machine. For q35-7.0 machines, "above 4G" memory starts
+        at 4 GiB.
+        pci64_hole size is 32 GiB. Since TCG_PHYS_ADDR_BITS is defined to
+        be 40, TCG-emulated CPUs have a maximum of 1 TiB (1024 GiB) of
+        directly addressable memory. Hence, the maxmem value can be at most
+        1024 GiB - 4 GiB - 1 GiB per slot for alignment - 32 GiB + 0.5 GiB
+        which is equal to 987.5 GiB. Setting the value to 988 GiB should
+        make QEMU fail with the error message.
+        """
+        self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
+                         '512,slots=1,maxmem=988G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_low_tcg_q35_71_amd(self):
+        """
+        AMD_HT_START is defined to be at 1012 GiB. So for q35 machine
+        versions > 7.0 and AMD cpus, instead of the 1024 GiB limit of a
+        40-bit processor address space, the limit has to be 1012 GiB, that
+        is, 12 GiB less than the case above, in order to accommodate the HT
+        hole.
+        Make sure QEMU fails when maxmem size is 976 GiB (12 GiB less
+        than 988 GiB).
+        """
+        self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
+                         '512,slots=1,maxmem=976G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_tcg_q35_70_amd(self):
+        """
+        Same as q35-7.0 AMD case except that here we check that QEMU can
+        successfully start when maxmem is < 988G.
+        """
+        self.vm.add_args('-S', '-machine', 'pc-q35-7.0', '-m',
+                         '512,slots=1,maxmem=987.5G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_tcg_q35_71_amd(self):
+        """
+        Same as q35-7.1 AMD case except that here we check that QEMU can
+        successfully start when maxmem is < 976G.
+        """
+        self.vm.add_args('-S', '-machine', 'pc-q35-7.1', '-m',
+                         '512,slots=1,maxmem=975.5G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_tcg_q35_71_intel(self):
+        """
+        Same parameters as test_phybits_low_tcg_q35_71_amd(), but with an
+        Intel cpu instead. QEMU should start fine in this case, as
+        "above_4G" memory starts at 4 GiB.
+        """
+        self.vm.add_args('-S', '-cpu', 'Skylake-Server',
+                         '-machine', 'pc-q35-7.1', '-m',
+                         '512,slots=1,maxmem=976G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_low_tcg_q35_71_amd_41bits(self):
+        """
+        AMD processor with 41 bits. Max cpu hw address = 2 TiB.
+        By setting maxmem above 1012 GiB - 32 GiB - 4 GiB = 976 GiB, we can
+        force "above_4G" memory to start at 1 TiB for q35-7.1 machines
+        (max GPA will be above AMD_HT_START which is defined as 1012 GiB).
+
+        With the pci64 hole size at 32 GiB, maxmem in this case should be at
+        most 991.5 GiB, with 1 GiB per slot for alignment and 0.5 GiB of
+        non-hotplug memory for the VM (1024 - 32 - 1 + 0.5). With 992 GiB,
+        QEMU should fail to start.
+        """
+        self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
+                         '-machine', 'pc-q35-7.1', '-m',
+                         '512,slots=1,maxmem=992G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_tcg_q35_71_amd_41bits(self):
+        """
+        AMD processor with 41 bits. Max cpu hw address = 2 TiB.
+        Same as above, but by setting maxmem between 976 GiB and 992 GiB,
+        QEMU should start fine.
+        """
+        self.vm.add_args('-S', '-cpu', 'EPYC-v4,phys-bits=41',
+                         '-machine', 'pc-q35-7.1', '-m',
+                         '512,slots=1,maxmem=990G',
+                         '-display', 'none',
+                         '-object', 'memory-backend-ram,id=mem1,size=1G',
+                         '-device', 'pc-dimm,id=vm0,memdev=mem1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_low_tcg_q35_intel_cxl(self):
+        """
+        The cxl memory window starts after the memory device range. Here, we
+        use 1 GiB of cxl window memory. The "4G_mem" region ends aligned at
+        4 GiB, and the 32 GiB pci64 hole starts after the cxl memory window.
+        So maxmem here should be at most 986 GiB, considering all memory
+        boundary alignment constraints, with 40 bits (1 TiB) of processor
+        physical address space.
+        """
+        self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
+                         '-machine', 'q35,cxl=on', '-m',
+                         '512,slots=1,maxmem=987G',
+                         '-display', 'none',
+                         '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1',
+                         '-M', 'cxl-fmw.0.targets.0=cxl.1,cxl-fmw.0.size=1G')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        self.vm.wait()
+        self.assertEqual(self.vm.exitcode(), 1, "QEMU exit code should be 1")
+        self.assertRegex(self.vm.get_log(), r'phys-bits too low')
+
+    def test_phybits_ok_tcg_q35_intel_cxl(self):
+        """
+        Same as above but here we do not reserve any cxl memory window. Hence,
+        with the exact same parameters as above, QEMU should start fine even
+        with cxl enabled.
+        """
+        self.vm.add_args('-S', '-cpu', 'Skylake-Server,phys-bits=40',
+                         '-machine', 'q35,cxl=on', '-m',
+                         '512,slots=1,maxmem=987G',
+                         '-display', 'none',
+                         '-device', 'pxb-cxl,bus_nr=12,bus=pcie.0,id=cxl.1')
+        self.vm.set_qmp_monitor(enabled=False)
+        self.vm.launch()
+        time.sleep(self.DELAY_Q35_BOOT_SEQUENCE)
+        self.vm.shutdown()
+        self.assertNotRegex(self.vm.get_log(), r'phys-bits too low')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_microblaze_s3adsp1800.py b/tests/functional/test_microblaze_s3adsp1800.py
new file mode 100755
index 0000000000..4f692ffdb1
--- /dev/null
+++ b/tests/functional/test_microblaze_s3adsp1800.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a microblaze Linux kernel and checks the console
+#
+# Copyright (c) 2018, 2021 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import time
+from qemu_test import exec_command, exec_command_and_wait_for_pattern
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import archive_extract
+
+class MicroblazeMachine(QemuSystemTest):
+
+    timeout = 90
+
+    ASSET_IMAGE = Asset(
+        ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+         'day17.tar.xz'),
+        '3ba7439dfbea7af4876662c97f8e1f0cdad9231fc166e4861d17042489270057')
+
+    def test_microblaze_s3adsp1800(self):
+        self.set_machine('petalogix-s3adsp1800')
+        file_path = self.ASSET_IMAGE.fetch()
+        archive_extract(file_path, self.workdir)
+        self.vm.set_console()
+        self.vm.add_args('-kernel', self.workdir + '/day17/ballerina.bin')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'This architecture does not have '
+                                       'kernel memory protection')
+        # Note:
+        # The kernel sometimes gets stuck after the "This architecture ..."
+        # message, which is why we don't test for a later string here. This
+        # needs some investigation by a microblaze wizard one day...
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_microblazeel_s3adsp1800.py b/tests/functional/test_microblazeel_s3adsp1800.py
new file mode 100755
index 0000000000..faa3927f2e
--- /dev/null
+++ b/tests/functional/test_microblazeel_s3adsp1800.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a microblaze Linux kernel and checks the console
+#
+# Copyright (c) 2018, 2021 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import time
+from qemu_test import exec_command, exec_command_and_wait_for_pattern
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import archive_extract
+
+class MicroblazeelMachine(QemuSystemTest):
+
+    timeout = 90
+
+    ASSET_IMAGE = Asset(
+        ('http://www.qemu-advent-calendar.org/2023/download/day13.tar.gz'),
+        'b9b3d43c5dd79db88ada495cc6e0d1f591153fe41355e925d791fbf44de50c22')
+
+    def test_microblazeel_s3adsp1800(self):
+        self.require_netdev('user')
+        self.set_machine('petalogix-s3adsp1800')
+        file_path = self.ASSET_IMAGE.fetch()
+        archive_extract(file_path, self.workdir)
+        self.vm.set_console()
+        self.vm.add_args('-kernel', self.workdir + '/day13/xmaton.bin')
+        self.vm.add_args('-nic', 'user,tftp=' + self.workdir + '/day13/')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'QEMU Advent Calendar 2023')
+        time.sleep(0.1)
+        exec_command(self, 'root')
+        time.sleep(0.1)
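+        # 10.0.2.2 is the host side of QEMU's user-mode network stack; the
+        # tftp= option above serves the extracted day13 directory from there.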
+        exec_command_and_wait_for_pattern(self,
+                'tftp -g -r xmaton.png 10.0.2.2 ; md5sum xmaton.png',
+                '821cd3cab8efd16ad6ee5acc3642a8ea')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_mips64el_fuloong2e.py b/tests/functional/test_mips64el_fuloong2e.py
new file mode 100755
index 0000000000..7688a32713
--- /dev/null
+++ b/tests/functional/test_mips64el_fuloong2e.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the Lemote Fuloong-2E machine.
+#
+# Copyright (c) 2019 Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import subprocess
+
+from qemu_test import QemuSystemTest
+from qemu_test import wait_for_console_pattern
+from unittest import skipUnless
+
+class MipsFuloong2e(QemuSystemTest):
+
+    timeout = 60
+
+    @skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
+    @skipUnless(os.getenv('RESCUE_YL_PATH'), 'RESCUE_YL_PATH not available')
+    def test_linux_kernel_2_6_27_isa_serial(self):
+        # Recovery system for the Yeeloong laptop
+        # (enough to test the fuloong2e southbridge, accessing its ISA bus)
+        # http://dev.lemote.com/files/resource/download/rescue/rescue-yl
+        sha = 'ab588d3316777c62cc81baa20ac92e98b01955c244dff3794b711bc34e26e51d'
+        kernel_path = os.getenv('RESCUE_YL_PATH')
+        output = subprocess.check_output(['sha256sum', kernel_path])
+        checksum = output.split()[0]
+        assert checksum.decode("utf-8") == sha
+
+        self.set_machine('fuloong2e')
+        self.vm.set_console()
+        self.vm.add_args('-kernel', kernel_path)
+        self.vm.launch()
+        wait_for_console_pattern(self, 'Linux version 2.6.27.7lemote')
+        cpu_revision = 'CPU revision is: 00006302 (ICT Loongson-2)'
+        wait_for_console_pattern(self, cpu_revision)
+
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_mips64el_loongson3v.py b/tests/functional/test_mips64el_loongson3v.py
new file mode 100755
index 0000000000..55d62928c7
--- /dev/null
+++ b/tests/functional/test_mips64el_loongson3v.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+#
+# Functional tests for the Generic Loongson-3 Platform.
+#
+# Copyright (c) 2021 Jiaxun Yang <jiaxun.yang@flygoat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+import os
+import time
+
+from unittest import skipUnless
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class MipsLoongson3v(QemuSystemTest):
+    timeout = 60
+
+    ASSET_PMON = Asset(
+        ('https://github.com/loongson-community/pmon/'
+         'releases/download/20210112/pmon-3avirt.bin'),
+        'fcdf6bb2cb7885a4a62f31fcb0d5e368bac7b6cea28f40c6dfa678af22fea20a')
+
+    @skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
+    def test_pmon_serial_console(self):
+        self.set_machine('loongson3-virt')
+
+        pmon_path = self.ASSET_PMON.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args('-bios', pmon_path)
+        self.vm.launch()
+        wait_for_console_pattern(self, 'CPU GODSON3 BogoMIPS:')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_netdev_ethtool.py b/tests/functional/test_netdev_ethtool.py
new file mode 100755
index 0000000000..d5b911c918
--- /dev/null
+++ b/tests/functional/test_netdev_ethtool.py
@@ -0,0 +1,88 @@
+#!/usr/bin/env python3
+#
+# ethtool tests for emulated network devices
+#
+# This test leverages ethtool's --test sequence to validate network
+# device behaviour.
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+from unittest import skip
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class NetDevEthtool(QemuSystemTest):
+
+    # Runs in about 17s under KVM, 19s under TCG, 25s under GCOV
+    timeout = 45
+
+    # Fetch assets from the netdev-ethtool subdir of my shared test
+    # images directory on fileserver.linaro.org.
+    ASSET_BASEURL = ('https://fileserver.linaro.org/s/kE4nCFLdQcoBF9t/'
+                     'download?path=%2Fnetdev-ethtool&files=')
+    ASSET_BZIMAGE = Asset(
+        ASSET_BASEURL + "bzImage",
+        "ed62ee06ea620b1035747f3f66a5e9fc5d3096b29f75562ada888b04cd1c4baf")
+    ASSET_ROOTFS = Asset(
+        ASSET_BASEURL + "rootfs.squashfs",
+        "8f0207e3c4d40832ae73c1a927e42ca30ccb1e71f047acb6ddb161ba422934e6")
+
+    def common_test_code(self, netdev, extra_args=None):
+        self.set_machine('q35')
+
+        # This custom kernel has drivers for all the supported network
+        # devices we can emulate in QEMU
+        kernel = self.ASSET_BZIMAGE.fetch()
+        rootfs = self.ASSET_ROOTFS.fetch()
+
+        append = 'printk.time=0 console=ttyS0 '
+        append += 'root=/dev/sr0 rootfstype=squashfs '
+
+        # any additional kernel tweaks for the test
+        if extra_args:
+            append += extra_args
+
+        # finally invoke ethtool directly
+        append += ' init=/usr/sbin/ethtool -- -t eth1 offline'
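+        # For example, with extra_args='pci=nomsi' the guest command line is:
+        #   printk.time=0 console=ttyS0 root=/dev/sr0 rootfstype=squashfs \
+        #   pci=nomsi init=/usr/sbin/ethtool -- -t eth1 offline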
+
+        # add the rootfs via a readonly cdrom image
+        drive = f"file={rootfs},if=ide,index=0,media=cdrom"
+
+        self.vm.add_args('-kernel', kernel,
+                         '-append', append,
+                         '-drive', drive,
+                         '-device', netdev)
+
+        self.vm.set_console(console_index=0)
+        self.vm.launch()
+
+        wait_for_console_pattern(self,
+                                 "The test result is PASS",
+                                 "The test result is FAIL",
+                                 vm=None)
+        # no need to gracefully shutdown, just finish
+        self.vm.kill()
+
+    def test_igb(self):
+        self.common_test_code("igb")
+
+    def test_igb_nomsi(self):
+        self.common_test_code("igb", "pci=nomsi")
+
+    # It seems the other popular cards we model in QEMU currently fail
+    # the pattern test with:
+    #
+    #   pattern test failed (reg 0x00178): got 0x00000000 expected 0x00005A5A
+    #
+    # So for now we skip them.
+
+    @skip("Incomplete reg 0x00178 support")
+    def test_e1000(self):
+        self.common_test_code("e1000")
+
+    @skip("Incomplete reg 0x00178 support")
+    def test_i82550(self):
+        self.common_test_code("i82550")
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_pc_cpu_hotplug_props.py b/tests/functional/test_pc_cpu_hotplug_props.py
new file mode 100755
index 0000000000..9d5a37cb17
--- /dev/null
+++ b/tests/functional/test_pc_cpu_hotplug_props.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python3
+#
+# Ensure CPU die-id can be omitted on -device
+#
+#  Copyright (c) 2019 Red Hat Inc
+#
+# Author:
+#  Eduardo Habkost <ehabkost@redhat.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+from qemu_test import QemuSystemTest
+
+class OmittedCPUProps(QemuSystemTest):
+
+    def test_no_die_id(self):
+        self.vm.add_args('-nodefaults', '-S')
+        self.vm.add_args('-smp', '1,sockets=2,cores=2,threads=2,maxcpus=8')
+        self.vm.add_args('-device', 'qemu64-x86_64-cpu,socket-id=1,core-id=0,thread-id=0')
+        self.vm.launch()
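+        # Expect two entries: the single boot CPU configured by -smp plus
+        # the qemu64-x86_64-cpu device added above.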
+        self.assertEqual(len(self.vm.cmd('query-cpus-fast')), 2)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc64_hv.py b/tests/functional/test_ppc64_hv.py
new file mode 100755
index 0000000000..1a6e4b6d07
--- /dev/null
+++ b/tests/functional/test_ppc64_hv.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python3
+#
+# Tests that specifically try to exercise hypervisor features of the
+# target machines. powernv supports the Power hypervisor ISA, and
+# pseries supports the nested-HV hypervisor spec.
+#
+# Copyright (c) 2023 IBM Corporation
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from unittest import skipIf, skipUnless
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern, exec_command
+import os
+import time
+import subprocess
+from datetime import datetime
+
+deps = ["xorriso"] # dependent tools needed in the test setup/box.
+
+def which(tool):
+    """ looks up the full path for @tool, returns None if not found
+        or if @tool does not have executable permissions.
+    """
+    paths=os.getenv('PATH')
+    for p in paths.split(os.path.pathsep):
+        p = os.path.join(p, tool)
+        if os.path.exists(p) and os.access(p, os.X_OK):
+            return p
+    return None
+
+def missing_deps():
+    """ returns True if any of the test dependent tools are absent.
+    """
+    for dep in deps:
+        if which(dep) is None:
+            return True
+    return False
+
+# Alpine is a lightweight distro that supports QEMU. These tests boot it on
+# the machine, then run a QEMU guest inside it in KVM mode, which runs the
+# same Alpine distro image.
+# QEMU packages are downloaded and installed on each test. That's not a
+# large download, but it may be more polite to create a qcow2 image with
+# QEMU already installed and use that.
+# XXX: The order of these tests seems to matter, see git blame.
+@skipIf(missing_deps(), 'dependencies (%s) not installed' % ','.join(deps))
+@skipUnless(os.getenv('QEMU_TEST_ALLOW_LARGE_STORAGE'), 'storage limited')
+class HypervisorTest(QemuSystemTest):
+
+    timeout = 1000
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
+    panic_message = 'Kernel panic - not syncing'
+    good_message = 'VFS: Cannot open root device'
+
+    ASSET_ISO = Asset(
+        ('https://dl-cdn.alpinelinux.org/alpine/v3.18/'
+         'releases/ppc64le/alpine-standard-3.18.4-ppc64le.iso'),
+        'c26b8d3e17c2f3f0fed02b4b1296589c2390e6d5548610099af75300edd7b3ff')
+
+    def extract_from_iso(self, iso, path):
+        """
+        Extracts a file from an iso file into the test workdir
+
+        :param iso: path to the iso file
+        :param path: path within the iso file of the file to be extracted
+        :returns: path of the extracted file
+        """
+        filename = os.path.basename(path)
+
+        cwd = os.getcwd()
+        os.chdir(self.workdir)
+
+        with open(filename, "w") as outfile:
+            cmd = "xorriso -osirrox on -indev %s -cpx %s %s" % (iso, path, filename)
+            subprocess.run(cmd.split(),
+                           stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+        os.chmod(filename, 0o600)
+        os.chdir(cwd)
+
+        # Return the complete path to the extracted file. Note that
+        # 'filename' is just the basename, with no leading slash, so
+        # os.path.join() keeps the self.workdir prefix intact.
+        return os.path.normpath(os.path.join(self.workdir, filename))
+
+    def setUp(self):
+        super().setUp()
+
+        self.iso_path = self.ASSET_ISO.fetch()
+        self.vmlinuz = self.extract_from_iso(self.iso_path, '/boot/vmlinuz-lts')
+        self.initramfs = self.extract_from_iso(self.iso_path, '/boot/initramfs-lts')
+
+    def do_start_alpine(self):
+        self.vm.set_console()
+        kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+        self.vm.add_args("-kernel", self.vmlinuz)
+        self.vm.add_args("-initrd", self.initramfs)
+        self.vm.add_args("-smp", "4", "-m", "2g")
+        self.vm.add_args("-drive", f"file={self.iso_path},format=raw,if=none,id=drive0")
+
+        self.vm.launch()
+        wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18')
+        exec_command(self, 'root')
+        wait_for_console_pattern(self, 'localhost login:')
+        wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.')
+        # If the time is wrong, SSL certificates can fail.
+        exec_command(self, 'date -s "' +
+                     datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + '"')
+        exec_command(self, 'setup-alpine -qe')
+        wait_for_console_pattern(self, 'Updating repository indexes... done.')
+
+    def do_stop_alpine(self):
+        exec_command(self, 'poweroff')
+        wait_for_console_pattern(self, 'alpine:~#')
+        self.vm.wait()
+
+    def do_setup_kvm(self):
+        exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/main > /etc/apk/repositories')
+        wait_for_console_pattern(self, 'alpine:~#')
+        exec_command(self, 'echo http://dl-cdn.alpinelinux.org/alpine/v3.18/community >> /etc/apk/repositories')
+        wait_for_console_pattern(self, 'alpine:~#')
+        exec_command(self, 'apk update')
+        wait_for_console_pattern(self, 'alpine:~#')
+        exec_command(self, 'apk add qemu-system-ppc64')
+        wait_for_console_pattern(self, 'alpine:~#')
+        exec_command(self, 'modprobe kvm-hv')
+        wait_for_console_pattern(self, 'alpine:~#')
+
+    # This uses the host's block device as the source file for guest block
+    # device for install media. This is a bit hacky but allows reuse of the
+    # iso without having a passthrough filesystem configured.
+    def do_test_kvm(self, hpt=False):
+        if hpt:
+            append = 'disable_radix'
+        else:
+            append = ''
+        exec_command(self, 'qemu-system-ppc64 -nographic -smp 2 -m 1g '
+                           '-machine pseries,x-vof=on,accel=kvm '
+                           '-machine cap-cfpc=broken,cap-sbbc=broken,'
+                                    'cap-ibs=broken,cap-ccf-assist=off '
+                           '-drive file=/dev/nvme0n1,format=raw,readonly=on '
+                           '-initrd /media/nvme0n1/boot/initramfs-lts '
+                           '-kernel /media/nvme0n1/boot/vmlinuz-lts '
+                           '-append \'usbcore.nousb ' + append + '\'')
+        # Alpine 3.18 kernel seems to crash in XHCI USB driver.
+        wait_for_console_pattern(self, 'Welcome to Alpine Linux 3.18')
+        exec_command(self, 'root')
+        wait_for_console_pattern(self, 'localhost login:')
+        wait_for_console_pattern(self, 'You may change this message by editing /etc/motd.')
+        exec_command(self, 'poweroff >& /dev/null')
+        wait_for_console_pattern(self, 'localhost:~#')
+        wait_for_console_pattern(self, 'reboot: Power down')
+        time.sleep(1)
+        exec_command(self, '')
+        wait_for_console_pattern(self, 'alpine:~#')
+
+    def test_hv_pseries(self):
+        self.require_accelerator("tcg")
+        self.set_machine('pseries')
+        self.vm.add_args("-accel", "tcg,thread=multi")
+        self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0')
+        self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on")
+        self.do_start_alpine()
+        self.do_setup_kvm()
+        self.do_test_kvm()
+        self.do_stop_alpine()
+
+    def test_hv_pseries_kvm(self):
+        self.require_accelerator("kvm")
+        self.set_machine('pseries')
+        self.vm.add_args("-accel", "kvm")
+        self.vm.add_args('-device', 'nvme,serial=1234,drive=drive0')
+        self.vm.add_args("-machine", "x-vof=on,cap-nested-hv=on,cap-ccf-assist=off")
+        self.do_start_alpine()
+        self.do_setup_kvm()
+        self.do_test_kvm()
+        self.do_stop_alpine()
+
+    def test_hv_powernv(self):
+        self.require_accelerator("tcg")
+        self.set_machine('powernv')
+        self.vm.add_args("-accel", "tcg,thread=multi")
+        self.vm.add_args('-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234,drive=drive0',
+                         '-device', 'e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0',
+                         '-netdev', 'user,id=net0,hostfwd=::20022-:22,hostname=alpine')
+        self.do_start_alpine()
+        self.do_setup_kvm()
+        self.do_test_kvm()
+        self.do_test_kvm(True)
+        self.do_stop_alpine()
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc64_powernv.py b/tests/functional/test_ppc64_powernv.py
new file mode 100755
index 0000000000..67497d6404
--- /dev/null
+++ b/tests/functional/test_ppc64_powernv.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on ppc powernv machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class powernvMachine(QemuSystemTest):
+
+    timeout = 90
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
+    panic_message = 'Kernel panic - not syncing'
+    good_message = 'VFS: Cannot open root device'
+
+    ASSET_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/'
+         'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'),
+        '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f')
+
+    def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE):
+        self.require_accelerator("tcg")
+        kernel_path = self.ASSET_KERNEL.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args('-kernel', kernel_path,
+                         '-append', command_line)
+        self.vm.launch()
+
+    def test_linux_boot(self):
+        self.set_machine('powernv')
+        self.do_test_linux_boot()
+        console_pattern = 'VFS: Cannot open root device'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+
+    def test_linux_smp_boot(self):
+        self.set_machine('powernv')
+        self.vm.add_args('-smp', '4')
+        self.do_test_linux_boot()
+        console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+    def test_linux_smp_hpt_boot(self):
+        self.set_machine('powernv')
+        self.vm.add_args('-smp', '4')
+        self.do_test_linux_boot(self.KERNEL_COMMON_COMMAND_LINE +
+                                'disable_radix')
+        console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+        wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu',
+                                 self.panic_message)
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+    def test_linux_smt_boot(self):
+        self.set_machine('powernv')
+        self.vm.add_args('-smp', '4,threads=4')
+        self.do_test_linux_boot()
+        console_pattern = 'CPU maps initialized for 4 threads per core'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+    def test_linux_big_boot(self):
+        self.set_machine('powernv')
+        self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2')
+
+        # powernv does not support NUMA
+        self.do_test_linux_boot()
+        console_pattern = 'CPU maps initialized for 4 threads per core'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        console_pattern = 'smp: Brought up 2 nodes, 16 CPUs'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc64_pseries.py b/tests/functional/test_ppc64_pseries.py
new file mode 100755
index 0000000000..fdc404ed03
--- /dev/null
+++ b/tests/functional/test_ppc64_pseries.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on ppc machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class pseriesMachine(QemuSystemTest):
+
+    timeout = 90
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 console=hvc0 '
+    panic_message = 'Kernel panic - not syncing'
+    good_message = 'VFS: Cannot open root device'
+
+    ASSET_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/'
+         'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'),
+        '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f')
+
+    def do_test_ppc64_linux_boot(self, kernel_command_line = KERNEL_COMMON_COMMAND_LINE):
+        kernel_path = self.ASSET_KERNEL.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args('-kernel', kernel_path,
+                         '-append', kernel_command_line)
+        self.vm.launch()
+
+    def test_ppc64_vof_linux_boot(self):
+        self.set_machine('pseries')
+        self.vm.add_args('-machine', 'x-vof=on')
+        self.do_test_ppc64_linux_boot()
+        console_pattern = 'VFS: Cannot open root device'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+
+    def test_ppc64_linux_boot(self):
+        self.set_machine('pseries')
+        self.do_test_ppc64_linux_boot()
+        console_pattern = 'VFS: Cannot open root device'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+
+    def test_ppc64_linux_smp_boot(self):
+        self.set_machine('pseries')
+        self.vm.add_args('-smp', '4')
+        self.do_test_ppc64_linux_boot()
+        console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+    def test_ppc64_linux_hpt_smp_boot(self):
+        self.set_machine('pseries')
+        self.vm.add_args('-smp', '4')
+        self.do_test_ppc64_linux_boot(self.KERNEL_COMMON_COMMAND_LINE +
+                                      'disable_radix')
+        console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+        wait_for_console_pattern(self, 'hash-mmu: Initializing hash mmu',
+                                 self.panic_message)
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+    def test_ppc64_linux_smt_boot(self):
+        self.set_machine('pseries')
+        self.vm.add_args('-smp', '4,threads=4')
+        self.do_test_ppc64_linux_boot()
+        console_pattern = 'CPU maps initialized for 4 threads per core'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        console_pattern = 'smp: Brought up 1 node, 4 CPUs'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+    def test_ppc64_linux_big_boot(self):
+        self.set_machine('pseries')
+        self.vm.add_args('-smp', '16,threads=4,cores=2,sockets=2')
+        self.vm.add_args('-m', '512M',
+                         '-object', 'memory-backend-ram,size=256M,id=m0',
+                         '-object', 'memory-backend-ram,size=256M,id=m1')
+        self.vm.add_args('-numa', 'node,nodeid=0,memdev=m0')
+        self.vm.add_args('-numa', 'node,nodeid=1,memdev=m1')
+        self.do_test_ppc64_linux_boot()
+        console_pattern = 'CPU maps initialized for 4 threads per core'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        console_pattern = 'smp: Brought up 2 nodes, 16 CPUs'
+        wait_for_console_pattern(self, console_pattern, self.panic_message)
+        wait_for_console_pattern(self, self.good_message, self.panic_message)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_405.py b/tests/functional/test_ppc_405.py
new file mode 100755
index 0000000000..9851c03ee9
--- /dev/null
+++ b/tests/functional/test_ppc_405.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Test that the U-Boot firmware boots on ppc 405 machines and check the console
+#
+# Copyright (c) 2021 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+class Ppc405Machine(QemuSystemTest):
+
+    timeout = 90
+
+    ASSET_UBOOT = Asset(
+        ('https://gitlab.com/huth/u-boot/-/raw/taihu-2021-10-09/'
+         'u-boot-taihu.bin'),
+        'a076bb6cdeaafa406330e51e074b66d8878d9036d67d4caa0137be03ee4c112c')
+
+    def do_test_ppc405(self):
+        file_path = self.ASSET_UBOOT.fetch()
+        self.vm.set_console(console_index=1)
+        self.vm.add_args('-bios', file_path)
+        self.vm.launch()
+        wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board')
+        exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP')
+
+    def test_ppc_ref405ep(self):
+        self.require_accelerator("tcg")
+        self.set_machine('ref405ep')
+        self.do_test_ppc405()
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_40p.py b/tests/functional/test_ppc_40p.py
new file mode 100755
index 0000000000..c64e876c1f
--- /dev/null
+++ b/tests/functional/test_ppc_40p.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a PReP/40p machine and checks its serial console.
+#
+# Copyright (c) Philippe Mathieu-Daudé <f4bug@amsat.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+
+from unittest import skipUnless
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+
+class IbmPrep40pMachine(QemuSystemTest):
+
+    timeout = 60
+
+    ASSET_BIOS = Asset(
+        ('http://ftpmirror.your.org/pub/misc/'
+         'ftp.software.ibm.com/rs6000/firmware/'
+         '7020-40p/P12H0456.IMG'),
+        'd957f79c73f760d1455d2286fcd901ed6d06167320eb73511b478a939be25b3f')
+    ASSET_NETBSD40 = Asset(
+        ('https://archive.netbsd.org/pub/NetBSD-archive/'
+         'NetBSD-4.0/prep/installation/floppy/generic_com0.fs'),
+        'f86236e9d01b3f0dd0f5d3b8d5bbd40c68e78b4db560a108358f5ad58e636619')
+    ASSET_NETBSD71 = Asset(
+        ('https://archive.netbsd.org/pub/NetBSD-archive/'
+         'NetBSD-7.1.2/iso/NetBSD-7.1.2-prep.iso'),
+        'cc7cb290b06aaa839362deb7bd9f417ac5015557db24088508330f76c3f825ec')
+
+    # 12H0455 PPS Firmware Licensed Materials
+    # Property of IBM (C) Copyright IBM Corp. 1994.
+    # All rights reserved.
+    # U.S. Government Users Restricted Rights - Use, duplication or disclosure
+    # restricted by GSA ADP Schedule Contract with IBM Corp.
+    @skipUnless(os.getenv('QEMU_TEST_ALLOW_UNTRUSTED_CODE'), 'untrusted code')
+    def test_factory_firmware_and_netbsd(self):
+        self.set_machine('40p')
+        self.require_accelerator("tcg")
+        bios_path = self.ASSET_BIOS.fetch()
+        drive_path = self.ASSET_NETBSD40.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args('-bios', bios_path,
+                         '-fda', drive_path)
+        self.vm.launch()
+        os_banner = 'NetBSD 4.0 (GENERIC) #0: Sun Dec 16 00:49:40 PST 2007'
+        wait_for_console_pattern(self, os_banner)
+        wait_for_console_pattern(self, 'Model: IBM PPS Model 6015')
+
+    def test_openbios_192m(self):
+        self.set_machine('40p')
+        self.require_accelerator("tcg")
+        self.vm.set_console()
+        self.vm.add_args('-m', '192') # test fw_cfg
+
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> Memory: 192M')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,604')
+
+    def test_openbios_and_netbsd(self):
+        self.set_machine('40p')
+        self.require_accelerator("tcg")
+        drive_path = self.ASSET_NETBSD71.fetch()
+        self.vm.set_console()
+        self.vm.add_args('-cdrom', drive_path,
+                         '-boot', 'd')
+
+        self.vm.launch()
+        wait_for_console_pattern(self, 'NetBSD/prep BOOT, Revision 1.9')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_74xx.py b/tests/functional/test_ppc_74xx.py
new file mode 100755
index 0000000000..5386016f26
--- /dev/null
+++ b/tests/functional/test_ppc_74xx.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Smoke tests for 74xx cpus (aka G4).
+#
+# Copyright (c) 2021, IBM Corp.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test import QemuSystemTest
+from qemu_test import wait_for_console_pattern
+
+class ppc74xxCpu(QemuSystemTest):
+
+    timeout = 5
+
+    def test_ppc_7400(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7400')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7410(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7410')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,74xx')
+
+    def test_ppc_7441(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7441')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7445(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7445')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7447(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7447')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7447a(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7447a')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7448(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7448')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,MPC86xx')
+
+    def test_ppc_7450(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7450')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7451(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7451')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7455(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7455')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7457(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7457')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+    def test_ppc_7457a(self):
+        self.require_accelerator("tcg")
+        self.set_machine('g3beige')
+        self.vm.set_console()
+        self.vm.add_args('-cpu', '7457a')
+        self.vm.launch()
+        wait_for_console_pattern(self, '>> OpenBIOS')
+        wait_for_console_pattern(self, '>> CPU type PowerPC,G4')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_amiga.py b/tests/functional/test_ppc_amiga.py
new file mode 100755
index 0000000000..b793b5c432
--- /dev/null
+++ b/tests/functional/test_ppc_amiga.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+#
+# Test AmigaNG boards
+#
+# Copyright (c) 2023 BALATON Zoltan
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import subprocess
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern, run_cmd
+from zipfile import ZipFile
+
+class AmigaOneMachine(QemuSystemTest):
+
+    timeout = 90
+
+    ASSET_IMAGE = Asset(
+        ('https://www.hyperion-entertainment.com/index.php/'
+         'downloads?view=download&format=raw&file=25'),
+        '8ff39330ba47d4f64de4ee8fd6809e9c010a9ef17fe51e95c3c1d53437cb481f')
+
+    def test_ppc_amigaone(self):
+        self.require_accelerator("tcg")
+        self.set_machine('amigaone')
+        tar_name = 'A1Firmware_Floppy_05-Mar-2005.zip'
+        zip_file = self.ASSET_IMAGE.fetch()
+        with ZipFile(zip_file, 'r') as zf:
+            zf.extractall(path=self.workdir)
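+        # Carve the last 512 KiB (524288 bytes) out of the updater image and
+        # use that tail as the machine's U-Boot firmware image.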
+        with open(self.workdir + "/u-boot-amigaone.bin", "wb") as bios_fh:
+            subprocess.run(['tail', '-c', '524288',
+                            self.workdir + "/floppy_edition/updater.image"],
+                           stdout=bios_fh)
+
+        self.vm.set_console()
+        self.vm.add_args('-bios', self.workdir + '/u-boot-amigaone.bin')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'FLASH:')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_bamboo.py b/tests/functional/test_ppc_bamboo.py
new file mode 100755
index 0000000000..e72cbdee12
--- /dev/null
+++ b/tests/functional/test_ppc_bamboo.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on the ppc bamboo board and check the console
+#
+# Copyright (c) 2021 Red Hat
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test.utils import archive_extract
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+
+class BambooMachine(QemuSystemTest):
+
+    timeout = 90
+
+    ASSET_IMAGE = Asset(
+        ('http://landley.net/aboriginal/downloads/binaries/'
+         'system-image-powerpc-440fp.tar.gz'),
+        'c12b58f841c775a0e6df4832a55afe6b74814d1565d08ddeafc1fb949a075c5e')
+
+    def test_ppc_bamboo(self):
+        self.set_machine('bamboo')
+        self.require_accelerator("tcg")
+        self.require_netdev('user')
+        file_path = self.ASSET_IMAGE.fetch()
+        archive_extract(file_path, self.workdir)
+        self.vm.set_console()
+        self.vm.add_args('-kernel', self.workdir +
+                                   '/system-image-powerpc-440fp/linux',
+                         '-initrd', self.workdir +
+                                   '/system-image-powerpc-440fp/rootfs.cpio.gz',
+                         '-nic', 'user,model=rtl8139,restrict=on')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'Type exit when done')
+        exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2',
+                                          '10.0.2.2 is alive!')
+        exec_command_and_wait_for_pattern(self, 'halt', 'System Halted')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_mpc8544ds.py b/tests/functional/test_ppc_mpc8544ds.py
new file mode 100755
index 0000000000..2b3f0894ae
--- /dev/null
+++ b/tests/functional/test_ppc_mpc8544ds.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on ppc machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test.utils import archive_extract
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class Mpc8544dsMachine(QemuSystemTest):
+
+    timeout = 90
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+    panic_message = 'Kernel panic - not syncing'
+
+    ASSET_IMAGE = Asset(
+        ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+         'day04.tar.xz'),
+        '88bc83f3c9f3d633bcfc108a6342d677abca247066a2fb8d4636744a0d319f94')
+
+    def test_ppc_mpc8544ds(self):
+        self.require_accelerator("tcg")
+        self.set_machine('mpc8544ds')
+        file_path = self.ASSET_IMAGE.fetch()
+        archive_extract(file_path, self.workdir, member='creek/creek.bin')
+        self.vm.set_console()
+        self.vm.add_args('-kernel', self.workdir + '/creek/creek.bin')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'QEMU advent calendar 2020',
+                                 self.panic_message)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_ppc_virtex_ml507.py b/tests/functional/test_ppc_virtex_ml507.py
new file mode 100755
index 0000000000..ffa9a0633e
--- /dev/null
+++ b/tests/functional/test_ppc_virtex_ml507.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+#
+# Test that Linux kernel boots on ppc machines and check the console
+#
+# Copyright (c) 2018, 2020 Red Hat, Inc.
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+from qemu_test.utils import archive_extract
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+
+class VirtexMl507Machine(QemuSystemTest):
+
+    timeout = 90
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+    panic_message = 'Kernel panic - not syncing'
+
+    ASSET_IMAGE = Asset(
+        ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+         'day08.tar.xz'),
+        'cefe5b8aeb5e9d2d1d4fd22dcf48d917d68d5a765132bf2ddd6332dc393b824c')
+
+    def test_ppc_virtex_ml507(self):
+        self.require_accelerator("tcg")
+        self.set_machine('virtex-ml507')
+        file_path = self.ASSET_IMAGE.fetch()
+        archive_extract(file_path, self.workdir)
+        self.vm.set_console()
+        self.vm.add_args('-kernel', self.workdir + '/hippo/hippo.linux',
+                         '-dtb', self.workdir + '/hippo/virtex440-ml507.dtb',
+                         '-m', '512')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'QEMU advent calendar 2020',
+                                 self.panic_message)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_rx_gdbsim.py b/tests/functional/test_rx_gdbsim.py
new file mode 100755
index 0000000000..5687f756bb
--- /dev/null
+++ b/tests/functional/test_rx_gdbsim.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+#  Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import os
+
+from unittest import skipUnless
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import gzip_uncompress
+
+
+class RxGdbSimMachine(QemuSystemTest):
+
+    timeout = 30
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+    ASSET_UBOOT = Asset(
+        'https://acc.dl.osdn.jp/users/23/23888/u-boot.bin.gz',
+        '7146567d669e91dbac166384b29aeba1715beb844c8551e904b86831bfd9d046')
+    ASSET_DTB = Asset(
+        'https://acc.dl.osdn.jp/users/23/23887/rx-virt.dtb',
+        'aa278d9c1907a4501741d7ee57e7f65c02dd1b3e0323b33c6d4247f1b32cf29a')
+    ASSET_KERNEL = Asset(
+        'http://acc.dl.osdn.jp/users/23/23845/zImage',
+        'baa43205e74a7220ed8482188c5e9ce497226712abb7f4e7e4f825ce19ff9656')
+
+    def test_uboot(self):
+        """
+        Boots U-Boot and checks that the console is operational.
+        """
+        self.set_machine('gdbsim-r5f562n8')
+
+        uboot_path_gz = self.ASSET_UBOOT.fetch()
+        uboot_path = os.path.join(self.workdir, 'u-boot.bin')
+        gzip_uncompress(uboot_path_gz, uboot_path)
+
+        self.vm.set_console()
+        self.vm.add_args('-bios', uboot_path,
+                         '-no-reboot')
+        self.vm.launch()
+        uboot_version = 'U-Boot 2016.05-rc3-23705-ga1ef3c71cb-dirty'
+        wait_for_console_pattern(self, uboot_version)
+        gcc_version = 'rx-unknown-linux-gcc (GCC) 9.0.0 20181105 (experimental)'
+        # FIXME limit baudrate on chardev, else we type too fast
+        #exec_command_and_wait_for_pattern(self, 'version', gcc_version)
+
+    @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab')
+    def test_linux_sash(self):
+        """
+        Boots a Linux kernel and checks that the console is operational.
+        """
+        self.set_machine('gdbsim-r5f562n7')
+
+        dtb_path = self.ASSET_DTB.fetch()
+        kernel_path = self.ASSET_KERNEL.fetch()
+
+        self.vm.set_console()
+        kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE + 'earlycon'
+        self.vm.add_args('-kernel', kernel_path,
+                         '-dtb', dtb_path,
+                         '-no-reboot')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'Sash command shell (version 1.1.1)',
+                                 failure_message='Kernel panic - not syncing')
+        exec_command_and_wait_for_pattern(self, 'printenv', 'TERM=linux')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_s390x_ccw_virtio.py b/tests/functional/test_s390x_ccw_virtio.py
new file mode 100755
index 0000000000..f7acd90a89
--- /dev/null
+++ b/tests/functional/test_s390x_ccw_virtio.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots an s390x Linux guest with ccw and PCI devices
+# attached and checks whether the devices are recognized by Linux
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+#  Cornelia Huck <cohuck@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import os
+import tempfile
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import lzma_uncompress
+
+class S390CCWVirtioMachine(QemuSystemTest):
+    KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
+
+    timeout = 120
+
+    ASSET_BUSTER_KERNEL = Asset(
+        ('https://snapshot.debian.org/archive/debian/'
+         '20201126T092837Z/dists/buster/main/installer-s390x/'
+         '20190702+deb10u6/images/generic/kernel.debian'),
+        'd411d17c39ae7ad38d27534376cbe88b68b403c325739364122c2e6f1537e818')
+    ASSET_BUSTER_INITRD = Asset(
+        ('https://snapshot.debian.org/archive/debian/'
+         '20201126T092837Z/dists/buster/main/installer-s390x/'
+         '20190702+deb10u6/images/generic/initrd.debian'),
+        '836bbd0fe6a5ca81274c28c2b063ea315ce1868660866e9b60180c575fef9fd5')
+
+    ASSET_F31_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive'
+         '/fedora-secondary/releases/31/Server/s390x/os'
+         '/images/kernel.img'),
+        '480859574f3f44caa6cd35c62d70e1ac0609134e22ce2a954bbed9b110c06e0b')
+    ASSET_F31_INITRD = Asset(
+        ('https://archives.fedoraproject.org/pub/archive'
+         '/fedora-secondary/releases/31/Server/s390x/os'
+         '/images/initrd.img'),
+        '04c46095b2c49020b1c2327158898b7db747e4892ae319726192fb949716aa9c')
+
+    def wait_for_console_pattern(self, success_message, vm=None):
+        wait_for_console_pattern(self, success_message,
+                                 failure_message='Kernel panic - not syncing',
+                                 vm=vm)
+
+    def wait_for_crw_reports(self):
+        exec_command_and_wait_for_pattern(self,
+                        'while ! (dmesg -c | grep CRW) ; do sleep 1 ; done',
+                        'CRW reports')
+
+    dmesg_clear_count = 1
+    def clear_guest_dmesg(self):
+        exec_command_and_wait_for_pattern(self, 'dmesg -c > /dev/null; '
+                    r'echo dm_clear\ ' + str(self.dmesg_clear_count),
+                    r'dm_clear ' + str(self.dmesg_clear_count))
+        self.dmesg_clear_count += 1
+
+    def test_s390x_devices(self):
+        self.set_machine('s390-ccw-virtio')
+
+        kernel_path = self.ASSET_BUSTER_KERNEL.fetch()
+        initrd_path = self.ASSET_BUSTER_INITRD.fetch()
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                              'console=sclp0 root=/dev/ram0 BOOT_DEBUG=3')
+        self.vm.add_args('-nographic',
+                         '-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-append', kernel_command_line,
+                         '-cpu', 'max,prno-trng=off',
+                         '-device', 'virtio-net-ccw,devno=fe.1.1111',
+                         '-device',
+                         'virtio-rng-ccw,devno=fe.2.0000,max_revision=0,id=rn1',
+                         '-device',
+                         'virtio-rng-ccw,devno=fe.3.1234,max_revision=2,id=rn2',
+                         '-device', 'zpci,uid=5,target=zzz',
+                         '-device', 'virtio-net-pci,id=zzz',
+                         '-device', 'zpci,uid=0xa,fid=12,target=serial',
+                         '-device', 'virtio-serial-pci,id=serial',
+                         '-device', 'virtio-balloon-ccw')
+        self.vm.launch()
+
+        shell_ready = "sh: can't access tty; job control turned off"
+        self.wait_for_console_pattern(shell_ready)
+        # first debug shell is too early, we need to wait for device detection
+        exec_command_and_wait_for_pattern(self, 'exit', shell_ready)
+
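+        # The devices above were defined on the virtual channel subsystem
+        # 0xfe; by default the guest sees them in channel subsystem 0,
+        # hence the 0.x.yyyy bus ids checked below.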
+        ccw_bus_ids="0.1.1111  0.2.0000  0.3.1234"
+        pci_bus_ids="0005:00:00.0  000a:00:00.0"
+        exec_command_and_wait_for_pattern(self, 'ls /sys/bus/ccw/devices/',
+                                          ccw_bus_ids)
+        exec_command_and_wait_for_pattern(self, 'ls /sys/bus/pci/devices/',
+                                          pci_bus_ids)
+        # check that the device at 0.2.0000 is in legacy mode, while the
+        # device at 0.3.1234 has the virtio-1 feature bit set
+        virtio_rng_features="00000000000000000000000000001100" + \
+                            "10000000000000000000000000000000"
+        virtio_rng_features_legacy="00000000000000000000000000001100" + \
+                                   "00000000000000000000000000000000"
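+        # (bit 32 in these bitmaps is VIRTIO_F_VERSION_1: set for the
+        # virtio-1 device at 0.3.1234, clear for the legacy one at 0.2.0000)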
+        exec_command_and_wait_for_pattern(self,
+                        'cat /sys/bus/ccw/devices/0.2.0000/virtio?/features',
+                        virtio_rng_features_legacy)
+        exec_command_and_wait_for_pattern(self,
+                        'cat /sys/bus/ccw/devices/0.3.1234/virtio?/features',
+                        virtio_rng_features)
+        # check that /dev/hwrng works - and that it's gone after ejecting
+        exec_command_and_wait_for_pattern(self,
+                        'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
+                        '10+0 records out')
+        self.clear_guest_dmesg()
+        self.vm.cmd('device_del', id='rn1')
+        self.wait_for_crw_reports()
+        self.clear_guest_dmesg()
+        self.vm.cmd('device_del', id='rn2')
+        self.wait_for_crw_reports()
+        exec_command_and_wait_for_pattern(self,
+                        'dd if=/dev/hwrng of=/dev/null bs=1k count=10',
+                        'dd: /dev/hwrng: No such device')
+        # verify that we indeed have virtio-net devices (without having the
+        # virtio-net driver handy)
+        exec_command_and_wait_for_pattern(self,
+                                    'cat /sys/bus/ccw/devices/0.1.1111/cutype',
+                                    '3832/01')
+        exec_command_and_wait_for_pattern(self,
+                    r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_vendor',
+                    r'0x1af4')
+        exec_command_and_wait_for_pattern(self,
+                    r'cat /sys/bus/pci/devices/0005\:00\:00.0/subsystem_device',
+                    r'0x0001')
+        # check fid propagation
+        exec_command_and_wait_for_pattern(self,
+                    r'cat /sys/bus/pci/devices/000a\:00\:00.0/function_id',
+                    r'0x0000000c')
+        # add another device
+        self.clear_guest_dmesg()
+        self.vm.cmd('device_add', driver='virtio-net-ccw',
+                    devno='fe.0.4711', id='net_4711')
+        self.wait_for_crw_reports()
+        exec_command_and_wait_for_pattern(self, 'for i in 1 2 3 4 5 6 7 ; do '
+                    'if [ -e /sys/bus/ccw/devices/*4711 ]; then break; fi ;'
+                    'sleep 1 ; done ; ls /sys/bus/ccw/devices/',
+                    '0.0.4711')
+        # and detach it again
+        self.clear_guest_dmesg()
+        self.vm.cmd('device_del', id='net_4711')
+        self.vm.event_wait(name='DEVICE_DELETED',
+                           match={'data': {'device': 'net_4711'}})
+        self.wait_for_crw_reports()
+        exec_command_and_wait_for_pattern(self,
+                                          'ls /sys/bus/ccw/devices/0.0.4711',
+                                          'No such file or directory')
+        # test the virtio-balloon device
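+        # (assuming the default 128 MiB of guest RAM, 'balloon 96' removes
+        # 32 MiB = 32768 kB: 115640 kB - 32768 kB = 82872 kB, as checked
+        # below; 'balloon 128' restores the original size)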
+        exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
+                                          'MemTotal:         115640 kB')
+        self.vm.cmd('human-monitor-command', command_line='balloon 96')
+        exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
+                                          'MemTotal:          82872 kB')
+        self.vm.cmd('human-monitor-command', command_line='balloon 128')
+        exec_command_and_wait_for_pattern(self, 'head -n 1 /proc/meminfo',
+                                          'MemTotal:         115640 kB')
+
+
+    def test_s390x_fedora(self):
+        self.set_machine('s390-ccw-virtio')
+
+        kernel_path = self.ASSET_F31_KERNEL.fetch()
+
+        initrd_path_xz = self.ASSET_F31_INITRD.fetch()
+        initrd_path = os.path.join(self.workdir, 'initrd-raw.img')
+        lzma_uncompress(initrd_path_xz, initrd_path)
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + ' audit=0 '
+                              'rd.plymouth=0 plymouth.enable=0 rd.rescue')
+        self.vm.add_args('-nographic',
+                         '-smp', '4',
+                         '-m', '512',
+                         '-name', 'Some Guest Name',
+                         '-uuid', '30de4fd9-b4d5-409e-86a5-09b387f70bfa',
+                         '-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-append', kernel_command_line,
+                         '-device', 'zpci,uid=7,target=n',
+                         '-device', 'virtio-net-pci,id=n,mac=02:ca:fe:fa:ce:12',
+                         '-device', 'virtio-rng-ccw,devno=fe.1.9876',
+                         '-device', 'virtio-gpu-ccw,devno=fe.2.5432')
+        self.vm.launch()
+        self.wait_for_console_pattern('Kernel command line: %s'
+                                      % kernel_command_line)
+        self.wait_for_console_pattern('Entering emergency mode')
+
+        # Some tests to see whether the CLI options have been considered:
+        self.log.info("Test whether QEMU CLI options have been considered")
+        exec_command_and_wait_for_pattern(self,
+                        'while ! (dmesg | grep enP7p0s0) ; do sleep 1 ; done',
+                        'virtio_net virtio0 enP7p0s0: renamed')
+        exec_command_and_wait_for_pattern(self, 'lspci',
+                             '0007:00:00.0 Class 0200: Device 1af4:1000')
+        exec_command_and_wait_for_pattern(self,
+                             'cat /sys/class/net/enP7p0s0/address',
+                             '02:ca:fe:fa:ce:12')
+        exec_command_and_wait_for_pattern(self, 'lscss', '0.1.9876')
+        exec_command_and_wait_for_pattern(self, 'lscss', '0.2.5432')
+        exec_command_and_wait_for_pattern(self, 'cat /proc/cpuinfo',
+                             'processors    : 4')
+        exec_command_and_wait_for_pattern(self, 'grep MemTotal /proc/meminfo',
+                             'MemTotal:         499848 kB')
+        exec_command_and_wait_for_pattern(self, 'grep Name /proc/sysinfo',
+                             'Extended Name:   Some Guest Name')
+        exec_command_and_wait_for_pattern(self, 'grep UUID /proc/sysinfo',
+                             '30de4fd9-b4d5-409e-86a5-09b387f70bfa')
+
+        # Disable blinking cursor, then write some stuff into the framebuffer.
+        # QEMU's PPM screendumps contain uncompressed 24-bit values, while the
+        # framebuffer uses 32-bit, so we pad our text with some spaces when
+        # writing to the framebuffer. Since the PPM is uncompressed, we then
+        # can simply read the written "magic bytes" back from the PPM file to
+        # check whether the framebuffer is working as expected.
+        # Unfortunately, this test is flaky, so we don't run it by default
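+        # (the 24-bit PPM drops one byte of every 32-bit framebuffer pixel,
+        # i.e. one character in four of the padded text below, which leaves
+        # exactly "The quick fox jumps over a lazy dog")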
+        if os.getenv('QEMU_TEST_FLAKY_TESTS'):
+            self.log.info("Test screendump of virtio-gpu device")
+            exec_command_and_wait_for_pattern(self,
+                        'while ! (dmesg | grep gpudrmfb) ; do sleep 1 ; done',
+                        'virtio_gpudrmfb frame buffer device')
+            exec_command_and_wait_for_pattern(self,
+                r'echo -e "\e[?25l" > /dev/tty0', ':/#')
+            exec_command_and_wait_for_pattern(self, 'for ((i=0;i<250;i++)); do '
+                'echo " The  qu ick  fo x j ump s o ver  a  laz y d og" >> fox.txt;'
+                'done',
+                ':/#')
+            exec_command_and_wait_for_pattern(self,
+                'dd if=fox.txt of=/dev/fb0 bs=1000 oflag=sync,nocache ; rm fox.txt',
+                '12+0 records out')
+            with tempfile.NamedTemporaryFile(suffix='.ppm',
+                                             prefix='qemu-scrdump-') as ppmfile:
+                self.vm.cmd('screendump', filename=ppmfile.name)
+                ppmfile.seek(0)
+                line = ppmfile.readline()
+                self.assertEqual(line, b"P6\n")
+                line = ppmfile.readline()
+                self.assertEqual(line, b"1280 800\n")
+                line = ppmfile.readline()
+                self.assertEqual(line, b"255\n")
+                line = ppmfile.readline(256)
+                self.assertEqual(line, b"The quick fox jumps over a lazy dog\n")
+        else:
+            self.log.info("Skipped flaky screendump of virtio-gpu device test")
+
+        # Hot-plug a virtio-crypto device and see whether it gets accepted
+        self.log.info("Test hot-plug virtio-crypto device")
+        self.clear_guest_dmesg()
+        self.vm.cmd('object-add', qom_type='cryptodev-backend-builtin',
+                    id='cbe0')
+        self.vm.cmd('device_add', driver='virtio-crypto-ccw', id='crypdev0',
+                    cryptodev='cbe0', devno='fe.0.2342')
+        exec_command_and_wait_for_pattern(self,
+                        'while ! (dmesg -c | grep Accelerator.device) ; do'
+                        ' sleep 1 ; done', 'Accelerator device is ready')
+        exec_command_and_wait_for_pattern(self, 'lscss', '0.0.2342')
+        self.vm.cmd('device_del', id='crypdev0')
+        self.vm.cmd('object-del', id='cbe0')
+        exec_command_and_wait_for_pattern(self,
+                        'while ! (dmesg -c | grep Start.virtcrypto_remove) ; do'
+                        ' sleep 1 ; done', 'Start virtcrypto_remove.')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_s390x_topology.py b/tests/functional/test_s390x_topology.py
new file mode 100755
index 0000000000..20727f6bdf
--- /dev/null
+++ b/tests/functional/test_s390x_topology.py
@@ -0,0 +1,421 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright IBM Corp. 2023
+#
+# Author:
+#  Pierre Morel <pmorel@linux.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+import os
+import time
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import exec_command
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import lzma_uncompress
+
+
+class S390CPUTopology(QemuSystemTest):
+    """
+    S390x CPU topology consists of 4 topology layers, from bottom to top:
+    cores, sockets, books and drawers, plus 2 modifier attributes,
+    the entitlement and the dedication.
+    See: docs/system/s390x/cpu-topology.rst.
+
+    S390x CPU topology can be set up in different ways:
+    - implicitly from the '-smp' argument, by completing each topology
+      level one after the other, beginning with drawer 0, book 0 and
+      socket 0.
+    - explicitly from the '-device' argument on the QEMU command line
+    - explicitly by hotplug of a new CPU using QMP or HMP
+    - it can be modified at runtime with the QMP 'set-cpu-topology' command
+
+    The S390x modifier attribute entitlement depends on the machine
+    polarization, which can be horizontal or vertical.
+    The polarization is changed on a request from the guest.
+    """
+    timeout = 90
+    event_timeout = 10
+
+    KERNEL_COMMON_COMMAND_LINE = ('printk.time=0 '
+                                  'root=/dev/ram '
+                                  'selinux=0 '
+                                  'rdinit=/bin/sh')
+    ASSET_F35_KERNEL = Asset(
+        ('https://archives.fedoraproject.org/pub/archive'
+         '/fedora-secondary/releases/35/Server/s390x/os'
+         '/images/kernel.img'),
+        '1f2dddfd11bb1393dd2eb2e784036fbf6fc11057a6d7d27f9eb12d3edc67ef73')
+
+    ASSET_F35_INITRD = Asset(
+        ('https://archives.fedoraproject.org/pub/archive'
+         '/fedora-secondary/releases/35/Server/s390x/os'
+         '/images/initrd.img'),
+        '1100145fbca00240c8c372ae4b89b48c99844bc189b3dfbc3f481dc60055ca46')
+
+    def wait_until_booted(self):
+        wait_for_console_pattern(self, 'no job control',
+                                 failure_message='Kernel panic - not syncing',
+                                 vm=None)
+
+    def check_topology(self, c, s, b, d, e, t):
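+        """Assert that core `c` sits in socket `s`, book `b` and drawer `d`
+        with entitlement `e` and dedication `t`."""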
+        res = self.vm.qmp('query-cpus-fast')
+        cpus = res['return']
+        for cpu in cpus:
+            core = cpu['props']['core-id']
+            socket = cpu['props']['socket-id']
+            book = cpu['props']['book-id']
+            drawer = cpu['props']['drawer-id']
+            entitlement = cpu.get('entitlement')
+            dedicated = cpu.get('dedicated')
+            if core == c:
+                self.assertEqual(drawer, d)
+                self.assertEqual(book, b)
+                self.assertEqual(socket, s)
+                self.assertEqual(entitlement, e)
+                self.assertEqual(dedicated, t)
+
+    def kernel_init(self):
+        """
+        We need a VM that supports CPU topology; currently this is only
+        the case when using KVM, not TCG.
+        We need a kernel supporting the CPU topology.
+        We need a minimal root filesystem with a shell.
+        """
+        self.require_accelerator("kvm")
+        kernel_path = self.ASSET_F35_KERNEL.fetch()
+        initrd_path_xz = self.ASSET_F35_INITRD.fetch()
+        initrd_path = os.path.join(self.workdir, 'initrd-raw.img')
+        lzma_uncompress(initrd_path_xz, initrd_path)
+
+        self.vm.set_console()
+        kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE
+        self.vm.add_args('-nographic',
+                         '-enable-kvm',
+                         '-cpu', 'max,ctop=on',
+                         '-m', '512',
+                         '-kernel', kernel_path,
+                         '-initrd', initrd_path,
+                         '-append', kernel_command_line)
+
+    def system_init(self):
+        self.log.info("System init")
+        exec_command_and_wait_for_pattern(self,
+                """ mount proc -t proc /proc;
+                    mount sys -t sysfs /sys;
+                    cat /sys/devices/system/cpu/dispatching """,
+                    '0')
+
+    def test_single(self):
+        """
+        This test checks the simplest topology with a single CPU.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+    def test_default(self):
+        """
+        This test checks the implicit topology.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.add_args('-smp',
+                         '13,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+        self.vm.launch()
+        self.wait_until_booted()
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+        self.check_topology(1, 0, 0, 0, 'medium', False)
+        self.check_topology(2, 1, 0, 0, 'medium', False)
+        self.check_topology(3, 1, 0, 0, 'medium', False)
+        self.check_topology(4, 2, 0, 0, 'medium', False)
+        self.check_topology(5, 2, 0, 0, 'medium', False)
+        self.check_topology(6, 0, 1, 0, 'medium', False)
+        self.check_topology(7, 0, 1, 0, 'medium', False)
+        self.check_topology(8, 1, 1, 0, 'medium', False)
+        self.check_topology(9, 1, 1, 0, 'medium', False)
+        self.check_topology(10, 2, 1, 0, 'medium', False)
+        self.check_topology(11, 2, 1, 0, 'medium', False)
+        self.check_topology(12, 0, 0, 1, 'medium', False)
+
+    def test_move(self):
+        """
+        This test checks the topology modification by moving a CPU
+        to another socket: CPU 0 is moved from socket 0 to socket 2.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.add_args('-smp',
+                         '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'socket-id': 2, 'entitlement': 'low'})
+        self.assertEqual(res['return'], {})
+        self.check_topology(0, 2, 0, 0, 'low', False)
+
+    def test_dash_device(self):
+        """
+        This test verifies that a CPU defined with the '-device'
+        command line option finds its right place inside the topology.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.add_args('-smp',
+                         '1,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+        self.vm.add_args('-device', 'max-s390x-cpu,core-id=10')
+        self.vm.add_args('-device',
+                         'max-s390x-cpu,'
+                         'core-id=1,socket-id=0,book-id=1,drawer-id=1,entitlement=low')
+        self.vm.add_args('-device',
+                         'max-s390x-cpu,'
+                         'core-id=2,socket-id=0,book-id=1,drawer-id=1,entitlement=medium')
+        self.vm.add_args('-device',
+                         'max-s390x-cpu,'
+                         'core-id=3,socket-id=1,book-id=1,drawer-id=1,entitlement=high')
+        self.vm.add_args('-device',
+                         'max-s390x-cpu,'
+                         'core-id=4,socket-id=1,book-id=1,drawer-id=1')
+        self.vm.add_args('-device',
+                         'max-s390x-cpu,'
+                         'core-id=5,socket-id=2,book-id=1,drawer-id=1,dedicated=true')
+
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.check_topology(10, 2, 1, 0, 'medium', False)
+        self.check_topology(1, 0, 1, 1, 'low', False)
+        self.check_topology(2, 0, 1, 1, 'medium', False)
+        self.check_topology(3, 1, 1, 1, 'high', False)
+        self.check_topology(4, 1, 1, 1, 'medium', False)
+        self.check_topology(5, 2, 1, 1, 'high', True)
+
+
+    def guest_set_dispatching(self, dispatching):
+        exec_command(self,
+                f'echo {dispatching} > /sys/devices/system/cpu/dispatching')
+        self.vm.event_wait('CPU_POLARIZATION_CHANGE', self.event_timeout)
+        exec_command_and_wait_for_pattern(self,
+                'cat /sys/devices/system/cpu/dispatching', dispatching)
+
+
+    def test_polarization(self):
+        """
+        This test verifies that QEMU updates the polarization after
+        several guest polarization change requests.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.system_init()
+        res = self.vm.qmp('query-s390x-cpu-polarization')
+        self.assertEqual(res['return']['polarization'], 'horizontal')
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+        self.guest_set_dispatching('1')
+        res = self.vm.qmp('query-s390x-cpu-polarization')
+        self.assertEqual(res['return']['polarization'], 'vertical')
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+        self.guest_set_dispatching('0')
+        res = self.vm.qmp('query-s390x-cpu-polarization')
+        self.assertEqual(res['return']['polarization'], 'horizontal')
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+
+    def check_polarization(self, polarization):
+        # We need to wait for the change to have been propagated to the kernel
+        exec_command_and_wait_for_pattern(self,
+            "\n".join([
+                "timeout 1 sh -c 'while true",
+                'do',
+                '    syspath="/sys/devices/system/cpu/cpu0/polarization"',
+                '    polarization="$(cat "$syspath")" || exit',
+               f'    if [ "$polarization" = "{polarization}" ]; then',
+                '        exit 0',
+                '    fi',
+                '    sleep 0.01',
+                # the strings we wait for must not appear verbatim in the
+                # command itself, hence the '' splits to obfuscate them
+                "done' && echo succ''ess || echo fail''ure",
+            ]),
+            "success", "failure")
+
+
+    def test_entitlement(self):
+        """
+        This test verifies that QEMU modifies the entitlement
+        after a guest request and that the guest sees the change.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.system_init()
+
+        self.check_polarization('horizontal')
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+        self.guest_set_dispatching('1')
+        self.check_polarization('vertical:medium')
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'low'})
+        self.assertEqual(res['return'], {})
+        self.check_polarization('vertical:low')
+        self.check_topology(0, 0, 0, 0, 'low', False)
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'medium'})
+        self.assertEqual(res['return'], {})
+        self.check_polarization('vertical:medium')
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'high'})
+        self.assertEqual(res['return'], {})
+        self.check_polarization('vertical:high')
+        self.check_topology(0, 0, 0, 0, 'high', False)
+
+        self.guest_set_dispatching('0')
+        self.check_polarization("horizontal")
+        self.check_topology(0, 0, 0, 0, 'high', False)
+
+
+    def test_dedicated(self):
+        """
+        This test verifies that QEMU adjusts the entitlement correctly when a
+        CPU is made dedicated.
+        QEMU retains the entitlement value when horizontal polarization is in effect.
+        For the guest, the field shows the effective value of the entitlement.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.system_init()
+
+        self.check_polarization("horizontal")
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'dedicated': True})
+        self.assertEqual(res['return'], {})
+        self.check_topology(0, 0, 0, 0, 'high', True)
+        self.check_polarization("horizontal")
+
+        self.guest_set_dispatching('1')
+        self.check_topology(0, 0, 0, 0, 'high', True)
+        self.check_polarization("vertical:high")
+
+        self.guest_set_dispatching('0')
+        self.check_topology(0, 0, 0, 0, 'high', True)
+        self.check_polarization("horizontal")
+
+
+    def test_socket_full(self):
+        """
+        This test verifies that QEMU refuses to overload a socket.
+        Socket-id 0 on book-id 0 already contains CPUs 0 and 1 and cannot
+        accept any new CPU, while socket-id 0 on book-id 1 is free.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.add_args('-smp',
+                         '3,drawers=2,books=2,sockets=3,cores=2,maxcpus=24')
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.system_init()
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 2, 'socket-id': 0, 'book-id': 0})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 2, 'socket-id': 0, 'book-id': 1})
+        self.assertEqual(res['return'], {})
+
+    def test_dedicated_error(self):
+        """
+        This test verifies that QEMU refuses to lower the entitlement
+        of a dedicated CPU
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.system_init()
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'dedicated': True})
+        self.assertEqual(res['return'], {})
+
+        self.check_topology(0, 0, 0, 0, 'high', True)
+
+        self.guest_set_dispatching('1')
+
+        self.check_topology(0, 0, 0, 0, 'high', True)
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'low', 'dedicated': True})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'low'})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'medium', 'dedicated': True})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'medium'})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'low', 'dedicated': False})
+        self.assertEqual(res['return'], {})
+
+        res = self.vm.qmp('set-cpu-topology',
+                          {'core-id': 0, 'entitlement': 'medium', 'dedicated': False})
+        self.assertEqual(res['return'], {})
+
+    def test_move_error(self):
+        """
+        This test verifies that QEMU refuses to move a CPU to a
+        nonexistent location.
+        """
+        self.set_machine('s390-ccw-virtio')
+        self.kernel_init()
+        self.vm.launch()
+        self.wait_until_booted()
+
+        self.system_init()
+
+        res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'drawer-id': 1})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'book-id': 1})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        res = self.vm.qmp('set-cpu-topology', {'core-id': 0, 'socket-id': 1})
+        self.assertEqual(res['error']['class'], 'GenericError')
+
+        self.check_topology(0, 0, 0, 0, 'medium', False)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_sparc64_sun4u.py b/tests/functional/test_sparc64_sun4u.py
new file mode 100755
index 0000000000..32e245f4ad
--- /dev/null
+++ b/tests/functional/test_sparc64_sun4u.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+#
+# Functional test that boots a Linux kernel and checks the console
+#
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# Author:
+#  Thomas Huth <thuth@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later. See the COPYING file in the top-level directory.
+
+import os
+
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test.utils import archive_extract
+
+class Sun4uMachine(QemuSystemTest):
+    """Boots the Linux kernel and checks that the console is operational"""
+
+    timeout = 90
+
+    ASSET_IMAGE = Asset(
+        ('https://qemu-advcal.gitlab.io/qac-best-of-multiarch/download/'
+         'day23.tar.xz'),
+        'a3ed92450704af244178351afd0e769776e7decb298e95a63abfd9a6e3f6c854')
+
+    def test_sparc64_sun4u(self):
+        self.set_machine('sun4u')
+        file_path = self.ASSET_IMAGE.fetch()
+        kernel_name = 'day23/vmlinux'
+        archive_extract(file_path, self.workdir, kernel_name)
+        self.vm.set_console()
+        self.vm.add_args('-kernel', os.path.join(self.workdir, kernel_name),
+                         '-append', 'printk.time=0')
+        self.vm.launch()
+        wait_for_console_pattern(self, 'Starting logging: OK')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_version.py b/tests/functional/test_version.py
new file mode 100755
index 0000000000..3ab3b67f7e
--- /dev/null
+++ b/tests/functional/test_version.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Version check example test
+#
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+#  Cleber Rosa <crosa@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+
+from qemu_test import QemuSystemTest
+
+
+class Version(QemuSystemTest):
+
+    def test_qmp_human_info_version(self):
+        self.set_machine('none')
+        self.vm.add_args('-nodefaults')
+        self.vm.launch()
+        res = self.vm.cmd('human-monitor-command',
+                          command_line='info version')
+        self.assertRegex(res, r'^(\d+\.\d+\.\d)')
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_virtio_gpu.py b/tests/functional/test_virtio_gpu.py
new file mode 100755
index 0000000000..441cbdcf2d
--- /dev/null
+++ b/tests/functional/test_virtio_gpu.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+#
+# virtio-gpu tests
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+
+
+from qemu_test import BUILD_DIR
+from qemu_test import QemuSystemTest, Asset
+from qemu_test import wait_for_console_pattern
+from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import is_readable_executable_file
+
+from qemu.utils import kvm_available
+
+import os
+import socket
+import subprocess
+
+
+def pick_default_vug_bin():
+    relative_path = "./contrib/vhost-user-gpu/vhost-user-gpu"
+    if is_readable_executable_file(relative_path):
+        return relative_path
+
+    bld_dir_path = os.path.join(BUILD_DIR, relative_path)
+    if is_readable_executable_file(bld_dir_path):
+        return bld_dir_path
+
+
+class VirtioGPUx86(QemuSystemTest):
+
+    KERNEL_COMMAND_LINE = "printk.time=0 console=ttyS0 rdinit=/bin/bash"
+    ASSET_KERNEL = Asset(
+        ("https://archives.fedoraproject.org/pub/archive/fedora"
+         "/linux/releases/33/Everything/x86_64/os/images"
+         "/pxeboot/vmlinuz"),
+        '2dc5fb5cfe9ac278fa45640f3602d9b7a08cc189ed63fd9b162b07073e4df397')
+    ASSET_INITRD = Asset(
+        ("https://archives.fedoraproject.org/pub/archive/fedora"
+         "/linux/releases/33/Everything/x86_64/os/images"
+         "/pxeboot/initrd.img"),
+        'c49b97f893a5349e4883452178763e402bdc5caa8845b226a2d1329b5f356045')
+
+    def wait_for_console_pattern(self, success_message, vm=None):
+        wait_for_console_pattern(
+            self,
+            success_message,
+            failure_message="Kernel panic - not syncing",
+            vm=vm,
+        )
+
+    def test_virtio_vga_virgl(self):
+        # FIXME: should check presence of virtio, virgl etc
+        self.require_accelerator('kvm')
+
+        kernel_path = self.ASSET_KERNEL.fetch()
+        initrd_path = self.ASSET_INITRD.fetch()
+
+        self.vm.set_console()
+        self.vm.add_args("-cpu", "host")
+        self.vm.add_args("-m", "2G")
+        self.vm.add_args("-machine", "pc,accel=kvm")
+        self.vm.add_args("-device", "virtio-vga-gl")
+        self.vm.add_args("-display", "egl-headless")
+        self.vm.add_args(
+            "-kernel",
+            kernel_path,
+            "-initrd",
+            initrd_path,
+            "-append",
+            self.KERNEL_COMMAND_LINE,
+        )
+        try:
+            self.vm.launch()
+        except Exception:
+            # TODO: probably fails because we are missing the VirGL features
+            self.skipTest("VirGL not enabled?")
+
+        self.wait_for_console_pattern("as init process")
+        exec_command_and_wait_for_pattern(
+            self, "/usr/sbin/modprobe virtio_gpu", ""
+        )
+        self.wait_for_console_pattern("features: +virgl +edid")
+
+    def test_vhost_user_vga_virgl(self):
+        # FIXME: should check presence of vhost-user-gpu, virgl, memfd etc
+        self.require_accelerator('kvm')
+
+        vug = pick_default_vug_bin()
+        if not vug:
+            self.skipTest("Could not find vhost-user-gpu")
+
+        kernel_path = self.ASSET_KERNEL.fetch()
+        initrd_path = self.ASSET_INITRD.fetch()
+
+        # Create socketpair to connect proxy and remote processes
+        qemu_sock, vug_sock = socket.socketpair(
+            socket.AF_UNIX, socket.SOCK_STREAM
+        )
+        os.set_inheritable(qemu_sock.fileno(), True)
+        os.set_inheritable(vug_sock.fileno(), True)
+
+        self._vug_log_path = os.path.join(
+            self.logdir, "vhost-user-gpu.log"
+        )
+        self._vug_log_file = open(self._vug_log_path, "wb")
+        self.log.info('Complete vhost-user-gpu.log file can be '
+                      'found at %s', self._vug_log_path)
+
+        vugp = subprocess.Popen(
+            [vug, "--virgl", "--fd=%d" % vug_sock.fileno()],
+            stdin=subprocess.DEVNULL,
+            stdout=self._vug_log_file,
+            stderr=subprocess.STDOUT,
+            shell=False,
+            close_fds=False,
+        )
+
+        self.vm.set_console()
+        self.vm.add_args("-cpu", "host")
+        self.vm.add_args("-m", "2G")
+        self.vm.add_args("-object", "memory-backend-memfd,id=mem,size=2G")
+        self.vm.add_args("-machine", "pc,memory-backend=mem,accel=kvm")
+        self.vm.add_args("-chardev", "socket,id=vug,fd=%d" % qemu_sock.fileno())
+        self.vm.add_args("-device", "vhost-user-vga,chardev=vug")
+        self.vm.add_args("-display", "egl-headless")
+        self.vm.add_args(
+            "-kernel",
+            kernel_path,
+            "-initrd",
+            initrd_path,
+            "-append",
+            self.KERNEL_COMMAND_LINE,
+        )
+        try:
+            self.vm.launch()
+        except Exception:
+            # TODO: probably fails because we are missing the VirGL features
+            self.skipTest("VirGL not enabled?")
+        self.wait_for_console_pattern("as init process")
+        exec_command_and_wait_for_pattern(self, "/usr/sbin/modprobe virtio_gpu",
+                                          "features: +virgl +edid")
+        self.vm.shutdown()
+        qemu_sock.close()
+        vugp.terminate()
+        vugp.wait()
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_virtio_version.py b/tests/functional/test_virtio_version.py
new file mode 100755
index 0000000000..eb23060564
--- /dev/null
+++ b/tests/functional/test_virtio_version.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+"""
+Check compatibility of virtio device types
+"""
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# Author:
+#  Eduardo Habkost <ehabkost@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or
+# later.  See the COPYING file in the top-level directory.
+import sys
+import os
+
+from qemu.machine import QEMUMachine
+from qemu_test import QemuSystemTest
+
+# Virtio Device IDs:
+VIRTIO_NET = 1
+VIRTIO_BLOCK = 2
+VIRTIO_CONSOLE = 3
+VIRTIO_RNG = 4
+VIRTIO_BALLOON = 5
+VIRTIO_RPMSG = 7
+VIRTIO_SCSI = 8
+VIRTIO_9P = 9
+VIRTIO_RPROC_SERIAL = 11
+VIRTIO_CAIF = 12
+VIRTIO_GPU = 16
+VIRTIO_INPUT = 18
+VIRTIO_VSOCK = 19
+VIRTIO_CRYPTO = 20
+
+PCI_VENDOR_ID_REDHAT_QUMRANET = 0x1af4
+
+# Device IDs for legacy/transitional devices:
+PCI_LEGACY_DEVICE_IDS = {
+    VIRTIO_NET:     0x1000,
+    VIRTIO_BLOCK:   0x1001,
+    VIRTIO_BALLOON: 0x1002,
+    VIRTIO_CONSOLE: 0x1003,
+    VIRTIO_SCSI:    0x1004,
+    VIRTIO_RNG:     0x1005,
+    VIRTIO_9P:      0x1009,
+    VIRTIO_VSOCK:   0x1012,
+}
+
+def pci_modern_device_id(virtio_devid):
+    return virtio_devid + 0x1040
+
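+# Worked example of the ID scheme above: virtio-net (device ID 1) gets the
+# modern PCI device ID 0x1041 (0x1040 + 1), while its transitional/legacy
+# PCI device ID stays 0x1000.
+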
+def devtype_implements(vm, devtype, implements):
+    return devtype in [d['name'] for d in
+                       vm.cmd('qom-list-types', implements=implements)]
+
+def get_pci_interfaces(vm, devtype):
+    interfaces = ('pci-express-device', 'conventional-pci-device')
+    return [i for i in interfaces if devtype_implements(vm, devtype, i)]
+
+class VirtioVersionCheck(QemuSystemTest):
+    """
+    Check if virtio-version-specific device types result in the
+    same device tree created by `disable-modern` and
+    `disable-legacy`.
+    """
+
+    # just in case there are failures, show larger diff:
+    maxDiff = 4096
+
+    def run_device(self, devtype, opts=None, machine='pc'):
+        """
+        Run QEMU with `-device DEVTYPE`, return device info from `query-pci`
+        """
+        with QEMUMachine(self.qemu_bin) as vm:
+            vm.set_machine(machine)
+            if opts:
+                devtype += ',' + opts
+            vm.add_args('-device', '%s,id=devfortest' % (devtype))
+            vm.add_args('-S')
+            vm.launch()
+
+            pcibuses = vm.cmd('query-pci')
+            alldevs = [dev for bus in pcibuses for dev in bus['devices']]
+            devfortest = [dev for dev in alldevs
+                          if dev['qdev_id'] == 'devfortest']
+            return devfortest[0], get_pci_interfaces(vm, devtype)
+
+
+    def assert_devids(self, dev, devid, non_transitional=False):
+        self.assertEqual(dev['id']['vendor'], PCI_VENDOR_ID_REDHAT_QUMRANET)
+        self.assertEqual(dev['id']['device'], devid)
+        if non_transitional:
+            self.assertTrue(0x1040 <= dev['id']['device'] <= 0x107f)
+            self.assertGreaterEqual(dev['id']['subsystem'], 0x40)
+
+    def check_all_variants(self, qemu_devtype, virtio_devid):
+        """Check if a virtio device type and its variants behave as expected"""
+        # Force modern mode:
+        dev_modern, _ = self.run_device(qemu_devtype,
+                                       'disable-modern=off,disable-legacy=on')
+        self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid),
+                           non_transitional=True)
+
+        # <prefix>-non-transitional device types should be 100% equivalent to
+        # <prefix>,disable-modern=off,disable-legacy=on
+        dev_1_0, nt_ifaces = self.run_device('%s-non-transitional' % (qemu_devtype))
+        self.assertEqual(dev_modern, dev_1_0)
+
+        # Force transitional mode:
+        dev_trans, _ = self.run_device(qemu_devtype,
+                                      'disable-modern=off,disable-legacy=off')
+        self.assert_devids(dev_trans, PCI_LEGACY_DEVICE_IDS[virtio_devid])
+
+        # Force legacy mode:
+        dev_legacy, _ = self.run_device(qemu_devtype,
+                                       'disable-modern=on,disable-legacy=off')
+        self.assert_devids(dev_legacy, PCI_LEGACY_DEVICE_IDS[virtio_devid])
+
+        # No options: default to transitional on PC machine-type:
+        no_opts_pc, generic_ifaces = self.run_device(qemu_devtype)
+        self.assertEqual(dev_trans, no_opts_pc)
+
+        #TODO: check if plugging on a PCI Express bus will make the
+        #      device non-transitional
+        #no_opts_q35 = self.run_device(qemu_devtype, machine='q35')
+        #self.assertEqual(dev_modern, no_opts_q35)
+
+        # <prefix>-transitional device types should be 100% equivalent to
+        # <prefix>,disable-modern=off,disable-legacy=off
+        dev_trans_impl, trans_ifaces = self.run_device(
+            '%s-transitional' % (qemu_devtype))
+        self.assertEqual(dev_trans, dev_trans_impl)
+
+        # ensure the interface information is correct:
+        self.assertIn('conventional-pci-device', generic_ifaces)
+        self.assertIn('pci-express-device', generic_ifaces)
+
+        self.assertIn('conventional-pci-device', nt_ifaces)
+        self.assertIn('pci-express-device', nt_ifaces)
+
+        self.assertIn('conventional-pci-device', trans_ifaces)
+        self.assertNotIn('pci-express-device', trans_ifaces)
+
+
+    def test_conventional_devs(self):
+        self.check_all_variants('virtio-net-pci', VIRTIO_NET)
+        # virtio-blk requires 'driver' parameter
+        #self.check_all_variants('virtio-blk-pci', VIRTIO_BLOCK)
+        self.check_all_variants('virtio-serial-pci', VIRTIO_CONSOLE)
+        self.check_all_variants('virtio-rng-pci', VIRTIO_RNG)
+        self.check_all_variants('virtio-balloon-pci', VIRTIO_BALLOON)
+        self.check_all_variants('virtio-scsi-pci', VIRTIO_SCSI)
+        # virtio-9p requires 'fsdev' parameter
+        #self.check_all_variants('virtio-9p-pci', VIRTIO_9P)
+
+    def check_modern_only(self, qemu_devtype, virtio_devid):
+        """Check if a modern-only virtio device type behaves as expected"""
+        # Force modern mode:
+        dev_modern, _ = self.run_device(qemu_devtype,
+                                       'disable-modern=off,disable-legacy=on')
+        self.assert_devids(dev_modern, pci_modern_device_id(virtio_devid),
+                           non_transitional=True)
+
+        # No options: should be modern anyway
+        dev_no_opts, ifaces = self.run_device(qemu_devtype)
+        self.assertEqual(dev_modern, dev_no_opts)
+
+        self.assertIn('conventional-pci-device', ifaces)
+        self.assertIn('pci-express-device', ifaces)
+
+    def test_modern_only_devs(self):
+        self.check_modern_only('virtio-vga', VIRTIO_GPU)
+        self.check_modern_only('virtio-gpu-pci', VIRTIO_GPU)
+        self.check_modern_only('virtio-mouse-pci', VIRTIO_INPUT)
+        self.check_modern_only('virtio-tablet-pci', VIRTIO_INPUT)
+        self.check_modern_only('virtio-keyboard-pci', VIRTIO_INPUT)
+
+if __name__ == '__main__':
+    QemuSystemTest.main()
diff --git a/tests/functional/test_x86_cpu_model_versions.py b/tests/functional/test_x86_cpu_model_versions.py
new file mode 100755
index 0000000000..bd18acd44f
--- /dev/null
+++ b/tests/functional/test_x86_cpu_model_versions.py
@@ -0,0 +1,335 @@
+#!/usr/bin/env python3
+#
+# Basic validation of x86 versioned CPU models and CPU model aliases
+#
+#  Copyright (c) 2019 Red Hat Inc
+#
+# Author:
+#  Eduardo Habkost <ehabkost@redhat.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+
+from qemu_test import QemuSystemTest
+
+class X86CPUModelAliases(QemuSystemTest):
+    """
+    Validation of PC CPU model versions and CPU model aliases
+    """
+    def validate_aliases(self, cpus):
+        for c in cpus.values():
+            if 'alias-of' in c:
+                # all aliases must point to a valid CPU model name:
+                self.assertIn(c['alias-of'], cpus,
+                              '%s.alias-of (%s) is not a valid CPU model name' % (c['name'], c['alias-of']))
+                # aliases must not point to aliases
+                self.assertNotIn('alias-of', cpus[c['alias-of']],
+                                 '%s.alias-of (%s) points to another alias' % (c['name'], c['alias-of']))
+
+                # aliases must not be static
+                self.assertFalse(c['static'])
+
+    def validate_variant_aliases(self, cpus):
+        # -noTSX, -IBRS and -IBPB variants of CPU models are special:
+        # they shouldn't have their own versions:
+        self.assertNotIn("Haswell-noTSX-v1", cpus,
+                         "Haswell-noTSX shouldn't be versioned")
+        self.assertNotIn("Broadwell-noTSX-v1", cpus,
+                         "Broadwell-noTSX shouldn't be versioned")
+        self.assertNotIn("Nehalem-IBRS-v1", cpus,
+                         "Nehalem-IBRS shouldn't be versioned")
+        self.assertNotIn("Westmere-IBRS-v1", cpus,
+                         "Westmere-IBRS shouldn't be versioned")
+        self.assertNotIn("SandyBridge-IBRS-v1", cpus,
+                         "SandyBridge-IBRS shouldn't be versioned")
+        self.assertNotIn("IvyBridge-IBRS-v1", cpus,
+                         "IvyBridge-IBRS shouldn't be versioned")
+        self.assertNotIn("Haswell-noTSX-IBRS-v1", cpus,
+                         "Haswell-noTSX-IBRS shouldn't be versioned")
+        self.assertNotIn("Haswell-IBRS-v1", cpus,
+                         "Haswell-IBRS shouldn't be versioned")
+        self.assertNotIn("Broadwell-noTSX-IBRS-v1", cpus,
+                         "Broadwell-noTSX-IBRS shouldn't be versioned")
+        self.assertNotIn("Broadwell-IBRS-v1", cpus,
+                         "Broadwell-IBRS shouldn't be versioned")
+        self.assertNotIn("Skylake-Client-IBRS-v1", cpus,
+                         "Skylake-Client-IBRS shouldn't be versioned")
+        self.assertNotIn("Skylake-Server-IBRS-v1", cpus,
+                         "Skylake-Server-IBRS shouldn't be versioned")
+        self.assertNotIn("EPYC-IBPB-v1", cpus,
+                         "EPYC-IBPB shouldn't be versioned")
+
+    def test_4_0_alias_compatibility(self):
+        """
+        Check that pc-*-4.0 machines do not report unversioned CPU models
+        as aliases
+        """
+        self.set_machine('pc-i440fx-4.0')
+        # pc-*-4.0 won't expose non-versioned CPU models as aliases
+        # We do this to help management software to keep compatibility
+        # with older QEMU versions that didn't have the versioned CPU model
+        self.vm.add_args('-S')
+        self.vm.launch()
+        cpus = dict((m['name'], m) for m in
+                    self.vm.cmd('query-cpu-definitions'))
+
+        self.assertFalse(cpus['Cascadelake-Server']['static'],
+                         'unversioned Cascadelake-Server CPU model must not be static')
+        self.assertNotIn('alias-of', cpus['Cascadelake-Server'],
+                         'Cascadelake-Server must not be an alias')
+        self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
+                         'Cascadelake-Server-v1 must not be an alias')
+
+        self.assertFalse(cpus['qemu64']['static'],
+                         'unversioned qemu64 CPU model must not be static')
+        self.assertNotIn('alias-of', cpus['qemu64'],
+                         'qemu64 must not be an alias')
+        self.assertNotIn('alias-of', cpus['qemu64-v1'],
+                         'qemu64-v1 must not be an alias')
+
+        self.validate_variant_aliases(cpus)
+
+        # On pc-*-4.0, no CPU model should be reported as an alias:
+        for name,c in cpus.items():
+            self.assertNotIn('alias-of', c, "%s shouldn't be an alias" % (name))
+
+    def test_4_1_alias(self):
+        """
+        Check that the unversioned CPU model is an alias pointing to the
+        right version
+        """
+        self.set_machine('pc-i440fx-4.1')
+        self.vm.add_args('-S')
+        self.vm.launch()
+
+        cpus = dict((m['name'], m) for m in
+                    self.vm.cmd('query-cpu-definitions'))
+
+        self.assertFalse(cpus['Cascadelake-Server']['static'],
+                         'unversioned Cascadelake-Server CPU model must not be static')
+        self.assertEqual(cpus['Cascadelake-Server'].get('alias-of'),
+                         'Cascadelake-Server-v1',
+                         'Cascadelake-Server must be an alias of Cascadelake-Server-v1')
+        self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
+                         'Cascadelake-Server-v1 must not be an alias')
+
+        self.assertFalse(cpus['qemu64']['static'],
+                         'unversioned qemu64 CPU model must not be static')
+        self.assertEqual(cpus['qemu64'].get('alias-of'), 'qemu64-v1',
+                         'qemu64 must be an alias of qemu64-v1')
+        self.assertNotIn('alias-of', cpus['qemu64-v1'],
+                         'qemu64-v1 must not be an alias')
+
+        self.validate_variant_aliases(cpus)
+
+        # On pc-*-4.1, -noTSX and -IBRS models should be aliases:
+        self.assertEqual(cpus["Haswell"].get('alias-of'),
+                         "Haswell-v1",
+                         "Haswell must be an alias")
+        self.assertEqual(cpus["Haswell-noTSX"].get('alias-of'),
+                         "Haswell-v2",
+                         "Haswell-noTSX must be an alias")
+        self.assertEqual(cpus["Haswell-IBRS"].get('alias-of'),
+                         "Haswell-v3",
+                         "Haswell-IBRS must be an alias")
+        self.assertEqual(cpus["Haswell-noTSX-IBRS"].get('alias-of'),
+                         "Haswell-v4",
+                         "Haswell-noTSX-IBRS must be an alias")
+
+        self.assertEqual(cpus["Broadwell"].get('alias-of'),
+                         "Broadwell-v1",
+                         "Broadwell must be an alias")
+        self.assertEqual(cpus["Broadwell-noTSX"].get('alias-of'),
+                         "Broadwell-v2",
+                         "Broadwell-noTSX must be an alias")
+        self.assertEqual(cpus["Broadwell-IBRS"].get('alias-of'),
+                         "Broadwell-v3",
+                         "Broadwell-IBRS must be an alias")
+        self.assertEqual(cpus["Broadwell-noTSX-IBRS"].get('alias-of'),
+                         "Broadwell-v4",
+                         "Broadwell-noTSX-IBRS must be an alias")
+
+        self.assertEqual(cpus["Nehalem"].get('alias-of'),
+                         "Nehalem-v1",
+                         "Nehalem must be an alias")
+        self.assertEqual(cpus["Nehalem-IBRS"].get('alias-of'),
+                         "Nehalem-v2",
+                         "Nehalem-IBRS must be an alias")
+
+        self.assertEqual(cpus["Westmere"].get('alias-of'),
+                         "Westmere-v1",
+                         "Westmere must be an alias")
+        self.assertEqual(cpus["Westmere-IBRS"].get('alias-of'),
+                         "Westmere-v2",
+                         "Westmere-IBRS must be an alias")
+
+        self.assertEqual(cpus["SandyBridge"].get('alias-of'),
+                         "SandyBridge-v1",
+                         "SandyBridge must be an alias")
+        self.assertEqual(cpus["SandyBridge-IBRS"].get('alias-of'),
+                         "SandyBridge-v2",
+                         "SandyBridge-IBRS must be an alias")
+
+        self.assertEqual(cpus["IvyBridge"].get('alias-of'),
+                         "IvyBridge-v1",
+                         "IvyBridge must be an alias")
+        self.assertEqual(cpus["IvyBridge-IBRS"].get('alias-of'),
+                         "IvyBridge-v2",
+                         "IvyBridge-IBRS must be an alias")
+
+        self.assertEqual(cpus["Skylake-Client"].get('alias-of'),
+                         "Skylake-Client-v1",
+                         "Skylake-Client must be an alias")
+        self.assertEqual(cpus["Skylake-Client-IBRS"].get('alias-of'),
+                         "Skylake-Client-v2",
+                         "Skylake-Client-IBRS must be an alias")
+
+        self.assertEqual(cpus["Skylake-Server"].get('alias-of'),
+                         "Skylake-Server-v1",
+                         "Skylake-Server must be an alias")
+        self.assertEqual(cpus["Skylake-Server-IBRS"].get('alias-of'),
+                         "Skylake-Server-v2",
+                         "Skylake-Server-IBRS must be an alias")
+
+        self.assertEqual(cpus["EPYC"].get('alias-of'),
+                         "EPYC-v1",
+                         "EPYC must be an alias")
+        self.assertEqual(cpus["EPYC-IBPB"].get('alias-of'),
+                         "EPYC-v2",
+                         "EPYC-IBPB must be an alias")
+
+        self.validate_aliases(cpus)
+
+    def test_none_alias(self):
+        """
+        Check that unversioned CPU models are aliases pointing to some version
+        """
+        self.set_machine('none')
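+        # With the 'none' machine there is no pc-* compat property pinning
+        # the alias target, so only check that the unversioned models alias
+        # *some* versioned model (matched by regex) rather than a fixed -vN.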
+        self.vm.add_args('-S')
+        self.vm.launch()
+
+        cpus = {m['name']: m for m in
+                self.vm.cmd('query-cpu-definitions')}
+
+        self.assertFalse(cpus['Cascadelake-Server']['static'],
+                         'unversioned Cascadelake-Server CPU model must not be static')
+        self.assertTrue(re.match('Cascadelake-Server-v[0-9]+',
+                                 cpus['Cascadelake-Server']['alias-of']),
+                        'Cascadelake-Server must be an alias of a versioned CPU model')
+        self.assertNotIn('alias-of', cpus['Cascadelake-Server-v1'],
+                         'Cascadelake-Server-v1 must not be an alias')
+
+        self.assertFalse(cpus['qemu64']['static'],
+                         'unversioned qemu64 CPU model must not be static')
+        self.assertTrue(re.match('qemu64-v[0-9]+', cpus['qemu64']['alias-of']),
+                        'qemu64 must be an alias of a versioned CPU model')
+        self.assertNotIn('alias-of', cpus['qemu64-v1'],
+                         'qemu64-v1 must not be an alias')
+
+        self.validate_aliases(cpus)
+
+
+class CascadelakeArchCapabilities(QemuSystemTest):
+    """
+    Validation of Cascadelake arch-capabilities
+    """
+    def get_cpu_prop(self, prop):
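+        """Return a QOM property of the first vCPU (query-cpus-fast + qom-get)."""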
+        cpu_path = self.vm.cmd('query-cpus-fast')[0].get('qom-path')
+        return self.vm.cmd('qom-get', path=cpu_path, property=prop)
+
+    def test_4_1(self):
+        self.set_machine('pc-i440fx-4.1')
+        # machine-type only:
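+        # (per the alias tests above, pc-i440fx-4.1 resolves the unversioned
+        # Cascadelake-Server to -v1, which lacks arch-capabilities)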
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server,x-force-features=on,check=off,'
+                        'enforce=off')
+        self.vm.launch()
+        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+                         'pc-i440fx-4.1 + Cascadelake-Server should not have arch-capabilities')
+
+    def test_4_0(self):
+        self.set_machine('pc-i440fx-4.0')
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server,x-force-features=on,check=off,'
+                        'enforce=off')
+        self.vm.launch()
+        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+                         'pc-i440fx-4.0 + Cascadelake-Server should not have arch-capabilities')
+
+    def test_set_4_0(self):
+        self.set_machine('pc-i440fx-4.0')
+        # command line must override machine-type if CPU model is not versioned:
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server,x-force-features=on,check=off,'
+                        'enforce=off,+arch-capabilities')
+        self.vm.launch()
+        self.assertTrue(self.get_cpu_prop('arch-capabilities'),
+                        'pc-i440fx-4.0 + Cascadelake-Server,+arch-capabilities should have arch-capabilities')
+
+    def test_unset_4_1(self):
+        self.set_machine('pc-i440fx-4.1')
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server,x-force-features=on,check=off,'
+                        'enforce=off,-arch-capabilities')
+        self.vm.launch()
+        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+                         'pc-i440fx-4.1 + Cascadelake-Server,-arch-capabilities should not have arch-capabilities')
+
+    def test_v1_4_0(self):
+        self.set_machine('pc-i440fx-4.0')
+        # versioned CPU model overrides machine-type:
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server-v1,x-force-features=on,check=off,'
+                        'enforce=off')
+        self.vm.launch()
+        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+                         'pc-i440fx-4.0 + Cascadelake-Server-v1 should not have arch-capabilities')
+
+    def test_v2_4_0(self):
+        self.set_machine('pc-i440fx-4.0')
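+        # Cascadelake-Server-v2 includes arch-capabilities, so the versioned
+        # name should enable it even on the pc-*-4.0 machine type: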
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server-v2,x-force-features=on,check=off,'
+                        'enforce=off')
+        self.vm.launch()
+        self.assertTrue(self.get_cpu_prop('arch-capabilities'),
+                        'pc-i440fx-4.0 + Cascadelake-Server-v2 should have arch-capabilities')
+
+    def test_v1_set_4_0(self):
+        self.set_machine('pc-i440fx-4.0')
+        # command line must override machine-type and versioned CPU model:
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server-v1,x-force-features=on,check=off,'
+                        'enforce=off,+arch-capabilities')
+        self.vm.launch()
+        self.assertTrue(self.get_cpu_prop('arch-capabilities'),
+                        'pc-i440fx-4.0 + Cascadelake-Server-v1,+arch-capabilities should have arch-capabilities')
+
+    def test_v2_unset_4_1(self):
+        self.set_machine('pc-i440fx-4.1')
+        self.vm.add_args('-S')
+        self.set_vm_arg('-cpu',
+                        'Cascadelake-Server-v2,x-force-features=on,check=off,'
+                        'enforce=off,-arch-capabilities')
+        self.vm.launch()
+        self.assertFalse(self.get_cpu_prop('arch-capabilities'),
+                         'pc-i440fx-4.1 + Cascadelake-Server-v2,-arch-capabilities should not have arch-capabilities')
+
+
+if __name__ == '__main__':
+    QemuSystemTest.main()