author     Peter Maydell <peter.maydell@linaro.org>   2016-06-24 11:00:15 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2016-06-24 11:00:15 +0100
commit     a01aef5d2f96c334d048f43f0d3573a1152b37ca (patch)
tree       fce1969574c37a87816c17f98d4fd5a9f818bf00 /hw/acpi
parent     c7288767523f6510cf557707d3eb5e78e519b90d (diff)
parent     21a4d96243e60a4c8eeb124a023b8a3bd9120e18 (diff)
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
pc, pci, virtio: new features, cleanups, fixes

nvdimm label support
cpu acpi hotplug rework
virtio rework
misc cleanups and fixes

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Fri 24 Jun 2016 06:50:32 BST
# gpg:                using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (34 commits)
  virtio-bus: remove old set_host_notifier callback
  virtio-mmio: convert to ioeventfd callbacks
  virtio-pci: convert to ioeventfd callbacks
  virtio-ccw: convert to ioeventfd callbacks
  virtio-bus: have callers tolerate new host notifier api
  virtio-bus: common ioeventfd infrastructure
  pc: acpi: drop intermediate PCMachineState.node_cpu
  acpi-test-data: update expected
  pc: use new CPU hotplug interface since 2.7 machine type
  acpi: cpuhp: add cpu._OST handling
  acpi: cpuhp: implement hot-remove parts of CPU hotplug interface
  acpi: cpuhp: implement hot-add parts of CPU hotplug interface
  pc: acpi: introduce AcpiDeviceIfClass.madt_cpu hook
  acpi: cpuhp: add CPU devices AML with _STA method
  pc: piix4/ich9: add 'cpu-hotplug-legacy' property
  docs: update ACPI CPU hotplug spec with new protocol
  i386: pci-assign: Fix MSI-X table size
  docs: add NVDIMM ACPI documentation
  nvdimm acpi: support Set Namespace Label Data function
  nvdimm acpi: support Get Namespace Label Data function
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw/acpi')
 -rw-r--r--  hw/acpi/Makefile.objs  |   2
 -rw-r--r--  hw/acpi/aml-build.c    |  22
 -rw-r--r--  hw/acpi/cpu.c          | 561
 -rw-r--r--  hw/acpi/cpu_hotplug.c  |  21
 -rw-r--r--  hw/acpi/ich9.c         |  69
 -rw-r--r--  hw/acpi/ipmi.c         | 105
 -rw-r--r--  hw/acpi/nvdimm.c       | 400
 -rw-r--r--  hw/acpi/piix4.c        |  71
 -rw-r--r--  hw/acpi/trace-events   |  14
9 files changed, 1229 insertions(+), 36 deletions(-)
diff --git a/hw/acpi/Makefile.objs b/hw/acpi/Makefile.objs
index 66bd72702b..4b7da6639f 100644
--- a/hw/acpi/Makefile.objs
+++ b/hw/acpi/Makefile.objs
@@ -2,7 +2,9 @@ common-obj-$(CONFIG_ACPI_X86) += core.o piix4.o pcihp.o
 common-obj-$(CONFIG_ACPI_X86_ICH) += ich9.o tco.o
 common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu_hotplug.o
 common-obj-$(CONFIG_ACPI_MEMORY_HOTPLUG) += memory_hotplug.o memory_hotplug_acpi_table.o
+common-obj-$(CONFIG_ACPI_CPU_HOTPLUG) += cpu.o
 obj-$(CONFIG_ACPI_NVDIMM) += nvdimm.o
 common-obj-$(CONFIG_ACPI) += acpi_interface.o
 common-obj-$(CONFIG_ACPI) += bios-linker-loader.o
 common-obj-$(CONFIG_ACPI) += aml-build.o
+common-obj-$(call land,$(CONFIG_ACPI),$(CONFIG_IPMI)) += ipmi.o
diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 874e473cac..db3e914fb4 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -660,6 +660,20 @@ Aml *aml_call4(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4)
     return var;
 }
 
+/* helper to call method with 5 arguments */
+Aml *aml_call5(const char *method, Aml *arg1, Aml *arg2, Aml *arg3, Aml *arg4,
+               Aml *arg5)
+{
+    Aml *var = aml_alloc();
+    build_append_namestring(var->buf, "%s", method);
+    aml_append(var, arg1);
+    aml_append(var, arg2);
+    aml_append(var, arg3);
+    aml_append(var, arg4);
+    aml_append(var, arg5);
+    return var;
+}
+
 /*
  * ACPI 5.0: 6.4.3.8.1 GPIO Connection Descriptor
  * Type 1, Large Item Name 0xC
@@ -1481,6 +1495,14 @@ Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target)
                                  target);
 }
 
+/* ACPI 1.0b: 16.2.5.4 Type 2 Opcodes Encoding: DefObjectType */
+Aml *aml_object_type(Aml *object)
+{
+    Aml *var = aml_opcode(0x8E /* ObjectTypeOp */);
+    aml_append(var, object);
+    return var;
+}
+
 void
 build_header(BIOSLinker *linker, GArray *table_data,
              AcpiTableHeader *h, const char *sig, int len, uint8_t rev,
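
The two helpers above are consumed later in this same pull (hw/acpi/nvdimm.c); a minimal usage sketch, assuming the hw/acpi/aml-build.h API and the NVDIMM_COMMON_DSM name and handle value from that file:

    /* forward a device _DSM to the common method, passing the handle */
    Aml *method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
                                            aml_arg(1), aml_arg(2), aml_arg(3),
                                            aml_int(0 /* handle */))));

    /* ObjectType lets the AML check that Arg3 really is a Package */
    Aml *is_package = aml_equal(aml_object_type(aml_arg(3)),
                                aml_int(4 /* Package */));
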
diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c
new file mode 100644
index 0000000000..c13b65c2c9
--- /dev/null
+++ b/hw/acpi/cpu.c
@@ -0,0 +1,561 @@
+#include "qemu/osdep.h"
+#include "hw/boards.h"
+#include "hw/acpi/cpu.h"
+#include "qapi/error.h"
+#include "qapi-event.h"
+#include "trace.h"
+
+#define ACPI_CPU_HOTPLUG_REG_LEN 12
+#define ACPI_CPU_SELECTOR_OFFSET_WR 0
+#define ACPI_CPU_FLAGS_OFFSET_RW 4
+#define ACPI_CPU_CMD_OFFSET_WR 5
+#define ACPI_CPU_CMD_DATA_OFFSET_RW 8
+
+enum {
+    CPHP_GET_NEXT_CPU_WITH_EVENT_CMD = 0,
+    CPHP_OST_EVENT_CMD = 1,
+    CPHP_OST_STATUS_CMD = 2,
+    CPHP_CMD_MAX
+};
+
+static ACPIOSTInfo *acpi_cpu_device_status(int idx, AcpiCpuStatus *cdev)
+{
+    ACPIOSTInfo *info = g_new0(ACPIOSTInfo, 1);
+
+    info->slot_type = ACPI_SLOT_TYPE_CPU;
+    info->slot = g_strdup_printf("%d", idx);
+    info->source = cdev->ost_event;
+    info->status = cdev->ost_status;
+    if (cdev->cpu) {
+        DeviceState *dev = DEVICE(cdev->cpu);
+        if (dev->id) {
+            info->device = g_strdup(dev->id);
+            info->has_device = true;
+        }
+    }
+    return info;
+}
+
+void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list)
+{
+    int i;
+
+    for (i = 0; i < cpu_st->dev_count; i++) {
+        ACPIOSTInfoList *elem = g_new0(ACPIOSTInfoList, 1);
+        elem->value = acpi_cpu_device_status(i, &cpu_st->devs[i]);
+        elem->next = NULL;
+        **list = elem;
+        *list = &elem->next;
+    }
+}
+
+static uint64_t cpu_hotplug_rd(void *opaque, hwaddr addr, unsigned size)
+{
+    uint64_t val = 0;
+    CPUHotplugState *cpu_st = opaque;
+    AcpiCpuStatus *cdev;
+
+    if (cpu_st->selector >= cpu_st->dev_count) {
+        return val;
+    }
+
+    cdev = &cpu_st->devs[cpu_st->selector];
+    switch (addr) {
+    case ACPI_CPU_FLAGS_OFFSET_RW: /* pack and return is_* fields */
+        val |= cdev->cpu ? 1 : 0;
+        val |= cdev->is_inserting ? 2 : 0;
+        val |= cdev->is_removing  ? 4 : 0;
+        trace_cpuhp_acpi_read_flags(cpu_st->selector, val);
+        break;
+    case ACPI_CPU_CMD_DATA_OFFSET_RW:
+        switch (cpu_st->command) {
+        case CPHP_GET_NEXT_CPU_WITH_EVENT_CMD:
+           val = cpu_st->selector;
+           break;
+        default:
+           break;
+        }
+        trace_cpuhp_acpi_read_cmd_data(cpu_st->selector, val);
+        break;
+    default:
+        break;
+    }
+    return val;
+}
+
+static void cpu_hotplug_wr(void *opaque, hwaddr addr, uint64_t data,
+                           unsigned int size)
+{
+    CPUHotplugState *cpu_st = opaque;
+    AcpiCpuStatus *cdev;
+    ACPIOSTInfo *info;
+
+    assert(cpu_st->dev_count);
+
+    if (addr) {
+        if (cpu_st->selector >= cpu_st->dev_count) {
+            trace_cpuhp_acpi_invalid_idx_selected(cpu_st->selector);
+            return;
+        }
+    }
+
+    switch (addr) {
+    case ACPI_CPU_SELECTOR_OFFSET_WR: /* current CPU selector */
+        cpu_st->selector = data;
+        trace_cpuhp_acpi_write_idx(cpu_st->selector);
+        break;
+    case ACPI_CPU_FLAGS_OFFSET_RW: /* set is_* fields  */
+        cdev = &cpu_st->devs[cpu_st->selector];
+        if (data & 2) { /* clear insert event */
+            cdev->is_inserting = false;
+            trace_cpuhp_acpi_clear_inserting_evt(cpu_st->selector);
+        } else if (data & 4) { /* clear remove event */
+            cdev->is_removing = false;
+            trace_cpuhp_acpi_clear_remove_evt(cpu_st->selector);
+        } else if (data & 8) {
+            DeviceState *dev = NULL;
+            HotplugHandler *hotplug_ctrl = NULL;
+
+            if (!cdev->cpu) {
+                trace_cpuhp_acpi_ejecting_invalid_cpu(cpu_st->selector);
+                break;
+            }
+
+            trace_cpuhp_acpi_ejecting_cpu(cpu_st->selector);
+            dev = DEVICE(cdev->cpu);
+            hotplug_ctrl = qdev_get_hotplug_handler(dev);
+            hotplug_handler_unplug(hotplug_ctrl, dev, NULL);
+        }
+        break;
+    case ACPI_CPU_CMD_OFFSET_WR:
+        trace_cpuhp_acpi_write_cmd(cpu_st->selector, data);
+        if (data < CPHP_CMD_MAX) {
+            cpu_st->command = data;
+            if (cpu_st->command == CPHP_GET_NEXT_CPU_WITH_EVENT_CMD) {
+                uint32_t iter = cpu_st->selector;
+
+                do {
+                    cdev = &cpu_st->devs[iter];
+                    if (cdev->is_inserting || cdev->is_removing) {
+                        cpu_st->selector = iter;
+                        trace_cpuhp_acpi_cpu_has_events(cpu_st->selector,
+                            cdev->is_inserting, cdev->is_removing);
+                        break;
+                    }
+                    iter = iter + 1 < cpu_st->dev_count ? iter + 1 : 0;
+                } while (iter != cpu_st->selector);
+            }
+        }
+        break;
+    case ACPI_CPU_CMD_DATA_OFFSET_RW:
+        switch (cpu_st->command) {
+        case CPHP_OST_EVENT_CMD: {
+           cdev = &cpu_st->devs[cpu_st->selector];
+           cdev->ost_event = data;
+           trace_cpuhp_acpi_write_ost_ev(cpu_st->selector, cdev->ost_event);
+           break;
+        }
+        case CPHP_OST_STATUS_CMD: {
+           cdev = &cpu_st->devs[cpu_st->selector];
+           cdev->ost_status = data;
+           info = acpi_cpu_device_status(cpu_st->selector, cdev);
+           qapi_event_send_acpi_device_ost(info, &error_abort);
+           qapi_free_ACPIOSTInfo(info);
+           trace_cpuhp_acpi_write_ost_status(cpu_st->selector,
+                                             cdev->ost_status);
+           break;
+        }
+        default:
+           break;
+        }
+        break;
+    default:
+        break;
+    }
+}
+
+static const MemoryRegionOps cpu_hotplug_ops = {
+    .read = cpu_hotplug_rd,
+    .write = cpu_hotplug_wr,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 4,
+    },
+};
+
+void cpu_hotplug_hw_init(MemoryRegion *as, Object *owner,
+                         CPUHotplugState *state, hwaddr base_addr)
+{
+    MachineState *machine = MACHINE(qdev_get_machine());
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    CPUArchIdList *id_list;
+    int i;
+
+    assert(mc->possible_cpu_arch_ids);
+    id_list = mc->possible_cpu_arch_ids(machine);
+    state->dev_count = id_list->len;
+    state->devs = g_new0(typeof(*state->devs), state->dev_count);
+    for (i = 0; i < id_list->len; i++) {
+        state->devs[i].cpu =  id_list->cpus[i].cpu;
+        state->devs[i].arch_id = id_list->cpus[i].arch_id;
+    }
+    g_free(id_list);
+    memory_region_init_io(&state->ctrl_reg, owner, &cpu_hotplug_ops, state,
+                          "acpi-mem-hotplug", ACPI_CPU_HOTPLUG_REG_LEN);
+    memory_region_add_subregion(as, base_addr, &state->ctrl_reg);
+}
+
+static AcpiCpuStatus *get_cpu_status(CPUHotplugState *cpu_st, DeviceState *dev)
+{
+    CPUClass *k = CPU_GET_CLASS(dev);
+    uint64_t cpu_arch_id = k->get_arch_id(CPU(dev));
+    int i;
+
+    for (i = 0; i < cpu_st->dev_count; i++) {
+        if (cpu_arch_id == cpu_st->devs[i].arch_id) {
+            return &cpu_st->devs[i];
+        }
+    }
+    return NULL;
+}
+
+void acpi_cpu_plug_cb(HotplugHandler *hotplug_dev,
+                      CPUHotplugState *cpu_st, DeviceState *dev, Error **errp)
+{
+    AcpiCpuStatus *cdev;
+
+    cdev = get_cpu_status(cpu_st, dev);
+    if (!cdev) {
+        return;
+    }
+
+    cdev->cpu = CPU(dev);
+    if (dev->hotplugged) {
+        cdev->is_inserting = true;
+        acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
+    }
+}
+
+void acpi_cpu_unplug_request_cb(HotplugHandler *hotplug_dev,
+                                CPUHotplugState *cpu_st,
+                                DeviceState *dev, Error **errp)
+{
+    AcpiCpuStatus *cdev;
+
+    cdev = get_cpu_status(cpu_st, dev);
+    if (!cdev) {
+        return;
+    }
+
+    cdev->is_removing = true;
+    acpi_send_event(DEVICE(hotplug_dev), ACPI_CPU_HOTPLUG_STATUS);
+}
+
+void acpi_cpu_unplug_cb(CPUHotplugState *cpu_st,
+                        DeviceState *dev, Error **errp)
+{
+    AcpiCpuStatus *cdev;
+
+    cdev = get_cpu_status(cpu_st, dev);
+    if (!cdev) {
+        return;
+    }
+
+    cdev->cpu = NULL;
+}
+
+static const VMStateDescription vmstate_cpuhp_sts = {
+    .name = "CPU hotplug device state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField[]) {
+        VMSTATE_BOOL(is_inserting, AcpiCpuStatus),
+        VMSTATE_BOOL(is_removing, AcpiCpuStatus),
+        VMSTATE_UINT32(ost_event, AcpiCpuStatus),
+        VMSTATE_UINT32(ost_status, AcpiCpuStatus),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+const VMStateDescription vmstate_cpu_hotplug = {
+    .name = "CPU hotplug state",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField[]) {
+        VMSTATE_UINT32(selector, CPUHotplugState),
+        VMSTATE_UINT8(command, CPUHotplugState),
+        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(devs, CPUHotplugState, dev_count,
+                                             vmstate_cpuhp_sts, AcpiCpuStatus),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define CPU_NAME_FMT      "C%.03X"
+#define CPUHP_RES_DEVICE  "PRES"
+#define CPU_LOCK          "CPLK"
+#define CPU_STS_METHOD    "CSTA"
+#define CPU_SCAN_METHOD   "CSCN"
+#define CPU_NOTIFY_METHOD "CTFY"
+#define CPU_EJECT_METHOD  "CEJ0"
+#define CPU_OST_METHOD    "COST"
+
+#define CPU_ENABLED       "CPEN"
+#define CPU_SELECTOR      "CSEL"
+#define CPU_COMMAND       "CCMD"
+#define CPU_DATA          "CDAT"
+#define CPU_INSERT_EVENT  "CINS"
+#define CPU_REMOVE_EVENT  "CRMV"
+#define CPU_EJECT_EVENT   "CEJ0"
+
+void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts,
+                    hwaddr io_base,
+                    const char *res_root,
+                    const char *event_handler_method)
+{
+    Aml *ifctx;
+    Aml *field;
+    Aml *method;
+    Aml *cpu_ctrl_dev;
+    Aml *cpus_dev;
+    Aml *zero = aml_int(0);
+    Aml *one = aml_int(1);
+    Aml *sb_scope = aml_scope("_SB");
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    CPUArchIdList *arch_ids = mc->possible_cpu_arch_ids(machine);
+    char *cphp_res_path = g_strdup_printf("%s." CPUHP_RES_DEVICE, res_root);
+    Object *obj = object_resolve_path_type("", TYPE_ACPI_DEVICE_IF, NULL);
+    AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(obj);
+    AcpiDeviceIf *adev = ACPI_DEVICE_IF(obj);
+
+    cpu_ctrl_dev = aml_device("%s", cphp_res_path);
+    {
+        Aml *crs;
+
+        aml_append(cpu_ctrl_dev,
+            aml_name_decl("_HID", aml_eisaid("PNP0A06")));
+        aml_append(cpu_ctrl_dev,
+            aml_name_decl("_UID", aml_string("CPU Hotplug resources")));
+        aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0));
+
+        crs = aml_resource_template();
+        aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1,
+                               ACPI_CPU_HOTPLUG_REG_LEN));
+        aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs));
+
+        /* declare CPU hotplug MMIO region with related access fields */
+        aml_append(cpu_ctrl_dev,
+            aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base),
+                                 ACPI_CPU_HOTPLUG_REG_LEN));
+
+        field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK,
+                          AML_WRITE_AS_ZEROS);
+        aml_append(field, aml_reserved_field(ACPI_CPU_FLAGS_OFFSET_RW * 8));
+        /* 1 if enabled, read only */
+        aml_append(field, aml_named_field(CPU_ENABLED, 1));
+        /* (read) 1 if it has an insert event. (write) 1 to clear event */
+        aml_append(field, aml_named_field(CPU_INSERT_EVENT, 1));
+        /* (read) 1 if it has a remove event. (write) 1 to clear event */
+        aml_append(field, aml_named_field(CPU_REMOVE_EVENT, 1));
+        /* initiates device eject, write only */
+        aml_append(field, aml_named_field(CPU_EJECT_EVENT, 1));
+        aml_append(field, aml_reserved_field(4));
+        aml_append(field, aml_named_field(CPU_COMMAND, 8));
+        aml_append(cpu_ctrl_dev, field);
+
+        field = aml_field("PRST", AML_DWORD_ACC, AML_NOLOCK, AML_PRESERVE);
+        /* CPU selector, write only */
+        aml_append(field, aml_named_field(CPU_SELECTOR, 32));
+        /* flags + cmd + 2byte align */
+        aml_append(field, aml_reserved_field(4 * 8));
+        aml_append(field, aml_named_field(CPU_DATA, 32));
+        aml_append(cpu_ctrl_dev, field);
+
+        if (opts.has_legacy_cphp) {
+            method = aml_method("_INI", 0, AML_SERIALIZED);
+            /* switch off legacy CPU hotplug HW and use new one,
+             * on reboot system is in new mode and writing 0
+             * in CPU_SELECTOR selects BSP, which is NOP at
+             * the time _INI is called */
+            aml_append(method, aml_store(zero, aml_name(CPU_SELECTOR)));
+            aml_append(cpu_ctrl_dev, method);
+        }
+    }
+    aml_append(sb_scope, cpu_ctrl_dev);
+
+    cpus_dev = aml_device("\\_SB.CPUS");
+    {
+        int i;
+        Aml *ctrl_lock = aml_name("%s.%s", cphp_res_path, CPU_LOCK);
+        Aml *cpu_selector = aml_name("%s.%s", cphp_res_path, CPU_SELECTOR);
+        Aml *is_enabled = aml_name("%s.%s", cphp_res_path, CPU_ENABLED);
+        Aml *cpu_cmd = aml_name("%s.%s", cphp_res_path, CPU_COMMAND);
+        Aml *cpu_data = aml_name("%s.%s", cphp_res_path, CPU_DATA);
+        Aml *ins_evt = aml_name("%s.%s", cphp_res_path, CPU_INSERT_EVENT);
+        Aml *rm_evt = aml_name("%s.%s", cphp_res_path, CPU_REMOVE_EVENT);
+        Aml *ej_evt = aml_name("%s.%s", cphp_res_path, CPU_EJECT_EVENT);
+
+        aml_append(cpus_dev, aml_name_decl("_HID", aml_string("ACPI0010")));
+        aml_append(cpus_dev, aml_name_decl("_CID", aml_eisaid("PNP0A05")));
+
+        method = aml_method(CPU_NOTIFY_METHOD, 2, AML_NOTSERIALIZED);
+        for (i = 0; i < arch_ids->len; i++) {
+            Aml *cpu = aml_name(CPU_NAME_FMT, i);
+            Aml *uid = aml_arg(0);
+            Aml *event = aml_arg(1);
+
+            ifctx = aml_if(aml_equal(uid, aml_int(i)));
+            {
+                aml_append(ifctx, aml_notify(cpu, event));
+            }
+            aml_append(method, ifctx);
+        }
+        aml_append(cpus_dev, method);
+
+        method = aml_method(CPU_STS_METHOD, 1, AML_SERIALIZED);
+        {
+            Aml *idx = aml_arg(0);
+            Aml *sta = aml_local(0);
+
+            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+            aml_append(method, aml_store(idx, cpu_selector));
+            aml_append(method, aml_store(zero, sta));
+            ifctx = aml_if(aml_equal(is_enabled, one));
+            {
+                aml_append(ifctx, aml_store(aml_int(0xF), sta));
+            }
+            aml_append(method, ifctx);
+            aml_append(method, aml_release(ctrl_lock));
+            aml_append(method, aml_return(sta));
+        }
+        aml_append(cpus_dev, method);
+
+        method = aml_method(CPU_EJECT_METHOD, 1, AML_SERIALIZED);
+        {
+            Aml *idx = aml_arg(0);
+
+            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+            aml_append(method, aml_store(idx, cpu_selector));
+            aml_append(method, aml_store(one, ej_evt));
+            aml_append(method, aml_release(ctrl_lock));
+        }
+        aml_append(cpus_dev, method);
+
+        method = aml_method(CPU_SCAN_METHOD, 0, AML_SERIALIZED);
+        {
+            Aml *else_ctx;
+            Aml *while_ctx;
+            Aml *has_event = aml_local(0);
+            Aml *dev_chk = aml_int(1);
+            Aml *eject_req = aml_int(3);
+            Aml *next_cpu_cmd = aml_int(CPHP_GET_NEXT_CPU_WITH_EVENT_CMD);
+
+            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+            aml_append(method, aml_store(one, has_event));
+            while_ctx = aml_while(aml_equal(has_event, one));
+            {
+                 /* clear loop exit condition, ins_evt/rm_evt checks
+                  * will set it to 1 while next_cpu_cmd returns a CPU
+                  * with events */
+                 aml_append(while_ctx, aml_store(zero, has_event));
+                 aml_append(while_ctx, aml_store(next_cpu_cmd, cpu_cmd));
+                 ifctx = aml_if(aml_equal(ins_evt, one));
+                 {
+                     aml_append(ifctx,
+                         aml_call2(CPU_NOTIFY_METHOD, cpu_data, dev_chk));
+                     aml_append(ifctx, aml_store(one, ins_evt));
+                     aml_append(ifctx, aml_store(one, has_event));
+                 }
+                 aml_append(while_ctx, ifctx);
+                 else_ctx = aml_else();
+                 ifctx = aml_if(aml_equal(rm_evt, one));
+                 {
+                     aml_append(ifctx,
+                         aml_call2(CPU_NOTIFY_METHOD, cpu_data, eject_req));
+                     aml_append(ifctx, aml_store(one, rm_evt));
+                     aml_append(ifctx, aml_store(one, has_event));
+                 }
+                 aml_append(else_ctx, ifctx);
+                 aml_append(while_ctx, else_ctx);
+            }
+            aml_append(method, while_ctx);
+            aml_append(method, aml_release(ctrl_lock));
+        }
+        aml_append(cpus_dev, method);
+
+        method = aml_method(CPU_OST_METHOD, 4, AML_SERIALIZED);
+        {
+            Aml *uid = aml_arg(0);
+            Aml *ev_cmd = aml_int(CPHP_OST_EVENT_CMD);
+            Aml *st_cmd = aml_int(CPHP_OST_STATUS_CMD);
+
+            aml_append(method, aml_acquire(ctrl_lock, 0xFFFF));
+            aml_append(method, aml_store(uid, cpu_selector));
+            aml_append(method, aml_store(ev_cmd, cpu_cmd));
+            aml_append(method, aml_store(aml_arg(1), cpu_data));
+            aml_append(method, aml_store(st_cmd, cpu_cmd));
+            aml_append(method, aml_store(aml_arg(2), cpu_data));
+            aml_append(method, aml_release(ctrl_lock));
+        }
+        aml_append(cpus_dev, method);
+
+        /* build Processor object for each processor */
+        for (i = 0; i < arch_ids->len; i++) {
+            Aml *dev;
+            Aml *uid = aml_int(i);
+            GArray *madt_buf = g_array_new(0, 1, 1);
+            int arch_id = arch_ids->cpus[i].arch_id;
+
+            if (opts.apci_1_compatible && arch_id < 255) {
+                dev = aml_processor(i, 0, 0, CPU_NAME_FMT, i);
+            } else {
+                dev = aml_device(CPU_NAME_FMT, i);
+                aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
+                aml_append(dev, aml_name_decl("_UID", uid));
+            }
+
+            method = aml_method("_STA", 0, AML_SERIALIZED);
+            aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid)));
+            aml_append(dev, method);
+
+            /* build _MAT object */
+            assert(adevc && adevc->madt_cpu);
+            adevc->madt_cpu(adev, i, arch_ids, madt_buf);
+            switch (madt_buf->data[0]) {
+            case ACPI_APIC_PROCESSOR: {
+                AcpiMadtProcessorApic *apic = (void *)madt_buf->data;
+                apic->flags = cpu_to_le32(1);
+                break;
+            }
+            default:
+                assert(0);
+            }
+            aml_append(dev, aml_name_decl("_MAT",
+                aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data)));
+            g_array_free(madt_buf, true);
+
+            method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
+            aml_append(method, aml_call1(CPU_EJECT_METHOD, uid));
+            aml_append(dev, method);
+
+            method = aml_method("_OST", 3, AML_SERIALIZED);
+            aml_append(method,
+                aml_call4(CPU_OST_METHOD, uid, aml_arg(0),
+                          aml_arg(1), aml_arg(2))
+            );
+            aml_append(dev, method);
+            aml_append(cpus_dev, dev);
+        }
+    }
+    aml_append(sb_scope, cpus_dev);
+    aml_append(table, sb_scope);
+
+    method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED);
+    aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD));
+    aml_append(table, method);
+
+    g_free(cphp_res_path);
+    g_free(arch_ids);
+}
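
For orientation, a guest-side sketch (not part of the commit) of the register protocol served by cpu_hotplug_rd()/cpu_hotplug_wr() above: select a CPU, ask for the next one with a pending event, read its flags and acknowledge. The offsets mirror the ACPI_CPU_* constants; CPUHP_BASE is a placeholder, the real base is whatever cpu_hotplug_hw_init() and build_cpus_aml() were given.

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/io.h>                    /* inb/inl/outb/outl, x86 Linux */

    #define CPUHP_BASE         0xaf00      /* placeholder, board-specific */
    #define CPUHP_SELECTOR     (CPUHP_BASE + 0)   /* 32-bit, write-only  */
    #define CPUHP_FLAGS        (CPUHP_BASE + 4)   /* 8-bit,  read/write  */
    #define CPUHP_CMD          (CPUHP_BASE + 5)   /* 8-bit,  write-only  */
    #define CPUHP_CMD_DATA     (CPUHP_BASE + 8)   /* 32-bit, read/write  */
    #define CMD_GET_NEXT_EVENT 0    /* CPHP_GET_NEXT_CPU_WITH_EVENT_CMD */

    int main(void)
    {
        if (iopl(3) < 0) {                 /* raw port access, needs root */
            perror("iopl");
            return 1;
        }

        outl(0, CPUHP_SELECTOR);           /* start scanning from CPU 0 */
        outb(CMD_GET_NEXT_EVENT, CPUHP_CMD);

        uint32_t cpu   = inl(CPUHP_CMD_DATA); /* selector of a CPU with events */
        uint8_t  flags = inb(CPUHP_FLAGS);

        printf("cpu %u: enabled=%d insert=%d remove=%d\n", cpu,
               !!(flags & 1), !!(flags & 2), !!(flags & 4));

        if (flags & 2) {
            outb(2, CPUHP_FLAGS);          /* acknowledge the insert event */
        }
        return 0;
    }
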
diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c
index fe75bd9ac9..e19d902063 100644
--- a/hw/acpi/cpu_hotplug.c
+++ b/hw/acpi/cpu_hotplug.c
@@ -34,7 +34,15 @@ static uint64_t cpu_status_read(void *opaque, hwaddr addr, unsigned int size)
 static void cpu_status_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned int size)
 {
-    /* TODO: implement VCPU removal on guest signal that CPU can be removed */
+    /* firmware never used to write into the CPU-present bitmap, so
+       use that fact as the means to switch QEMU into modern CPU
+       hotplug mode: writing 0 at the start of the legacy CPU bitmap
+     */
+    if (addr == 0 && data == 0) {
+        AcpiCpuHotplug *cpus = opaque;
+        object_property_set_bool(cpus->device, false, "cpu-hotplug-legacy",
+                                 &error_abort);
+    }
 }
 
 static const MemoryRegionOps AcpiCpuHotplug_ops = {
@@ -83,6 +91,17 @@ void legacy_acpi_cpu_hotplug_init(MemoryRegion *parent, Object *owner,
     memory_region_init_io(&gpe_cpu->io, owner, &AcpiCpuHotplug_ops,
                           gpe_cpu, "acpi-cpu-hotplug", ACPI_GPE_PROC_LEN);
     memory_region_add_subregion(parent, base, &gpe_cpu->io);
+    gpe_cpu->device = owner;
+}
+
+void acpi_switch_to_modern_cphp(AcpiCpuHotplug *gpe_cpu,
+                                CPUHotplugState *cpuhp_state,
+                                uint16_t io_port)
+{
+    MemoryRegion *parent = pci_address_space_io(PCI_DEVICE(gpe_cpu->device));
+
+    memory_region_del_subregion(parent, &gpe_cpu->io);
+    cpu_hotplug_hw_init(parent, gpe_cpu->device, cpuhp_state, io_port);
 }
 
 void build_legacy_cpu_hotplug_aml(Aml *ctx, MachineState *machine,
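
A one-line illustration (not from this commit) of the switch-over handled in cpu_status_write() above: once firmware writes 0 to the first byte of the legacy CPU-present bitmap, the "cpu-hotplug-legacy" property is cleared and acpi_switch_to_modern_cphp() replaces the legacy region with the new register block. The base address is a placeholder.

    #include <sys/io.h>

    #define LEGACY_CPUHP_BASE 0xaf00       /* placeholder, board-specific */

    static void enable_modern_cpu_hotplug(void)
    {
        outb(0x00, LEGACY_CPUHP_BASE);     /* addr == 0 && data == 0 */
    }
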
diff --git a/hw/acpi/ich9.c b/hw/acpi/ich9.c
index 853c9c4eb7..e5a3c18e52 100644
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -189,6 +189,33 @@ static const VMStateDescription vmstate_tco_io_state = {
     }
 };
 
+static bool vmstate_test_use_cpuhp(void *opaque)
+{
+    ICH9LPCPMRegs *s = opaque;
+    return !s->cpu_hotplug_legacy;
+}
+
+static int vmstate_cpuhp_pre_load(void *opaque)
+{
+    ICH9LPCPMRegs *s = opaque;
+    Object *obj = OBJECT(s->gpe_cpu.device);
+    object_property_set_bool(obj, false, "cpu-hotplug-legacy", &error_abort);
+    return 0;
+}
+
+static const VMStateDescription vmstate_cpuhp_state = {
+    .name = "ich9_pm/cpuhp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .needed = vmstate_test_use_cpuhp,
+    .pre_load = vmstate_cpuhp_pre_load,
+    .fields      = (VMStateField[]) {
+        VMSTATE_CPU_HOTPLUG(cpuhp_state, ICH9LPCPMRegs),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_ich9_pm = {
     .name = "ich9_pm",
     .version_id = 1,
@@ -209,6 +236,7 @@ const VMStateDescription vmstate_ich9_pm = {
     .subsections = (const VMStateDescription*[]) {
         &vmstate_memhp_state,
         &vmstate_tco_io_state,
+        &vmstate_cpuhp_state,
         NULL
     }
 };
@@ -306,6 +334,26 @@ static void ich9_pm_set_memory_hotplug_support(Object *obj, bool value,
     s->pm.acpi_memory_hotplug.is_enabled = value;
 }
 
+static bool ich9_pm_get_cpu_hotplug_legacy(Object *obj, Error **errp)
+{
+    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+    return s->pm.cpu_hotplug_legacy;
+}
+
+static void ich9_pm_set_cpu_hotplug_legacy(Object *obj, bool value,
+                                           Error **errp)
+{
+    ICH9LPCState *s = ICH9_LPC_DEVICE(obj);
+
+    assert(!value);
+    if (s->pm.cpu_hotplug_legacy && value == false) {
+        acpi_switch_to_modern_cphp(&s->pm.gpe_cpu, &s->pm.cpuhp_state,
+                                   ICH9_CPU_HOTPLUG_IO_BASE);
+    }
+    s->pm.cpu_hotplug_legacy = value;
+}
+
 static void ich9_pm_get_disable_s3(Object *obj, Visitor *v, const char *name,
                                    void *opaque, Error **errp)
 {
@@ -397,6 +445,7 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp)
 {
     static const uint32_t gpe0_len = ICH9_PMIO_GPE0_LEN;
     pm->acpi_memory_hotplug.is_enabled = true;
+    pm->cpu_hotplug_legacy = true;
     pm->disable_s3 = 0;
     pm->disable_s4 = 0;
     pm->s4_val = 2;
@@ -412,6 +461,10 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm, Error **errp)
                              ich9_pm_get_memory_hotplug_support,
                              ich9_pm_set_memory_hotplug_support,
                              NULL);
+    object_property_add_bool(obj, "cpu-hotplug-legacy",
+                             ich9_pm_get_cpu_hotplug_legacy,
+                             ich9_pm_set_cpu_hotplug_legacy,
+                             NULL);
     object_property_add(obj, ACPI_PM_PROP_S3_DISABLED, "uint8",
                         ich9_pm_get_disable_s3,
                         ich9_pm_set_disable_s3,
@@ -440,7 +493,11 @@ void ich9_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
         acpi_memory_plug_cb(hotplug_dev, &lpc->pm.acpi_memory_hotplug,
                             dev, errp);
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
-        legacy_acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.gpe_cpu, dev, errp);
+        if (lpc->pm.cpu_hotplug_legacy) {
+            legacy_acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.gpe_cpu, dev, errp);
+        } else {
+            acpi_cpu_plug_cb(hotplug_dev, &lpc->pm.cpuhp_state, dev, errp);
+        }
     } else {
         error_setg(errp, "acpi: device plug request for not supported device"
                    " type: %s", object_get_typename(OBJECT(dev)));
@@ -457,6 +514,10 @@ void ich9_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev,
         acpi_memory_unplug_request_cb(hotplug_dev,
                                       &lpc->pm.acpi_memory_hotplug, dev,
                                       errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+               !lpc->pm.cpu_hotplug_legacy) {
+        acpi_cpu_unplug_request_cb(hotplug_dev, &lpc->pm.cpuhp_state,
+                                   dev, errp);
     } else {
         error_setg(errp, "acpi: device unplug request for not supported device"
                    " type: %s", object_get_typename(OBJECT(dev)));
@@ -471,6 +532,9 @@ void ich9_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev,
     if (lpc->pm.acpi_memory_hotplug.is_enabled &&
         object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
         acpi_memory_unplug_cb(&lpc->pm.acpi_memory_hotplug, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+               !lpc->pm.cpu_hotplug_legacy) {
+        acpi_cpu_unplug_cb(&lpc->pm.cpuhp_state, dev, errp);
     } else {
         error_setg(errp, "acpi: device unplug for not supported device"
                    " type: %s", object_get_typename(OBJECT(dev)));
@@ -482,4 +546,7 @@ void ich9_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
     ICH9LPCState *s = ICH9_LPC_DEVICE(adev);
 
     acpi_memory_ospm_status(&s->pm.acpi_memory_hotplug, list);
+    if (!s->pm.cpu_hotplug_legacy) {
+        acpi_cpu_ospm_status(&s->pm.cpuhp_state, list);
+    }
 }
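
Both the firmware write above and the migration pre_load hook end up flipping the same QOM property. A small sketch, assuming an ICH9LPCState pointer named lpc, of how any other QEMU code could do the same; note that the setter asserts value == false, so the switch is one-way:

    object_property_set_bool(OBJECT(lpc), false, "cpu-hotplug-legacy",
                             &error_abort);
    assert(!object_property_get_bool(OBJECT(lpc), "cpu-hotplug-legacy",
                                     &error_abort));
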
diff --git a/hw/acpi/ipmi.c b/hw/acpi/ipmi.c
new file mode 100644
index 0000000000..7e74ce4460
--- /dev/null
+++ b/hw/acpi/ipmi.c
@@ -0,0 +1,105 @@
+/*
+ * IPMI ACPI firmware handling
+ *
+ * Copyright (c) 2015,2016 Corey Minyard, MontaVista Software, LLC
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/ipmi/ipmi.h"
+#include "hw/acpi/aml-build.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ipmi.h"
+
+static Aml *aml_ipmi_crs(IPMIFwInfo *info)
+{
+    Aml *crs = aml_resource_template();
+
+    /*
+     * The base address is fixed and cannot change.  That may be different
+     * if someone does PCI, but we aren't there yet.
+     */
+    switch (info->memspace) {
+    case IPMI_MEMSPACE_IO:
+        aml_append(crs, aml_io(AML_DECODE16, info->base_address,
+                               info->base_address + info->register_length - 1,
+                               info->register_spacing, info->register_length));
+        break;
+    case IPMI_MEMSPACE_MEM32:
+        aml_append(crs,
+                   aml_dword_memory(AML_POS_DECODE,
+                            AML_MIN_FIXED, AML_MAX_FIXED,
+                            AML_NON_CACHEABLE, AML_READ_WRITE,
+                            0xffffffff,
+                            info->base_address,
+                            info->base_address + info->register_length - 1,
+                            info->register_spacing, info->register_length));
+        break;
+    case IPMI_MEMSPACE_MEM64:
+        aml_append(crs,
+                   aml_qword_memory(AML_POS_DECODE,
+                            AML_MIN_FIXED, AML_MAX_FIXED,
+                            AML_NON_CACHEABLE, AML_READ_WRITE,
+                            0xffffffffffffffffULL,
+                            info->base_address,
+                            info->base_address + info->register_length - 1,
+                            info->register_spacing, info->register_length));
+        break;
+    case IPMI_MEMSPACE_SMBUS:
+        aml_append(crs, aml_return(aml_int(info->base_address)));
+        break;
+    default:
+        abort();
+    }
+
+    if (info->interrupt_number) {
+        aml_append(crs, aml_irq_no_flags(info->interrupt_number));
+    }
+
+    return crs;
+}
+
+static Aml *aml_ipmi_device(IPMIFwInfo *info)
+{
+    Aml *dev;
+    uint16_t version = ((info->ipmi_spec_major_revision << 8)
+                        | (info->ipmi_spec_minor_revision << 4));
+
+    assert(info->ipmi_spec_minor_revision <= 15);
+
+    dev = aml_device("MI%d", info->uuid);
+    aml_append(dev, aml_name_decl("_HID", aml_eisaid("IPI0001")));
+    aml_append(dev, aml_name_decl("_STR", aml_string("ipmi_%s",
+                                                     info->interface_name)));
+    aml_append(dev, aml_name_decl("_UID", aml_int(info->uuid)));
+    aml_append(dev, aml_name_decl("_CRS", aml_ipmi_crs(info)));
+    aml_append(dev, aml_name_decl("_IFT", aml_int(info->interface_type)));
+    aml_append(dev, aml_name_decl("_SRV", aml_int(version)));
+
+    return dev;
+}
+
+void build_acpi_ipmi_devices(Aml *scope, BusState *bus)
+{
+
+    BusChild *kid;
+
+    QTAILQ_FOREACH(kid, &bus->children,  sibling) {
+        IPMIInterface *ii;
+        IPMIInterfaceClass *iic;
+        IPMIFwInfo info;
+        Object *obj = object_dynamic_cast(OBJECT(kid->child),
+                                          TYPE_IPMI_INTERFACE);
+
+        if (!obj) {
+            continue;
+        }
+
+        ii = IPMI_INTERFACE(obj);
+        iic = IPMI_INTERFACE_GET_CLASS(obj);
+        iic->get_fwinfo(ii, &info);
+        aml_append(scope, aml_ipmi_device(&info));
+    }
+}
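
For context, a sketch (not part of the diff) of the producer side: what an interface's get_fwinfo() callback might fill in for build_acpi_ipmi_devices() above. The field names come from the code above; the function name, the KCS choice and the concrete values are illustrative assumptions.

    #include "hw/ipmi/ipmi.h"              /* IPMIFwInfo, IPMI_MEMSPACE_IO */

    static void example_kcs_get_fwinfo(IPMIInterface *ii, IPMIFwInfo *info)
    {
        (void)ii;                          /* not needed for this sketch */
        info->interface_name = "kcs";
        info->interface_type = 1;          /* assumed _IFT value for KCS */
        info->ipmi_spec_major_revision = 2;
        info->ipmi_spec_minor_revision = 0;   /* must be <= 15, see assert */
        info->memspace = IPMI_MEMSPACE_IO;
        info->base_address = 0xca2;        /* illustrative I/O port */
        info->register_length = 2;
        info->register_spacing = 1;
        info->interrupt_number = 0;        /* 0 == no _IRQ resource */
        info->uuid = 0;                    /* becomes _UID and the "MI0" name */
    }
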
diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c
index b4c22627df..e486128aa1 100644
--- a/hw/acpi/nvdimm.c
+++ b/hw/acpi/nvdimm.c
@@ -216,6 +216,26 @@ static uint32_t nvdimm_slot_to_dcr_index(int slot)
     return nvdimm_slot_to_spa_index(slot) + 1;
 }
 
+static NVDIMMDevice *nvdimm_get_device_by_handle(uint32_t handle)
+{
+    NVDIMMDevice *nvdimm = NULL;
+    GSList *list, *device_list = nvdimm_get_plugged_device_list();
+
+    for (list = device_list; list; list = list->next) {
+        NVDIMMDevice *nvd = list->data;
+        int slot = object_property_get_int(OBJECT(nvd), PC_DIMM_SLOT_PROP,
+                                           NULL);
+
+        if (nvdimm_slot_to_handle(slot) == handle) {
+            nvdimm = nvd;
+            break;
+        }
+    }
+
+    g_slist_free(device_list);
+    return nvdimm;
+}
+
 /* ACPI 6.0: 5.2.25.1 System Physical Address Range Structure */
 static void
 nvdimm_build_structure_spa(GArray *structures, DeviceState *dev)
@@ -406,6 +426,282 @@ struct NvdimmDsmFuncNoPayloadOut {
 } QEMU_PACKED;
 typedef struct NvdimmDsmFuncNoPayloadOut NvdimmDsmFuncNoPayloadOut;
 
+struct NvdimmFuncGetLabelSizeOut {
+    /* the size of buffer filled by QEMU. */
+    uint32_t len;
+    uint32_t func_ret_status; /* return status code. */
+    uint32_t label_size; /* the size of label data area. */
+    /*
+     * Maximum size of the namespace label data length supported by
+     * the platform in Get/Set Namespace Label Data functions.
+     */
+    uint32_t max_xfer;
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelSizeOut NvdimmFuncGetLabelSizeOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelSizeOut) > 4096);
+
+struct NvdimmFuncGetLabelDataIn {
+    uint32_t offset; /* the offset in the namespace label data area. */
+    uint32_t length; /* the size of data to be read via the function. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelDataIn NvdimmFuncGetLabelDataIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataIn) +
+                  offsetof(NvdimmDsmIn, arg3) > 4096);
+
+struct NvdimmFuncGetLabelDataOut {
+    /* the size of buffer filled by QEMU. */
+    uint32_t len;
+    uint32_t func_ret_status; /* return status code. */
+    uint8_t out_buf[0]; /* the data read via Get Namespace Label Data. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > 4096);
+
+struct NvdimmFuncSetLabelDataIn {
+    uint32_t offset; /* the offset in the namespace label data area. */
+    uint32_t length; /* the size of data to be written via the function. */
+    uint8_t in_buf[0]; /* the data written to label data area. */
+} QEMU_PACKED;
+typedef struct NvdimmFuncSetLabelDataIn NvdimmFuncSetLabelDataIn;
+QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncSetLabelDataIn) +
+                  offsetof(NvdimmDsmIn, arg3) > 4096);
+
+static void
+nvdimm_dsm_function0(uint32_t supported_func, hwaddr dsm_mem_addr)
+{
+    NvdimmDsmFunc0Out func0 = {
+        .len = cpu_to_le32(sizeof(func0)),
+        .supported_func = cpu_to_le32(supported_func),
+    };
+    cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof(func0));
+}
+
+static void
+nvdimm_dsm_no_payload(uint32_t func_ret_status, hwaddr dsm_mem_addr)
+{
+    NvdimmDsmFuncNoPayloadOut out = {
+        .len = cpu_to_le32(sizeof(out)),
+        .func_ret_status = cpu_to_le32(func_ret_status),
+    };
+    cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
+}
+
+static void nvdimm_dsm_root(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
+{
+    /*
+     * function 0 is called to inquire which functions are supported by
+     * OSPM
+     */
+    if (!in->function) {
+        nvdimm_dsm_function0(0 /* No function supported other than
+                                  function 0 */, dsm_mem_addr);
+        return;
+    }
+
+    /* No function except function 0 is supported yet. */
+    nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+}
+
+/*
+ * the max transfer size is the max size transferred by both a
+ * 'Get Namespace Label Data' function and a 'Set Namespace Label Data'
+ * function.
+ */
+static uint32_t nvdimm_get_max_xfer_label_size(void)
+{
+    uint32_t max_get_size, max_set_size, dsm_memory_size = 4096;
+
+    /*
+     * the max data ACPI can read one time which is transferred by
+     * the response of 'Get Namespace Label Data' function.
+     */
+    max_get_size = dsm_memory_size - sizeof(NvdimmFuncGetLabelDataOut);
+
+    /*
+     * the max data ACPI can write one time which is transferred by
+     * 'Set Namespace Label Data' function.
+     */
+    max_set_size = dsm_memory_size - offsetof(NvdimmDsmIn, arg3) -
+                   sizeof(NvdimmFuncSetLabelDataIn);
+
+    return MIN(max_get_size, max_set_size);
+}
+
+/*
+ * DSM Spec Rev1 4.4 Get Namespace Label Size (Function Index 4).
+ *
+ * It gets the size of Namespace Label data area and the max data size
+ * that Get/Set Namespace Label Data functions can transfer.
+ */
+static void nvdimm_dsm_label_size(NVDIMMDevice *nvdimm, hwaddr dsm_mem_addr)
+{
+    NvdimmFuncGetLabelSizeOut label_size_out = {
+        .len = cpu_to_le32(sizeof(label_size_out)),
+    };
+    uint32_t label_size, mxfer;
+
+    label_size = nvdimm->label_size;
+    mxfer = nvdimm_get_max_xfer_label_size();
+
+    nvdimm_debug("label_size %#x, max_xfer %#x.\n", label_size, mxfer);
+
+    label_size_out.func_ret_status = cpu_to_le32(0 /* Success */);
+    label_size_out.label_size = cpu_to_le32(label_size);
+    label_size_out.max_xfer = cpu_to_le32(mxfer);
+
+    cpu_physical_memory_write(dsm_mem_addr, &label_size_out,
+                              sizeof(label_size_out));
+}
+
+static uint32_t nvdimm_rw_label_data_check(NVDIMMDevice *nvdimm,
+                                           uint32_t offset, uint32_t length)
+{
+    uint32_t ret = 3 /* Invalid Input Parameters */;
+
+    if (offset + length < offset) {
+        nvdimm_debug("offset %#x + length %#x overflows.\n", offset,
+                     length);
+        return ret;
+    }
+
+    if (nvdimm->label_size < offset + length) {
+        nvdimm_debug("position %#x is beyond label data (len = %" PRIx64 ").\n",
+                     offset + length, nvdimm->label_size);
+        return ret;
+    }
+
+    if (length > nvdimm_get_max_xfer_label_size()) {
+        nvdimm_debug("length (%#x) is larger than max_xfer (%#x).\n",
+                     length, nvdimm_get_max_xfer_label_size());
+        return ret;
+    }
+
+    return 0 /* Success */;
+}
+
+/*
+ * DSM Spec Rev1 4.5 Get Namespace Label Data (Function Index 5).
+ */
+static void nvdimm_dsm_get_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
+                                      hwaddr dsm_mem_addr)
+{
+    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
+    NvdimmFuncGetLabelDataIn *get_label_data;
+    NvdimmFuncGetLabelDataOut *get_label_data_out;
+    uint32_t status;
+    int size;
+
+    get_label_data = (NvdimmFuncGetLabelDataIn *)in->arg3;
+    le32_to_cpus(&get_label_data->offset);
+    le32_to_cpus(&get_label_data->length);
+
+    nvdimm_debug("Read Label Data: offset %#x length %#x.\n",
+                 get_label_data->offset, get_label_data->length);
+
+    status = nvdimm_rw_label_data_check(nvdimm, get_label_data->offset,
+                                        get_label_data->length);
+    if (status != 0 /* Success */) {
+        nvdimm_dsm_no_payload(status, dsm_mem_addr);
+        return;
+    }
+
+    size = sizeof(*get_label_data_out) + get_label_data->length;
+    assert(size <= 4096);
+    get_label_data_out = g_malloc(size);
+
+    get_label_data_out->len = cpu_to_le32(size);
+    get_label_data_out->func_ret_status = cpu_to_le32(0 /* Success */);
+    nvc->read_label_data(nvdimm, get_label_data_out->out_buf,
+                         get_label_data->length, get_label_data->offset);
+
+    cpu_physical_memory_write(dsm_mem_addr, get_label_data_out, size);
+    g_free(get_label_data_out);
+}
+
+/*
+ * DSM Spec Rev1 4.6 Set Namespace Label Data (Function Index 6).
+ */
+static void nvdimm_dsm_set_label_data(NVDIMMDevice *nvdimm, NvdimmDsmIn *in,
+                                      hwaddr dsm_mem_addr)
+{
+    NVDIMMClass *nvc = NVDIMM_GET_CLASS(nvdimm);
+    NvdimmFuncSetLabelDataIn *set_label_data;
+    uint32_t status;
+
+    set_label_data = (NvdimmFuncSetLabelDataIn *)in->arg3;
+
+    le32_to_cpus(&set_label_data->offset);
+    le32_to_cpus(&set_label_data->length);
+
+    nvdimm_debug("Write Label Data: offset %#x length %#x.\n",
+                 set_label_data->offset, set_label_data->length);
+
+    status = nvdimm_rw_label_data_check(nvdimm, set_label_data->offset,
+                                        set_label_data->length);
+    if (status != 0 /* Success */) {
+        nvdimm_dsm_no_payload(status, dsm_mem_addr);
+        return;
+    }
+
+    assert(sizeof(*in) + sizeof(*set_label_data) + set_label_data->length <=
+           4096);
+
+    nvc->write_label_data(nvdimm, set_label_data->in_buf,
+                          set_label_data->length, set_label_data->offset);
+    nvdimm_dsm_no_payload(0 /* Success */, dsm_mem_addr);
+}
+
+static void nvdimm_dsm_device(NvdimmDsmIn *in, hwaddr dsm_mem_addr)
+{
+    NVDIMMDevice *nvdimm = nvdimm_get_device_by_handle(in->handle);
+
+    /* See the comments in nvdimm_dsm_root(). */
+    if (!in->function) {
+        uint32_t supported_func = 0;
+
+        if (nvdimm && nvdimm->label_size) {
+            supported_func |= 0x1 /* Bit 0 indicates whether there is
+                                     support for any functions other
+                                     than function 0. */ |
+                              1 << 4 /* Get Namespace Label Size */ |
+                              1 << 5 /* Get Namespace Label Data */ |
+                              1 << 6 /* Set Namespace Label Data */;
+        }
+        nvdimm_dsm_function0(supported_func, dsm_mem_addr);
+        return;
+    }
+
+    if (!nvdimm) {
+        nvdimm_dsm_no_payload(2 /* Non-Existing Memory Device */,
+                              dsm_mem_addr);
+        return;
+    }
+
+    /* Encode DSM function according to DSM Spec Rev1. */
+    switch (in->function) {
+    case 4 /* Get Namespace Label Size */:
+        if (nvdimm->label_size) {
+            nvdimm_dsm_label_size(nvdimm, dsm_mem_addr);
+            return;
+        }
+        break;
+    case 5 /* Get Namespace Label Data */:
+        if (nvdimm->label_size) {
+            nvdimm_dsm_get_label_data(nvdimm, in, dsm_mem_addr);
+            return;
+        }
+        break;
+    case 0x6 /* Set Namespace Label Data */:
+        if (nvdimm->label_size) {
+            nvdimm_dsm_set_label_data(nvdimm, in, dsm_mem_addr);
+            return;
+        }
+        break;
+    }
+
+    nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+}
+
 static uint64_t
 nvdimm_dsm_read(void *opaque, hwaddr addr, unsigned size)
 {
@@ -436,26 +732,22 @@ nvdimm_dsm_write(void *opaque, hwaddr addr, uint64_t val, unsigned size)
     nvdimm_debug("Revision %#x Handler %#x Function %#x.\n", in->revision,
                  in->handle, in->function);
 
-    /*
-     * function 0 is called to inquire which functions are supported by
-     * OSPM
-     */
-    if (in->function == 0) {
-        NvdimmDsmFunc0Out func0 = {
-            .len = cpu_to_le32(sizeof(func0)),
-             /* No function supported other than function 0 */
-            .supported_func = cpu_to_le32(0),
-        };
-        cpu_physical_memory_write(dsm_mem_addr, &func0, sizeof func0);
-    } else {
-        /* No function except function 0 is supported yet. */
-        NvdimmDsmFuncNoPayloadOut out = {
-            .len = cpu_to_le32(sizeof(out)),
-            .func_ret_status = cpu_to_le32(1)  /* Not Supported */,
-        };
-        cpu_physical_memory_write(dsm_mem_addr, &out, sizeof(out));
+    if (in->revision != 0x1 /* Currently we only support DSM Spec Rev1. */) {
+        nvdimm_debug("Revision %#x is not supported, expect %#x.\n",
+                     in->revision, 0x1);
+        nvdimm_dsm_no_payload(1 /* Not Supported */, dsm_mem_addr);
+        goto exit;
+    }
+
+     /* Handle 0 is reserved for NVDIMM Root Device. */
+    if (!in->handle) {
+        nvdimm_dsm_root(in, dsm_mem_addr);
+        goto exit;
     }
 
+    nvdimm_dsm_device(in, dsm_mem_addr);
+
+exit:
     g_free(in);
 }
 
@@ -487,18 +779,39 @@ void nvdimm_init_acpi_state(AcpiNVDIMMState *state, MemoryRegion *io,
 
 static void nvdimm_build_common_dsm(Aml *dev)
 {
-    Aml *method, *ifctx, *function, *dsm_mem, *unpatched, *result_size;
+    Aml *method, *ifctx, *function, *handle, *uuid, *dsm_mem, *result_size;
+    Aml *elsectx, *unsupport, *unpatched, *expected_uuid, *uuid_invalid;
+    Aml *pckg, *pckg_index, *pckg_buf;
     uint8_t byte_list[1];
 
-    method = aml_method(NVDIMM_COMMON_DSM, 4, AML_SERIALIZED);
+    method = aml_method(NVDIMM_COMMON_DSM, 5, AML_SERIALIZED);
+    uuid = aml_arg(0);
     function = aml_arg(2);
+    handle = aml_arg(4);
     dsm_mem = aml_name(NVDIMM_ACPI_MEM_ADDR);
 
     /*
      * do not support any method if DSM memory address has not been
      * patched.
      */
-    unpatched = aml_if(aml_equal(dsm_mem, aml_int(0x0)));
+    unpatched = aml_equal(dsm_mem, aml_int(0x0));
+
+    expected_uuid = aml_local(0);
+
+    ifctx = aml_if(aml_equal(handle, aml_int(0x0)));
+    aml_append(ifctx, aml_store(
+               aml_touuid("2F10E7A4-9E91-11E4-89D3-123B93F75CBA")
+               /* UUID for NVDIMM Root Device */, expected_uuid));
+    aml_append(method, ifctx);
+    elsectx = aml_else();
+    aml_append(elsectx, aml_store(
+               aml_touuid("4309AC30-0D11-11E4-9191-0800200C9A66")
+               /* UUID for NVDIMM Devices */, expected_uuid));
+    aml_append(method, elsectx);
+
+    uuid_invalid = aml_lnot(aml_equal(uuid, expected_uuid));
+
+    unsupport = aml_if(aml_or(unpatched, uuid_invalid, NULL));
 
     /*
      * function 0 is called to inquire what functions are supported by
@@ -507,24 +820,42 @@ static void nvdimm_build_common_dsm(Aml *dev)
     ifctx = aml_if(aml_equal(function, aml_int(0)));
     byte_list[0] = 0 /* No function Supported */;
     aml_append(ifctx, aml_return(aml_buffer(1, byte_list)));
-    aml_append(unpatched, ifctx);
+    aml_append(unsupport, ifctx);
 
     /* No function is supported yet. */
     byte_list[0] = 1 /* Not Supported */;
-    aml_append(unpatched, aml_return(aml_buffer(1, byte_list)));
-    aml_append(method, unpatched);
+    aml_append(unsupport, aml_return(aml_buffer(1, byte_list)));
+    aml_append(method, unsupport);
 
     /*
      * The HDLE indicates the DSM function is issued from which device,
-     * it is not used at this time as no function is supported yet.
-     * Currently we make it always be 0 for all the devices and will set
-     * the appropriate value once real function is implemented.
+     * it reserves 0 for root device and is the handle for NVDIMM devices.
+     * See the comments in nvdimm_slot_to_handle().
      */
-    aml_append(method, aml_store(aml_int(0x0), aml_name("HDLE")));
+    aml_append(method, aml_store(handle, aml_name("HDLE")));
     aml_append(method, aml_store(aml_arg(1), aml_name("REVS")));
     aml_append(method, aml_store(aml_arg(2), aml_name("FUNC")));
 
     /*
+     * The fourth parameter (Arg3) of _DSM is a package which contains
+     * a buffer, the layout of the buffer is specified by UUID (Arg0),
+     * Revision ID (Arg1) and Function Index (Arg2) which are documented
+     * in the DSM Spec.
+     */
+    pckg = aml_arg(3);
+    ifctx = aml_if(aml_and(aml_equal(aml_object_type(pckg),
+                   aml_int(4 /* Package */)) /* It is a Package? */,
+                   aml_equal(aml_sizeof(pckg), aml_int(1)) /* 1 element? */,
+                   NULL));
+
+    pckg_index = aml_local(2);
+    pckg_buf = aml_local(3);
+    aml_append(ifctx, aml_store(aml_index(pckg, aml_int(0)), pckg_index));
+    aml_append(ifctx, aml_store(aml_derefof(pckg_index), pckg_buf));
+    aml_append(ifctx, aml_store(pckg_buf, aml_name("ARG3")));
+    aml_append(method, ifctx);
+
+    /*
      * tell QEMU about the real address of DSM memory, then QEMU
      * gets the control and fills the result in DSM memory.
      */
@@ -542,13 +873,14 @@ static void nvdimm_build_common_dsm(Aml *dev)
     aml_append(dev, method);
 }
 
-static void nvdimm_build_device_dsm(Aml *dev)
+static void nvdimm_build_device_dsm(Aml *dev, uint32_t handle)
 {
     Aml *method;
 
     method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
-    aml_append(method, aml_return(aml_call4(NVDIMM_COMMON_DSM, aml_arg(0),
-                                  aml_arg(1), aml_arg(2), aml_arg(3))));
+    aml_append(method, aml_return(aml_call5(NVDIMM_COMMON_DSM, aml_arg(0),
+                                  aml_arg(1), aml_arg(2), aml_arg(3),
+                                  aml_int(handle))));
     aml_append(dev, method);
 }
 
@@ -573,7 +905,7 @@ static void nvdimm_build_nvdimm_devices(GSList *device_list, Aml *root_dev)
          */
         aml_append(nvdimm_dev, aml_name_decl("_ADR", aml_int(handle)));
 
-        nvdimm_build_device_dsm(nvdimm_dev);
+        nvdimm_build_device_dsm(nvdimm_dev, handle);
         aml_append(root_dev, nvdimm_dev);
     }
 }
@@ -665,7 +997,9 @@ static void nvdimm_build_ssdt(GSList *device_list, GArray *table_offsets,
     aml_append(dev, field);
 
     nvdimm_build_common_dsm(dev);
-    nvdimm_build_device_dsm(dev);
+
+    /* 0 is reserved for root device. */
+    nvdimm_build_device_dsm(dev, 0);
 
     nvdimm_build_nvdimm_devices(device_list, dev);
 
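
A quick standalone check (not part of the patch) of the 4 KiB budget behind nvdimm_get_max_xfer_label_size() above. The struct shapes mirror NvdimmFuncGetLabelDataOut and NvdimmFuncSetLabelDataIn; offsetof(NvdimmDsmIn, arg3) is assumed to be 12 (handle + revision + function), since NvdimmDsmIn itself is outside this hunk.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DSM_PAGE 4096                  /* size of the DSM output page */

    struct GetLabelDataOut {               /* as NvdimmFuncGetLabelDataOut */
        uint32_t len;
        uint32_t func_ret_status;
        uint8_t  out_buf[];
    } __attribute__((packed));

    struct SetLabelDataIn {                /* as NvdimmFuncSetLabelDataIn */
        uint32_t offset;
        uint32_t length;
        uint8_t  in_buf[];
    } __attribute__((packed));

    int main(void)
    {
        size_t arg3_off = 12;         /* assumed offsetof(NvdimmDsmIn, arg3) */
        size_t max_get  = DSM_PAGE - sizeof(struct GetLabelDataOut);
        size_t max_set  = DSM_PAGE - arg3_off - sizeof(struct SetLabelDataIn);

        /* the limit advertised by the Get Namespace Label Size function */
        printf("max_xfer = %zu bytes\n", max_get < max_set ? max_get : max_set);
        return 0;
    }
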
diff --git a/hw/acpi/piix4.c b/hw/acpi/piix4.c
index c48cb1b91a..2adc246b00 100644
--- a/hw/acpi/piix4.c
+++ b/hw/acpi/piix4.c
@@ -34,6 +34,7 @@
 #include "hw/acpi/piix4.h"
 #include "hw/acpi/pcihp.h"
 #include "hw/acpi/cpu_hotplug.h"
+#include "hw/acpi/cpu.h"
 #include "hw/hotplug.h"
 #include "hw/mem/pc-dimm.h"
 #include "hw/acpi/memory_hotplug.h"
@@ -86,7 +87,9 @@ typedef struct PIIX4PMState {
     uint8_t disable_s4;
     uint8_t s4_val;
 
+    bool cpu_hotplug_legacy;
     AcpiCpuHotplug gpe_cpu;
+    CPUHotplugState cpuhp_state;
 
     MemHotplugState acpi_memory_hotplug;
 } PIIX4PMState;
@@ -273,6 +276,32 @@ static const VMStateDescription vmstate_memhp_state = {
     }
 };
 
+static bool vmstate_test_use_cpuhp(void *opaque)
+{
+    PIIX4PMState *s = opaque;
+    return !s->cpu_hotplug_legacy;
+}
+
+static int vmstate_cpuhp_pre_load(void *opaque)
+{
+    Object *obj = OBJECT(opaque);
+    object_property_set_bool(obj, false, "cpu-hotplug-legacy", &error_abort);
+    return 0;
+}
+
+static const VMStateDescription vmstate_cpuhp_state = {
+    .name = "piix4_pm/cpuhp",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .needed = vmstate_test_use_cpuhp,
+    .pre_load = vmstate_cpuhp_pre_load,
+    .fields      = (VMStateField[]) {
+        VMSTATE_CPU_HOTPLUG(cpuhp_state, PIIX4PMState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 /* qemu-kvm 1.2 uses version 3 but advertised as 2
  * To support incoming qemu-kvm 1.2 migration, change version_id
  * and minimum_version_id to 2 below (which breaks migration from
@@ -307,6 +336,7 @@ static const VMStateDescription vmstate_acpi = {
     },
     .subsections = (const VMStateDescription*[]) {
          &vmstate_memhp_state,
+         &vmstate_cpuhp_state,
          NULL
     }
 };
@@ -352,7 +382,11 @@ static void piix4_device_plug_cb(HotplugHandler *hotplug_dev,
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
         acpi_pcihp_device_plug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev, errp);
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
-        legacy_acpi_cpu_plug_cb(hotplug_dev, &s->gpe_cpu, dev, errp);
+        if (s->cpu_hotplug_legacy) {
+            legacy_acpi_cpu_plug_cb(hotplug_dev, &s->gpe_cpu, dev, errp);
+        } else {
+            acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
+        }
     } else {
         error_setg(errp, "acpi: device plug request for not supported device"
                    " type: %s", object_get_typename(OBJECT(dev)));
@@ -371,6 +405,9 @@ static void piix4_device_unplug_request_cb(HotplugHandler *hotplug_dev,
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
         acpi_pcihp_device_unplug_cb(hotplug_dev, &s->acpi_pci_hotplug, dev,
                                     errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+               !s->cpu_hotplug_legacy) {
+        acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
     } else {
         error_setg(errp, "acpi: device unplug request for not supported device"
                    " type: %s", object_get_typename(OBJECT(dev)));
@@ -385,6 +422,9 @@ static void piix4_device_unplug_cb(HotplugHandler *hotplug_dev,
     if (s->acpi_memory_hotplug.is_enabled &&
         object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) {
         acpi_memory_unplug_cb(&s->acpi_memory_hotplug, dev, errp);
+    } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU) &&
+               !s->cpu_hotplug_legacy) {
+        acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp);
     } else {
         error_setg(errp, "acpi: device unplug for not supported device"
                    " type: %s", object_get_typename(OBJECT(dev)));
@@ -560,6 +600,26 @@ static const MemoryRegionOps piix4_gpe_ops = {
     .endianness = DEVICE_LITTLE_ENDIAN,
 };
 
+
+static bool piix4_get_cpu_hotplug_legacy(Object *obj, Error **errp)
+{
+    PIIX4PMState *s = PIIX4_PM(obj);
+
+    return s->cpu_hotplug_legacy;
+}
+
+static void piix4_set_cpu_hotplug_legacy(Object *obj, bool value, Error **errp)
+{
+    PIIX4PMState *s = PIIX4_PM(obj);
+
+    assert(!value);
+    if (s->cpu_hotplug_legacy && value == false) {
+        acpi_switch_to_modern_cphp(&s->gpe_cpu, &s->cpuhp_state,
+                                   PIIX4_CPU_HOTPLUG_IO_BASE);
+    }
+    s->cpu_hotplug_legacy = value;
+}
+
 static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
                                            PCIBus *bus, PIIX4PMState *s)
 {
@@ -570,6 +630,11 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
     acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent,
                     s->use_acpi_pci_hotplug);
 
+    s->cpu_hotplug_legacy = true;
+    object_property_add_bool(OBJECT(s), "cpu-hotplug-legacy",
+                             piix4_get_cpu_hotplug_legacy,
+                             piix4_set_cpu_hotplug_legacy,
+                             NULL);
     legacy_acpi_cpu_hotplug_init(parent, OBJECT(s), &s->gpe_cpu,
                                  PIIX4_CPU_HOTPLUG_IO_BASE);
 
@@ -583,6 +648,9 @@ static void piix4_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list)
     PIIX4PMState *s = PIIX4_PM(adev);
 
     acpi_memory_ospm_status(&s->acpi_memory_hotplug, list);
+    if (!s->cpu_hotplug_legacy) {
+        acpi_cpu_ospm_status(&s->cpuhp_state, list);
+    }
 }
 
 static void piix4_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
@@ -631,6 +699,7 @@ static void piix4_pm_class_init(ObjectClass *klass, void *data)
     hc->unplug = piix4_device_unplug_cb;
     adevc->ospm_status = piix4_ospm_status;
     adevc->send_event = piix4_send_gpe;
+    adevc->madt_cpu = pc_madt_cpu_entry;
 }
 
 static const TypeInfo piix4_pm_info = {
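
piix4 now provides the madt_cpu hook that build_cpus_aml() calls when building the per-CPU _MAT objects. A heavily hedged sketch of what such a hook is expected to append, based only on how the hook is consumed above; the real implementation is pc_madt_cpu_entry (not shown here), and the AcpiMadtProcessorApic layout and acpi_data_push() come from QEMU's ACPI headers, so they are assumptions of this sketch:

    static void example_madt_cpu(AcpiDeviceIf *adev, int uid,
                                 const CPUArchIdList *ids, GArray *entry)
    {
        /* append one Processor Local APIC entry for CPU 'uid' */
        AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof(*apic));

        apic->type = ACPI_APIC_PROCESSOR;  /* checked in build_cpus_aml() */
        apic->length = sizeof(*apic);
        apic->processor_id = uid;
        apic->local_apic_id = ids->cpus[uid].arch_id;
        apic->flags = cpu_to_le32(0);      /* build_cpus_aml() forces 1 (enabled) */
    }
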
diff --git a/hw/acpi/trace-events b/hw/acpi/trace-events
index e95b2183ac..5aa3ba67c8 100644
--- a/hw/acpi/trace-events
+++ b/hw/acpi/trace-events
@@ -16,3 +16,17 @@ mhp_acpi_clear_insert_evt(uint32_t slot) "slot[0x%"PRIx32"] clear insert event"
 mhp_acpi_clear_remove_evt(uint32_t slot) "slot[0x%"PRIx32"] clear remove event"
 mhp_acpi_pc_dimm_deleted(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm deleted"
 mhp_acpi_pc_dimm_delete_failed(uint32_t slot) "slot[0x%"PRIx32"] pc-dimm delete failed"
+
+# hw/acpi/cpu.c
+cpuhp_acpi_invalid_idx_selected(uint32_t idx) "0x%"PRIx32
+cpuhp_acpi_read_flags(uint32_t idx, uint8_t flags) "idx[0x%"PRIx32"] flags: 0x%"PRIx8
+cpuhp_acpi_write_idx(uint32_t idx) "set active cpu idx: 0x%"PRIx32
+cpuhp_acpi_write_cmd(uint32_t idx, uint8_t cmd) "idx[0x%"PRIx32"] cmd: 0x%"PRIx8
+cpuhp_acpi_read_cmd_data(uint32_t idx, uint32_t data) "idx[0x%"PRIx32"] data: 0x%"PRIx32
+cpuhp_acpi_cpu_has_events(uint32_t idx, bool ins, bool rm) "idx[0x%"PRIx32"] inserting: %d, removing: %d"
+cpuhp_acpi_clear_inserting_evt(uint32_t idx) "idx[0x%"PRIx32"]"
+cpuhp_acpi_clear_remove_evt(uint32_t idx) "idx[0x%"PRIx32"]"
+cpuhp_acpi_ejecting_invalid_cpu(uint32_t idx) "0x%"PRIx32
+cpuhp_acpi_ejecting_cpu(uint32_t idx) "0x%"PRIx32
+cpuhp_acpi_write_ost_ev(uint32_t slot, uint32_t ev) "idx[0x%"PRIx32"] OST EVENT: 0x%"PRIx32
+cpuhp_acpi_write_ost_status(uint32_t slot, uint32_t st) "idx[0x%"PRIx32"] OST STATUS: 0x%"PRIx32