-rw-r--r--  hw/tpm/tpm_crb.c     | 14
-rw-r--r--  linux-user/elfload.c | 49
2 files changed, 61 insertions(+), 2 deletions(-)
diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c
index d8917cb101..ef8b80e9aa 100644
--- a/hw/tpm/tpm_crb.c
+++ b/hw/tpm/tpm_crb.c
@@ -84,6 +84,12 @@ static uint64_t tpm_crb_mmio_read(void *opaque, hwaddr addr,
     unsigned offset = addr & 3;
     uint32_t val = *(uint32_t *)regs >> (8 * offset);
 
+    switch (addr) {
+    case A_CRB_LOC_STATE:
+        val |= !tpm_backend_get_tpm_established_flag(s->tpmbe);
+        break;
+    }
+
     trace_tpm_crb_mmio_read(addr, size, val);
 
     return val;
@@ -137,6 +143,8 @@ static void tpm_crb_mmio_write(void *opaque, hwaddr addr,
             /* not loc 3 or 4 */
             break;
         case CRB_LOC_CTRL_RELINQUISH:
+            ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE,
+                             locAssigned, 0);
             break;
         case CRB_LOC_CTRL_REQUEST_ACCESS:
             ARRAY_FIELD_DP32(s->regs, CRB_LOC_STS,
@@ -145,8 +153,6 @@ static void tpm_crb_mmio_write(void *opaque, hwaddr addr,
                              beenSeized, 0);
             ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE,
                              locAssigned, 1);
-            ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE,
-                             tpmRegValidSts, 1);
             break;
         }
         break;
@@ -210,6 +216,10 @@ static void tpm_crb_reset(void *dev)
 
     tpm_backend_reset(s->tpmbe);
 
+    memset(s->regs, 0, sizeof(s->regs));
+
+    ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE,
+                     tpmRegValidSts, 1);
     ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID,
                      InterfaceType, CRB_INTF_TYPE_CRB_ACTIVE);
     ARRAY_FIELD_DP32(s->regs, CRB_INTF_ID,
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 4563a3190b..23e34957f9 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1889,6 +1889,55 @@ unsigned long init_guest_space(unsigned long host_start,
 
     /* Otherwise, a non-zero size region of memory needs to be mapped
      * and validated.  */
+
+#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
+    /* On 32-bit ARM, we need to map not just the usable memory, but
+     * also the commpage.  Try to find a suitable place by allocating
+     * a big chunk for all of it.  If host_start, then the naive
+     * strategy probably does good enough.
+     */
+    if (!host_start) {
+        unsigned long guest_full_size, host_full_size, real_start;
+
+        guest_full_size =
+            (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
+        host_full_size = guest_full_size - guest_start;
+        real_start = (unsigned long)
+            mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
+        if (real_start == (unsigned long)-1) {
+            if (host_size < host_full_size - qemu_host_page_size) {
+                /* We failed to map a continuous segment, but we're
+                 * allowed to have a gap between the usable memory and
+                 * the commpage where other things can be mapped.
+                 * This sparseness gives us more flexibility to find
+                 * an address range.
+                 */
+                goto naive;
+            }
+            return (unsigned long)-1;
+        }
+        munmap((void *)real_start, host_full_size);
+        if (real_start & ~qemu_host_page_mask) {
+            /* The same thing again, but with an extra qemu_host_page_size
+             * so that we can shift around alignment.
+             */
+            unsigned long real_size = host_full_size + qemu_host_page_size;
+            real_start = (unsigned long)
+                mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
+            if (real_start == (unsigned long)-1) {
+                if (host_size < host_full_size - qemu_host_page_size) {
+                    goto naive;
+                }
+                return (unsigned long)-1;
+            }
+            munmap((void *)real_start, real_size);
+            real_start = HOST_PAGE_ALIGN(real_start);
+        }
+        current_start = real_start;
+    }
+ naive:
+#endif
+
     while (1) {
         unsigned long real_start, real_size, aligned_size;
         aligned_size = real_size = host_size;
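A note on the register helpers in the tpm_crb.c hunks: ARRAY_FIELD_DP32() deposits a named field into one register of an array without disturbing the other bits (QEMU's real macros live in include/hw/registerfields.h). The standalone sketch below re-implements just that deposit step so the locality-state changes above are easier to follow; field_dp32() is a hypothetical helper, and the register index and bit position are illustrative placeholders, not the CRB spec values.

#include <stdint.h>
#include <stdio.h>

#define LOC_STATE_IDX       0  /* regs[] index; hypothetical */
#define LOC_ASSIGNED_SHIFT  1  /* field bit position; hypothetical */
#define LOC_ASSIGNED_LEN    1  /* field width in bits */

/* Deposit `value` into bits [shift, shift+len) of *reg,
 * leaving all other bits untouched. */
static void field_dp32(uint32_t *reg, unsigned shift, unsigned len,
                       uint32_t value)
{
    uint32_t mask = ((1u << len) - 1) << shift;
    *reg = (*reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
    uint32_t regs[4] = { 0 };

    /* Equivalent in spirit to:
     * ARRAY_FIELD_DP32(s->regs, CRB_LOC_STATE, locAssigned, 1); */
    field_dp32(&regs[LOC_STATE_IDX], LOC_ASSIGNED_SHIFT,
               LOC_ASSIGNED_LEN, 1);
    printf("LOC_STATE = 0x%08x\n", regs[LOC_STATE_IDX]);
    return 0;
}

Read-modify-write through a mask is why the reset path above can safely memset() the whole array first and then deposit tpmRegValidSts: each deposit touches only its own field.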
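The elfload.c hunk leans on an mmap() probing idiom: reserve an oversized PROT_NONE region to discover a free address range, unmap it, and retry with one extra page of slack when the result is misaligned. Below is a minimal standalone sketch of that idiom, assuming Linux; probe_aligned_region() is a hypothetical name, not QEMU code, and it deliberately ignores the race window between munmap() and whatever real mapping the caller performs next.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Probe for a page-aligned start address of at least `size` bytes.
 * Returns MAP_FAILED if the address space cannot accommodate it. */
static void *probe_aligned_region(size_t size)
{
    long page = sysconf(_SC_PAGESIZE);

    /* Over-allocate by one page so we can round the start up.
     * PROT_NONE reserves address space without committing memory. */
    void *p = mmap(NULL, size + page, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) {
        return MAP_FAILED;
    }

    /* Release the reservation; the caller remaps at the aligned
     * address, accepting the small race this leaves open. */
    munmap(p, size + page);

    uintptr_t start = ((uintptr_t)p + page - 1) & ~((uintptr_t)page - 1);
    return (void *)start;
}

int main(void)
{
    void *addr = probe_aligned_region(16 * 1024 * 1024);
    if (addr != MAP_FAILED) {
        printf("candidate start: %p\n", addr);
    }
    return 0;
}

The patch applies the same two-step structure: first try the exact-size reservation, and only when the returned address is misaligned repeat the probe with qemu_host_page_size of slack and HOST_PAGE_ALIGN() the result.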