88 files changed, 2884 insertions, 1001 deletions
diff --git a/block.c b/block.c index 946e3c896e..308a91c96b 100644 --- a/block.c +++ b/block.c @@ -532,20 +532,139 @@ out: return ret; } -int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) +/** + * Helper function for bdrv_create_file_fallback(): Resize @blk to at + * least the given @minimum_size. + * + * On success, return @blk's actual length. + * Otherwise, return -errno. + */ +static int64_t create_file_fallback_truncate(BlockBackend *blk, + int64_t minimum_size, Error **errp) { - BlockDriver *drv; + Error *local_err = NULL; + int64_t size; + int ret; + + ret = blk_truncate(blk, minimum_size, false, PREALLOC_MODE_OFF, &local_err); + if (ret < 0 && ret != -ENOTSUP) { + error_propagate(errp, local_err); + return ret; + } + + size = blk_getlength(blk); + if (size < 0) { + error_free(local_err); + error_setg_errno(errp, -size, + "Failed to inquire the new image file's length"); + return size; + } + + if (size < minimum_size) { + /* Need to grow the image, but we failed to do that */ + error_propagate(errp, local_err); + return -ENOTSUP; + } + + error_free(local_err); + local_err = NULL; + + return size; +} + +/** + * Helper function for bdrv_create_file_fallback(): Zero the first + * sector to remove any potentially pre-existing image header. + */ +static int create_file_fallback_zero_first_sector(BlockBackend *blk, + int64_t current_size, + Error **errp) +{ + int64_t bytes_to_clear; + int ret; + + bytes_to_clear = MIN(current_size, BDRV_SECTOR_SIZE); + if (bytes_to_clear) { + ret = blk_pwrite_zeroes(blk, 0, bytes_to_clear, BDRV_REQ_MAY_UNMAP); + if (ret < 0) { + error_setg_errno(errp, -ret, + "Failed to clear the new image's first sector"); + return ret; + } + } + + return 0; +} + +static int bdrv_create_file_fallback(const char *filename, BlockDriver *drv, + QemuOpts *opts, Error **errp) +{ + BlockBackend *blk; + QDict *options = qdict_new(); + int64_t size = 0; + char *buf = NULL; + PreallocMode prealloc; Error *local_err = NULL; int ret; + size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0); + buf = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC); + prealloc = qapi_enum_parse(&PreallocMode_lookup, buf, + PREALLOC_MODE_OFF, &local_err); + g_free(buf); + if (local_err) { + error_propagate(errp, local_err); + return -EINVAL; + } + + if (prealloc != PREALLOC_MODE_OFF) { + error_setg(errp, "Unsupported preallocation mode '%s'", + PreallocMode_str(prealloc)); + return -ENOTSUP; + } + + qdict_put_str(options, "driver", drv->format_name); + + blk = blk_new_open(filename, NULL, options, + BDRV_O_RDWR | BDRV_O_RESIZE, errp); + if (!blk) { + error_prepend(errp, "Protocol driver '%s' does not support image " + "creation, and opening the image failed: ", + drv->format_name); + return -EINVAL; + } + + size = create_file_fallback_truncate(blk, size, errp); + if (size < 0) { + ret = size; + goto out; + } + + ret = create_file_fallback_zero_first_sector(blk, size, errp); + if (ret < 0) { + goto out; + } + + ret = 0; +out: + blk_unref(blk); + return ret; +} + +int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp) +{ + BlockDriver *drv; + drv = bdrv_find_protocol(filename, true, errp); if (drv == NULL) { return -ENOENT; } - ret = bdrv_create(drv, filename, opts, &local_err); - error_propagate(errp, local_err); - return ret; + if (drv->bdrv_co_create_opts) { + return bdrv_create(drv, filename, opts, errp); + } else { + return bdrv_create_file_fallback(filename, drv, opts, errp); + } } /** @@ -1444,6 +1563,24 @@ QemuOptsList bdrv_runtime_opts = { }, }; +static 
QemuOptsList fallback_create_opts = { + .name = "fallback-create-opts", + .head = QTAILQ_HEAD_INITIALIZER(fallback_create_opts.head), + .desc = { + { + .name = BLOCK_OPT_SIZE, + .type = QEMU_OPT_SIZE, + .help = "Virtual disk size" + }, + { + .name = BLOCK_OPT_PREALLOC, + .type = QEMU_OPT_STRING, + .help = "Preallocation mode (allowed values: off)" + }, + { /* end of list */ } + } +}; + /* * Common part for opening disk images and files * @@ -4807,14 +4944,15 @@ BlockDriverState *bdrv_find_node(const char *node_name) } /* Put this QMP function here so it can access the static graph_bdrv_states. */ -BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp) +BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, + Error **errp) { BlockDeviceInfoList *list, *entry; BlockDriverState *bs; list = NULL; QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) { - BlockDeviceInfo *info = bdrv_block_device_info(NULL, bs, errp); + BlockDeviceInfo *info = bdrv_block_device_info(NULL, bs, flat, errp); if (!info) { qapi_free_BlockDeviceInfoList(list); return NULL; @@ -5771,15 +5909,13 @@ void bdrv_img_create(const char *filename, const char *fmt, return; } - if (!proto_drv->create_opts) { - error_setg(errp, "Protocol driver '%s' does not support image creation", - proto_drv->format_name); - return; - } - /* Create parameter list */ create_opts = qemu_opts_append(create_opts, drv->create_opts); - create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); + if (proto_drv->create_opts) { + create_opts = qemu_opts_append(create_opts, proto_drv->create_opts); + } else { + create_opts = qemu_opts_append(create_opts, &fallback_create_opts); + } opts = qemu_opts_create(create_opts, NULL, 0, &error_abort); diff --git a/block/backup-top.c b/block/backup-top.c index fa78f3256d..1bfb360bd3 100644 --- a/block/backup-top.c +++ b/block/backup-top.c @@ -48,11 +48,17 @@ static coroutine_fn int backup_top_co_preadv( } static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset, - uint64_t bytes) + uint64_t bytes, BdrvRequestFlags flags) { BDRVBackupTopState *s = bs->opaque; - uint64_t end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size); - uint64_t off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size); + uint64_t off, end; + + if (flags & BDRV_REQ_WRITE_UNCHANGED) { + return 0; + } + + off = QEMU_ALIGN_DOWN(offset, s->bcs->cluster_size); + end = QEMU_ALIGN_UP(offset + bytes, s->bcs->cluster_size); return block_copy(s->bcs, off, end - off, NULL); } @@ -60,7 +66,7 @@ static coroutine_fn int backup_top_cbw(BlockDriverState *bs, uint64_t offset, static int coroutine_fn backup_top_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) { - int ret = backup_top_cbw(bs, offset, bytes); + int ret = backup_top_cbw(bs, offset, bytes, 0); if (ret < 0) { return ret; } @@ -71,7 +77,7 @@ static int coroutine_fn backup_top_co_pdiscard(BlockDriverState *bs, static int coroutine_fn backup_top_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, BdrvRequestFlags flags) { - int ret = backup_top_cbw(bs, offset, bytes); + int ret = backup_top_cbw(bs, offset, bytes, flags); if (ret < 0) { return ret; } @@ -84,11 +90,9 @@ static coroutine_fn int backup_top_co_pwritev(BlockDriverState *bs, uint64_t bytes, QEMUIOVector *qiov, int flags) { - if (!(flags & BDRV_REQ_WRITE_UNCHANGED)) { - int ret = backup_top_cbw(bs, offset, bytes); - if (ret < 0) { - return ret; - } + int ret = backup_top_cbw(bs, offset, bytes, flags); + if (ret < 0) { + return ret; } return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, 
flags); @@ -196,8 +200,13 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, return NULL; } - top->total_sectors = source->total_sectors; state = top->opaque; + top->total_sectors = source->total_sectors; + top->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED | + (BDRV_REQ_FUA & source->supported_write_flags); + top->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | + ((BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) & + source->supported_zero_flags); bdrv_ref(target); state->target = bdrv_attach_child(top, target, "target", &child_file, errp); diff --git a/block/file-posix.c b/block/file-posix.c index ab82ee1a67..6345477112 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -3477,67 +3477,6 @@ static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs, return raw_do_pwrite_zeroes(bs, offset, bytes, flags, true); } -static int coroutine_fn hdev_co_create_opts(const char *filename, QemuOpts *opts, - Error **errp) -{ - int fd; - int ret = 0; - struct stat stat_buf; - int64_t total_size = 0; - bool has_prefix; - - /* This function is used by both protocol block drivers and therefore either - * of these prefixes may be given. - * The return value has to be stored somewhere, otherwise this is an error - * due to -Werror=unused-value. */ - has_prefix = - strstart(filename, "host_device:", &filename) || - strstart(filename, "host_cdrom:" , &filename); - - (void)has_prefix; - - ret = raw_normalize_devicepath(&filename, errp); - if (ret < 0) { - return ret; - } - - /* Read out options */ - total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), - BDRV_SECTOR_SIZE); - - fd = qemu_open(filename, O_WRONLY | O_BINARY); - if (fd < 0) { - ret = -errno; - error_setg_errno(errp, -ret, "Could not open device"); - return ret; - } - - if (fstat(fd, &stat_buf) < 0) { - ret = -errno; - error_setg_errno(errp, -ret, "Could not stat device"); - } else if (!S_ISBLK(stat_buf.st_mode) && !S_ISCHR(stat_buf.st_mode)) { - error_setg(errp, - "The given file is neither a block nor a character device"); - ret = -ENODEV; - } else if (lseek(fd, 0, SEEK_END) < total_size) { - error_setg(errp, "Device is too small"); - ret = -ENOSPC; - } - - if (!ret && total_size) { - uint8_t buf[BDRV_SECTOR_SIZE] = { 0 }; - int64_t zero_size = MIN(BDRV_SECTOR_SIZE, total_size); - if (lseek(fd, 0, SEEK_SET) == -1) { - ret = -errno; - } else { - ret = qemu_write_full(fd, buf, zero_size); - ret = ret == zero_size ? 
0 : -errno; - } - } - qemu_close(fd); - return ret; -} - static BlockDriver bdrv_host_device = { .format_name = "host_device", .protocol_name = "host_device", @@ -3550,8 +3489,6 @@ static BlockDriver bdrv_host_device = { .bdrv_reopen_prepare = raw_reopen_prepare, .bdrv_reopen_commit = raw_reopen_commit, .bdrv_reopen_abort = raw_reopen_abort, - .bdrv_co_create_opts = hdev_co_create_opts, - .create_opts = &raw_create_opts, .mutable_opts = mutable_opts, .bdrv_co_invalidate_cache = raw_co_invalidate_cache, .bdrv_co_pwrite_zeroes = hdev_co_pwrite_zeroes, @@ -3678,8 +3615,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_reopen_prepare = raw_reopen_prepare, .bdrv_reopen_commit = raw_reopen_commit, .bdrv_reopen_abort = raw_reopen_abort, - .bdrv_co_create_opts = hdev_co_create_opts, - .create_opts = &raw_create_opts, .mutable_opts = mutable_opts, .bdrv_co_invalidate_cache = raw_co_invalidate_cache, @@ -3812,8 +3747,6 @@ static BlockDriver bdrv_host_cdrom = { .bdrv_reopen_prepare = raw_reopen_prepare, .bdrv_reopen_commit = raw_reopen_commit, .bdrv_reopen_abort = raw_reopen_abort, - .bdrv_co_create_opts = hdev_co_create_opts, - .create_opts = &raw_create_opts, .mutable_opts = mutable_opts, .bdrv_co_preadv = raw_co_preadv, diff --git a/block/iscsi.c b/block/iscsi.c index c8feaa2f0e..682abd8e09 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -2164,58 +2164,6 @@ static int coroutine_fn iscsi_co_truncate(BlockDriverState *bs, int64_t offset, return 0; } -static int coroutine_fn iscsi_co_create_opts(const char *filename, QemuOpts *opts, - Error **errp) -{ - int ret = 0; - int64_t total_size = 0; - BlockDriverState *bs; - IscsiLun *iscsilun = NULL; - QDict *bs_options; - Error *local_err = NULL; - - bs = bdrv_new(); - - /* Read out options */ - total_size = DIV_ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0), - BDRV_SECTOR_SIZE); - bs->opaque = g_new0(struct IscsiLun, 1); - iscsilun = bs->opaque; - - bs_options = qdict_new(); - iscsi_parse_filename(filename, bs_options, &local_err); - if (local_err) { - error_propagate(errp, local_err); - ret = -EINVAL; - } else { - ret = iscsi_open(bs, bs_options, 0, NULL); - } - qobject_unref(bs_options); - - if (ret != 0) { - goto out; - } - iscsi_detach_aio_context(bs); - if (iscsilun->type != TYPE_DISK) { - ret = -ENODEV; - goto out; - } - if (bs->total_sectors < total_size) { - ret = -ENOSPC; - goto out; - } - - ret = 0; -out: - if (iscsilun->iscsi != NULL) { - iscsi_destroy_context(iscsilun->iscsi); - } - g_free(bs->opaque); - bs->opaque = NULL; - bdrv_unref(bs); - return ret; -} - static int iscsi_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) { IscsiLun *iscsilun = bs->opaque; @@ -2486,8 +2434,6 @@ static BlockDriver bdrv_iscsi = { .bdrv_parse_filename = iscsi_parse_filename, .bdrv_file_open = iscsi_open, .bdrv_close = iscsi_close, - .bdrv_co_create_opts = iscsi_co_create_opts, - .create_opts = &iscsi_create_opts, .bdrv_reopen_prepare = iscsi_reopen_prepare, .bdrv_reopen_commit = iscsi_reopen_commit, .bdrv_co_invalidate_cache = iscsi_co_invalidate_cache, @@ -2525,8 +2471,6 @@ static BlockDriver bdrv_iser = { .bdrv_parse_filename = iscsi_parse_filename, .bdrv_file_open = iscsi_open, .bdrv_close = iscsi_close, - .bdrv_co_create_opts = iscsi_co_create_opts, - .create_opts = &iscsi_create_opts, .bdrv_reopen_prepare = iscsi_reopen_prepare, .bdrv_reopen_commit = iscsi_reopen_commit, .bdrv_co_invalidate_cache = iscsi_co_invalidate_cache, diff --git a/block/nbd.c b/block/nbd.c index d085554f21..6d3b22f844 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ 
-70,6 +70,7 @@ typedef struct BDRVNBDState { CoMutex send_mutex; CoQueue free_sema; Coroutine *connection_co; + Coroutine *teardown_co; QemuCoSleepState *connection_co_sleep_ns_state; bool drained; bool wait_drained_end; @@ -203,7 +204,15 @@ static void nbd_teardown_connection(BlockDriverState *bs) qemu_co_sleep_wake(s->connection_co_sleep_ns_state); } } - BDRV_POLL_WHILE(bs, s->connection_co); + if (qemu_in_coroutine()) { + s->teardown_co = qemu_coroutine_self(); + /* connection_co resumes us when it terminates */ + qemu_coroutine_yield(); + s->teardown_co = NULL; + } else { + BDRV_POLL_WHILE(bs, s->connection_co); + } + assert(!s->connection_co); } static bool nbd_client_connecting(BDRVNBDState *s) @@ -395,6 +404,9 @@ static coroutine_fn void nbd_connection_entry(void *opaque) s->ioc = NULL; } + if (s->teardown_co) { + aio_co_wake(s->teardown_co); + } aio_wait_kick(); } diff --git a/block/qapi.c b/block/qapi.c index 9a5d0c9b27..afd9f3b4a7 100644 --- a/block/qapi.c +++ b/block/qapi.c @@ -42,7 +42,9 @@ #include "qemu/cutils.h" BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, - BlockDriverState *bs, Error **errp) + BlockDriverState *bs, + bool flat, + Error **errp) { ImageInfo **p_image_info; BlockDriverState *bs0; @@ -156,6 +158,11 @@ BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, return NULL; } + /* stop gathering data for flat output */ + if (flat) { + break; + } + if (bs0->drv && bs0->backing) { info->backing_file_depth++; bs0 = bs0->backing->bs; @@ -389,7 +396,7 @@ static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, if (bs && bs->drv) { info->has_inserted = true; - info->inserted = bdrv_block_device_info(blk, bs, errp); + info->inserted = bdrv_block_device_info(blk, bs, false, errp); if (info->inserted == NULL) { goto err; } @@ -657,7 +664,7 @@ void bdrv_snapshot_dump(QEMUSnapshotInfo *sn) char *sizing = NULL; if (!sn) { - qemu_printf("%-10s%-20s%7s%20s%15s", + qemu_printf("%-10s%-20s%11s%20s%15s", "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK"); } else { ti = sn->date_sec; @@ -672,7 +679,7 @@ void bdrv_snapshot_dump(QEMUSnapshotInfo *sn) (int)(secs % 60), (int)((sn->vm_clock_nsec / 1000000) % 1000)); sizing = size_to_str(sn->vm_state_size); - qemu_printf("%-10s%-20s%7s%20s%15s", + qemu_printf("%-10s%-20s%11s%20s%15s", sn->id_str, sn->name, sizing, date_buf, diff --git a/block/qcow2.c b/block/qcow2.c index 8dcee5efec..3c754f616b 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -135,13 +135,16 @@ static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen, s->crypto_header.length = headerlen; s->crypto_header.offset = ret; - /* Zero fill remaining space in cluster so it has predictable - * content in case of future spec changes */ + /* + * Zero fill all space in cluster so it has predictable + * content, as we may not initialize some regions of the + * header (eg only 1 out of 8 key slots will be initialized) + */ clusterlen = size_to_clusters(s, headerlen) * s->cluster_size; assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0); ret = bdrv_pwrite_zeroes(bs->file, - ret + headerlen, - clusterlen - headerlen, 0); + ret, + clusterlen, 0); if (ret < 0) { error_setg_errno(errp, -ret, "Could not zero fill encryption header"); return -1; diff --git a/blockdev.c b/blockdev.c index 45de0ba37e..011dcfec27 100644 --- a/blockdev.c +++ b/blockdev.c @@ -3734,9 +3734,13 @@ void qmp_drive_backup(DriveBackup *backup, Error **errp) blockdev_do_action(&action, errp); } -BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp) 
+BlockDeviceInfoList *qmp_query_named_block_nodes(bool has_flat, + bool flat, + Error **errp) { - return bdrv_named_nodes_list(errp); + bool return_flat = has_flat && flat; + + return bdrv_named_nodes_list(return_flat, errp); } XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp) diff --git a/default-configs/ppc64-softmmu.mak b/default-configs/ppc64-softmmu.mak index cca52665d9..ae0841fa3a 100644 --- a/default-configs/ppc64-softmmu.mak +++ b/default-configs/ppc64-softmmu.mak @@ -8,3 +8,4 @@ CONFIG_POWERNV=y # For pSeries CONFIG_PSERIES=y +CONFIG_NVDIMM=y diff --git a/docs/interop/qcow2.txt b/docs/interop/qcow2.txt index af5711e533..5597e24474 100644 --- a/docs/interop/qcow2.txt +++ b/docs/interop/qcow2.txt @@ -79,9 +79,9 @@ The first cluster of a qcow2 image contains the file header: Offset into the image file at which the snapshot table starts. Must be aligned to a cluster boundary. -If the version is 3 or higher, the header has the following additional fields. -For version 2, the values are assumed to be zero, unless specified otherwise -in the description of a field. +For version 2, the header is exactly 72 bytes in length, and finishes here. +For version 3 or higher, the header length is at least 104 bytes, including +the next fields through header_length. 72 - 79: incompatible_features Bitmask of incompatible features. An implementation must @@ -109,7 +109,12 @@ in the description of a field. An External Data File Name header extension may be present if this bit is set. - Bits 3-63: Reserved (set to 0) + Bit 3: Compression type bit. If this bit is set, + a non-default compression is used for compressed + clusters. The compression_type field must be + present and not zero. + + Bits 4-63: Reserved (set to 0) 80 - 87: compatible_features Bitmask of compatible features. An implementation can @@ -164,6 +169,57 @@ in the description of a field. 100 - 103: header_length Length of the header structure in bytes. For version 2 images, the length is always assumed to be 72 bytes. + For version 3 it's at least 104 bytes and must be a multiple + of 8. + + +=== Additional fields (version 3 and higher) === + +In general, these fields are optional and may be safely ignored by the software, +as well as filled by zeros (which is equal to field absence), if software needs +to set field B, but does not care about field A which precedes B. More +formally, additional fields have the following compatibility rules: + +1. If the value of the additional field must not be ignored for correct +handling of the file, it will be accompanied by a corresponding incompatible +feature bit. + +2. If there are no unrecognized incompatible feature bits set, an unknown +additional field may be safely ignored other than preserving its value when +rewriting the image header. + +3. An explicit value of 0 will have the same behavior as when the field is not +present*, if not altered by a specific incompatible bit. + +*. A field is considered not present when header_length is less than or equal +to the field's offset. Also, all additional fields are not present for +version 2. + + 104: compression_type + + Defines the compression method used for compressed clusters. + All compressed clusters in an image use the same compression + type. + + If the incompatible bit "Compression type" is set: the field + must be present and non-zero (which means non-zlib + compression type). Otherwise, this field must not be present + or must be zero (which means zlib). 
+ + Available compression type values: + 0: zlib <https://www.zlib.net/> + + +=== Header padding === + +@header_length must be a multiple of 8, which means that if the end of the last +additional field is not aligned, some padding is needed. This padding must be +zeroed, so that if some existing (or future) additional field will fall into +the padding, it will be interpreted accordingly to point [3.] of the previous +paragraph, i.e. in the same manner as when this field is not present. + + +=== Header extensions === Directly after the image header, optional sections called header extensions can be stored. Each extension has a structure like the following: diff --git a/docs/interop/qemu-img.rst b/docs/interop/qemu-img.rst index 42e4451db4..5f40137c10 100644 --- a/docs/interop/qemu-img.rst +++ b/docs/interop/qemu-img.rst @@ -214,6 +214,13 @@ Parameters to convert subcommand: will still be printed. Areas that cannot be read from the source will be treated as containing only zeroes. +.. option:: --target-is-zero + + Assume that reading the destination image will always return + zeros. This parameter is mutually exclusive with a destination image + that has a backing file. It is required to also use the ``-n`` + parameter to skip image creation. + Parameters to dd subcommand: .. program:: qemu-img-dd @@ -366,7 +373,7 @@ Command description: 4 Error on reading data -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM* to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index 9fdad6dc3f..5219dd0e2e 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -32,33 +32,7 @@ #include "hw/acpi/bios-linker-loader.h" #include "hw/nvram/fw_cfg.h" #include "hw/mem/nvdimm.h" - -static int nvdimm_device_list(Object *obj, void *opaque) -{ - GSList **list = opaque; - - if (object_dynamic_cast(obj, TYPE_NVDIMM)) { - *list = g_slist_append(*list, DEVICE(obj)); - } - - object_child_foreach(obj, nvdimm_device_list, opaque); - return 0; -} - -/* - * inquire NVDIMM devices and link them into the list which is - * returned to the caller. - * - * Note: it is the caller's responsibility to free the list to avoid - * memory leak. 
- */ -static GSList *nvdimm_get_device_list(void) -{ - GSList *list = NULL; - - object_child_foreach(qdev_get_machine(), nvdimm_device_list, &list); - return list; -} +#include "qemu/nvdimm-utils.h" #define NVDIMM_UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ { (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c index 1cde165611..2ae9c15311 100644 --- a/hw/arm/allwinner-a10.c +++ b/hw/arm/allwinner-a10.c @@ -24,11 +24,15 @@ #include "hw/arm/allwinner-a10.h" #include "hw/misc/unimp.h" #include "sysemu/sysemu.h" +#include "hw/boards.h" +#include "hw/usb/hcd-ohci.h" #define AW_A10_PIC_REG_BASE 0x01c20400 #define AW_A10_PIT_REG_BASE 0x01c20c00 #define AW_A10_UART0_REG_BASE 0x01c28000 #define AW_A10_EMAC_BASE 0x01c0b000 +#define AW_A10_EHCI_BASE 0x01c14000 +#define AW_A10_OHCI_BASE 0x01c14400 #define AW_A10_SATA_BASE 0x01c18000 static void aw_a10_init(Object *obj) @@ -49,6 +53,17 @@ static void aw_a10_init(Object *obj) sysbus_init_child_obj(obj, "sata", &s->sata, sizeof(s->sata), TYPE_ALLWINNER_AHCI); + + if (machine_usb(current_machine)) { + int i; + + for (i = 0; i < AW_A10_NUM_USB; i++) { + sysbus_init_child_obj(obj, "ehci[*]", OBJECT(&s->ehci[i]), + sizeof(s->ehci[i]), TYPE_PLATFORM_EHCI); + sysbus_init_child_obj(obj, "ohci[*]", OBJECT(&s->ohci[i]), + sizeof(s->ohci[i]), TYPE_SYSBUS_OHCI); + } + } } static void aw_a10_realize(DeviceState *dev, Error **errp) @@ -121,6 +136,34 @@ static void aw_a10_realize(DeviceState *dev, Error **errp) serial_mm_init(get_system_memory(), AW_A10_UART0_REG_BASE, 2, qdev_get_gpio_in(dev, 1), 115200, serial_hd(0), DEVICE_NATIVE_ENDIAN); + + if (machine_usb(current_machine)) { + int i; + + for (i = 0; i < AW_A10_NUM_USB; i++) { + char bus[16]; + + sprintf(bus, "usb-bus.%d", i); + + object_property_set_bool(OBJECT(&s->ehci[i]), true, + "companion-enable", &error_fatal); + object_property_set_bool(OBJECT(&s->ehci[i]), true, "realized", + &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ehci[i]), 0, + AW_A10_EHCI_BASE + i * 0x8000); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, + qdev_get_gpio_in(dev, 39 + i)); + + object_property_set_str(OBJECT(&s->ohci[i]), bus, "masterbus", + &error_fatal); + object_property_set_bool(OBJECT(&s->ohci[i]), true, "realized", + &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->ohci[i]), 0, + AW_A10_OHCI_BASE + i * 0x8000); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->ohci[i]), 0, + qdev_get_gpio_in(dev, 64 + i)); + } + } } static void aw_a10_class_init(ObjectClass *oc, void *data) diff --git a/hw/arm/mainstone.c b/hw/arm/mainstone.c index b01ce3ce08..6e64dfab50 100644 --- a/hw/arm/mainstone.c +++ b/hw/arm/mainstone.c @@ -138,19 +138,10 @@ static void mainstone_common_init(MemoryRegion *address_space_mem, /* There are two 32MiB flash devices on the board */ for (i = 0; i < 2; i ++) { dinfo = drive_get(IF_PFLASH, 0, i); - if (!dinfo) { - if (qtest_enabled()) { - break; - } - error_report("Two flash images must be given with the " - "'pflash' parameter"); - exit(1); - } - if (!pflash_cfi01_register(mainstone_flash_base[i], i ? "mainstone.flash1" : "mainstone.flash0", MAINSTONE_FLASH, - blk_by_legacy_dinfo(dinfo), + dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL, sector_len, 4, 0, 0, 0, 0, be)) { error_report("Error registering flash memory"); exit(1); diff --git a/hw/arm/z2.c b/hw/arm/z2.c index 34794fe3ae..4bb237f22d 100644 --- a/hw/arm/z2.c +++ b/hw/arm/z2.c @@ -314,12 +314,6 @@ static void z2_init(MachineState *machine) be = 0; #endif dinfo = drive_get(IF_PFLASH, 0, 0); - if (!dinfo && !qtest_enabled()) { - error_report("Flash image must be given with the " - "'pflash' parameter"); - exit(1); - } - if (!pflash_cfi01_register(Z2_FLASH_BASE, "z2.flash0", Z2_FLASH_SIZE, dinfo ? blk_by_legacy_dinfo(dinfo) : NULL, sector_len, 4, 0, 0, 0, 0, be)) { diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index f9e0eeaace..22a43e4984 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -1227,17 +1227,17 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) case 0xd44: /* PFR1. */ return cpu->id_pfr1; case 0xd48: /* DFR0. */ - return cpu->id_dfr0; + return cpu->isar.id_dfr0; case 0xd4c: /* AFR0. */ return cpu->id_afr0; case 0xd50: /* MMFR0. */ - return cpu->id_mmfr0; + return cpu->isar.id_mmfr0; case 0xd54: /* MMFR1. */ - return cpu->id_mmfr1; + return cpu->isar.id_mmfr1; case 0xd58: /* MMFR2. */ - return cpu->id_mmfr2; + return cpu->isar.id_mmfr2; case 0xd5c: /* MMFR3. */ - return cpu->id_mmfr3; + return cpu->isar.id_mmfr3; case 0xd60: /* ISAR0. */ return cpu->isar.id_isar0; case 0xd64: /* ISAR1. */ diff --git a/hw/mem/Kconfig b/hw/mem/Kconfig index 620fd4cb59..2ad052a536 100644 --- a/hw/mem/Kconfig +++ b/hw/mem/Kconfig @@ -8,4 +8,4 @@ config MEM_DEVICE config NVDIMM bool default y - depends on PC + depends on (PC || PSERIES) diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c index 39f1426d1f..8e426d24bb 100644 --- a/hw/mem/nvdimm.c +++ b/hw/mem/nvdimm.c @@ -69,11 +69,51 @@ out: error_propagate(errp, local_err); } +static void nvdimm_get_uuid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + char *value = NULL; + + value = qemu_uuid_unparse_strdup(&nvdimm->uuid); + + visit_type_str(v, name, &value, errp); + g_free(value); +} + + +static void nvdimm_set_uuid(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(obj); + Error *local_err = NULL; + char *value; + + visit_type_str(v, name, &value, &local_err); + if (local_err) { + goto out; + } + + if (qemu_uuid_parse(value, &nvdimm->uuid) != 0) { + error_setg(errp, "Property '%s.%s' has invalid value", + object_get_typename(obj), name); + goto out; + } + g_free(value); + +out: + error_propagate(errp, local_err); +} + + static void nvdimm_init(Object *obj) { object_property_add(obj, NVDIMM_LABEL_SIZE_PROP, "int", nvdimm_get_label_size, nvdimm_set_label_size, NULL, NULL, NULL); + + object_property_add(obj, NVDIMM_UUID_PROP, "QemuUUID", nvdimm_get_uuid, + nvdimm_set_uuid, NULL, NULL, NULL); } static void nvdimm_finalize(Object *obj) diff --git a/hw/misc/aspeed_scu.c b/hw/misc/aspeed_scu.c index ce2f9562d4..9d7482a9df 100644 --- a/hw/misc/aspeed_scu.c +++ b/hw/misc/aspeed_scu.c @@ -77,6 +77,8 @@ #define CPU2_BASE_SEG4 TO_REG(0x110) #define CPU2_BASE_SEG5 TO_REG(0x114) #define CPU2_CACHE_CTRL TO_REG(0x118) +#define CHIP_ID0 TO_REG(0x150) +#define CHIP_ID1 TO_REG(0x154) #define UART_HPLL_CLK TO_REG(0x160) #define PCIE_CTRL TO_REG(0x180) #define BMC_MMIO_CTRL TO_REG(0x184) @@ -115,6 +117,8 @@ #define AST2600_HW_STRAP2_PROT TO_REG(0x518) #define AST2600_RNG_CTRL TO_REG(0x524) #define AST2600_RNG_DATA TO_REG(0x540) +#define 
AST2600_CHIP_ID0 TO_REG(0x5B0) +#define AST2600_CHIP_ID1 TO_REG(0x5B4) #define AST2600_CLK TO_REG(0x40) @@ -182,6 +186,8 @@ static const uint32_t ast2500_a1_resets[ASPEED_SCU_NR_REGS] = { [CPU2_BASE_SEG1] = 0x80000000U, [CPU2_BASE_SEG4] = 0x1E600000U, [CPU2_BASE_SEG5] = 0xC0000000U, + [CHIP_ID0] = 0x1234ABCDU, + [CHIP_ID1] = 0x88884444U, [UART_HPLL_CLK] = 0x00001903U, [PCIE_CTRL] = 0x0000007BU, [BMC_DEV_ID] = 0x00002402U @@ -232,8 +238,47 @@ static uint64_t aspeed_scu_read(void *opaque, hwaddr offset, unsigned size) return s->regs[reg]; } -static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data, - unsigned size) +static void aspeed_ast2400_scu_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size) +{ + AspeedSCUState *s = ASPEED_SCU(opaque); + int reg = TO_REG(offset); + + if (reg >= ASPEED_SCU_NR_REGS) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Out-of-bounds write at offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + if (reg > PROT_KEY && reg < CPU2_BASE_SEG1 && + !s->regs[PROT_KEY]) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: SCU is locked!\n", __func__); + } + + trace_aspeed_scu_write(offset, size, data); + + switch (reg) { + case PROT_KEY: + s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0; + return; + case SILICON_REV: + case FREQ_CNTR_EVAL: + case VGA_SCRATCH1 ... VGA_SCRATCH8: + case RNG_DATA: + case FREE_CNTR4: + case FREE_CNTR4_EXT: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return; + } + + s->regs[reg] = data; +} + +static void aspeed_ast2500_scu_write(void *opaque, hwaddr offset, + uint64_t data, unsigned size) { AspeedSCUState *s = ASPEED_SCU(opaque); int reg = TO_REG(offset); @@ -257,31 +302,19 @@ static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data, case PROT_KEY: s->regs[reg] = (data == ASPEED_SCU_PROT_KEY) ? 1 : 0; return; - case CLK_SEL: - s->regs[reg] = data; - break; case HW_STRAP1: - if (ASPEED_IS_AST2500(s->regs[SILICON_REV])) { - s->regs[HW_STRAP1] |= data; - return; - } - /* Jump to assignment below */ - break; + s->regs[HW_STRAP1] |= data; + return; case SILICON_REV: - if (ASPEED_IS_AST2500(s->regs[SILICON_REV])) { - s->regs[HW_STRAP1] &= ~data; - } else { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", - __func__, offset); - } - /* Avoid assignment below, we've handled everything */ + s->regs[HW_STRAP1] &= ~data; return; case FREQ_CNTR_EVAL: case VGA_SCRATCH1 ... 
VGA_SCRATCH8: case RNG_DATA: case FREE_CNTR4: case FREE_CNTR4_EXT: + case CHIP_ID0: + case CHIP_ID1: qemu_log_mask(LOG_GUEST_ERROR, "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", __func__, offset); @@ -291,9 +324,18 @@ static void aspeed_scu_write(void *opaque, hwaddr offset, uint64_t data, s->regs[reg] = data; } -static const MemoryRegionOps aspeed_scu_ops = { +static const MemoryRegionOps aspeed_ast2400_scu_ops = { + .read = aspeed_scu_read, + .write = aspeed_ast2400_scu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .valid.unaligned = false, +}; + +static const MemoryRegionOps aspeed_ast2500_scu_ops = { .read = aspeed_scu_read, - .write = aspeed_scu_write, + .write = aspeed_ast2500_scu_write, .endianness = DEVICE_LITTLE_ENDIAN, .valid.min_access_size = 4, .valid.max_access_size = 4, @@ -469,7 +511,7 @@ static void aspeed_2400_scu_class_init(ObjectClass *klass, void *data) asc->calc_hpll = aspeed_2400_scu_calc_hpll; asc->apb_divider = 2; asc->nr_regs = ASPEED_SCU_NR_REGS; - asc->ops = &aspeed_scu_ops; + asc->ops = &aspeed_ast2400_scu_ops; } static const TypeInfo aspeed_2400_scu_info = { @@ -489,7 +531,7 @@ static void aspeed_2500_scu_class_init(ObjectClass *klass, void *data) asc->calc_hpll = aspeed_2500_scu_calc_hpll; asc->apb_divider = 4; asc->nr_regs = ASPEED_SCU_NR_REGS; - asc->ops = &aspeed_scu_ops; + asc->ops = &aspeed_ast2500_scu_ops; } static const TypeInfo aspeed_2500_scu_info = { @@ -586,6 +628,8 @@ static void aspeed_ast2600_scu_write(void *opaque, hwaddr offset, case AST2600_RNG_DATA: case AST2600_SILICON_REV: case AST2600_SILICON_REV2: + case AST2600_CHIP_ID0: + case AST2600_CHIP_ID1: /* Add read only registers here */ qemu_log_mask(LOG_GUEST_ERROR, "%s: Write to read-only offset 0x%" HWADDR_PRIx "\n", @@ -614,6 +658,9 @@ static const uint32_t ast2600_a0_resets[ASPEED_AST2600_SCU_NR_REGS] = { [AST2600_CLK_STOP_CTRL2] = 0xFFF0FFF0, [AST2600_SDRAM_HANDSHAKE] = 0x00000040, /* SoC completed DRAM init */ [AST2600_HPLL_PARAM] = 0x1000405F, + [AST2600_CHIP_ID0] = 0x1234ABCD, + [AST2600_CHIP_ID1] = 0x88884444, + }; static void aspeed_ast2600_scu_reset(DeviceState *dev) diff --git a/hw/misc/iotkit-secctl.c b/hw/misc/iotkit-secctl.c index 609869821a..9fdb82056a 100644 --- a/hw/misc/iotkit-secctl.c +++ b/hw/misc/iotkit-secctl.c @@ -340,7 +340,7 @@ static MemTxResult iotkit_secctl_s_write(void *opaque, hwaddr addr, qemu_set_irq(s->sec_resp_cfg, s->secrespcfg); break; case A_SECPPCINTCLR: - value &= 0x00f000f3; + s->secppcintstat &= ~(value & 0x00f000f3); foreach_ppc(s, iotkit_secctl_ppc_update_irq_clear); break; case A_SECPPCINTEN: diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c index ecfc1b2c4e..d645468f4a 100644 --- a/hw/pci-host/pnv_phb3_msi.c +++ b/hw/pci-host/pnv_phb3_msi.c @@ -220,7 +220,7 @@ static void phb3_msi_resend(ICSState *ics) if ((msi->rba[i] & (1ull << j)) == 0) { continue; } - msi->rba[i] &= ~(1u << j); + msi->rba[i] &= ~(1ull << j); phb3_msi_try_send(msi, i * 64 + j, true); } } diff --git a/hw/pci-host/pnv_phb3_pbcq.c b/hw/pci-host/pnv_phb3_pbcq.c index f232228b0e..7b9a121246 100644 --- a/hw/pci-host/pnv_phb3_pbcq.c +++ b/hw/pci-host/pnv_phb3_pbcq.c @@ -173,6 +173,7 @@ static void pnv_pbcq_pci_xscom_write(void *opaque, hwaddr addr, case PBCQ_PCI_BAR2: pbcq->pci_regs[reg] = val & 0xfffffffffc000000ull; pnv_pbcq_update_map(pbcq); + break; default: phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, addr, val); diff --git a/hw/pci-host/pnv_phb4_pec.c 
b/hw/pci-host/pnv_phb4_pec.c index 68e1db3eac..911d147ffd 100644 --- a/hw/pci-host/pnv_phb4_pec.c +++ b/hw/pci-host/pnv_phb4_pec.c @@ -391,7 +391,7 @@ static void pnv_pec_realize(DeviceState *dev, Error **errp) object_property_set_int(stk_obj, i, "stack-no", &error_abort); object_property_set_link(stk_obj, OBJECT(pec), "pec", &error_abort); - object_property_set_bool(stk_obj, true, "realized", errp); + object_property_set_bool(stk_obj, true, "realized", &local_err); if (local_err) { error_propagate(errp, local_err); return; diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index 354828bf13..dd86e664d2 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -29,6 +29,8 @@ config POWERNV select XICS select XIVE select FDT_PPC + select PCI_EXPRESS + select MSI_NONBROKEN config PPC405 bool @@ -135,8 +137,6 @@ config XIVE_SPAPR default y depends on PSERIES select XIVE - select PCI - select PCIE_PORT config XIVE_KVM bool diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs index a4bac57be6..c3d3cc56eb 100644 --- a/hw/ppc/Makefile.objs +++ b/hw/ppc/Makefile.objs @@ -7,7 +7,7 @@ obj-$(CONFIG_PSERIES) += spapr.o spapr_caps.o spapr_vio.o spapr_events.o obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o obj-$(CONFIG_PSERIES) += spapr_cpu_core.o spapr_ovec.o spapr_irq.o -obj-$(CONFIG_PSERIES) += spapr_tpm_proxy.o +obj-$(CONFIG_PSERIES) += spapr_tpm_proxy.o spapr_nvdimm.o obj-$(CONFIG_SPAPR_RNG) += spapr_rng.o obj-$(call land,$(CONFIG_PSERIES),$(CONFIG_LINUX)) += spapr_pci_vfio.o spapr_pci_nvlink2.o # IBM PowerNV diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 886442e54f..af537bba2b 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -594,6 +594,7 @@ done: cpu_physical_memory_write(addr, fdt, fdt_size); } ret = fdt_size; + g_free(fdt); out: g_free(pci_map); diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 139c857b1e..e98038b809 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -582,6 +582,8 @@ static void pnv_reset(MachineState *machine) qemu_fdt_dumpdtb(fdt, fdt_totalsize(fdt)); cpu_physical_memory_write(PNV_FDT_ADDR, fdt, fdt_totalsize(fdt)); + + g_free(fdt); } static ISABus *pnv_chip_power8_isa_create(PnvChip *chip, Error **errp) diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index c9b2e0a5e0..828e2cc135 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -80,6 +80,7 @@ #include "hw/ppc/spapr_cpu_core.h" #include "hw/mem/memory-device.h" #include "hw/ppc/spapr_tpm_proxy.h" +#include "hw/ppc/spapr_nvdimm.h" #include "monitor/monitor.h" @@ -675,6 +676,14 @@ static int spapr_populate_drmem_v2(SpaprMachineState *spapr, void *fdt, size = di->size; node = di->node; + /* + * The NVDIMM area is hotpluggable after the NVDIMM is unplugged. The + * area is marked hotpluggable in the next iteration for the bigger + * chunk including the NVDIMM occupied area. 
+ */ + if (info->value->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) + continue; + /* Entry for hot-pluggable area */ if (cur_addr < addr) { drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB, cur_addr / lmb_size); @@ -1055,7 +1064,7 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt) } if (spapr->kernel_size) { - uint64_t kprop[2] = { cpu_to_be64(KERNEL_LOAD_ADDR), + uint64_t kprop[2] = { cpu_to_be64(spapr->kernel_addr), cpu_to_be64(spapr->kernel_size) }; _FDT(fdt_setprop(fdt, chosen, "qemu,boot-kernel", @@ -1243,7 +1252,8 @@ void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) /* Build memory reserve map */ if (reset) { if (spapr->kernel_size) { - _FDT((fdt_add_mem_rsv(fdt, KERNEL_LOAD_ADDR, spapr->kernel_size))); + _FDT((fdt_add_mem_rsv(fdt, spapr->kernel_addr, + spapr->kernel_size))); } if (spapr->initrd_size) { _FDT((fdt_add_mem_rsv(fdt, spapr->initrd_base, @@ -1266,12 +1276,19 @@ void *spapr_build_fdt(SpaprMachineState *spapr, bool reset, size_t space) } } + /* NVDIMM devices */ + if (mc->nvdimm_supported) { + spapr_dt_persistent_memory(fdt); + } + return fdt; } static uint64_t translate_kernel_address(void *opaque, uint64_t addr) { - return (addr & 0x0fffffff) + KERNEL_LOAD_ADDR; + SpaprMachineState *spapr = opaque; + + return (addr & 0x0fffffff) + spapr->kernel_addr; } static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, @@ -2629,6 +2646,7 @@ static void spapr_machine_init(MachineState *machine) { SpaprMachineState *spapr = SPAPR_MACHINE(machine); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + MachineClass *mc = MACHINE_GET_CLASS(machine); const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; PCIHostState *phb; @@ -2861,6 +2879,10 @@ static void spapr_machine_init(MachineState *machine) "may run and log hardware error on the destination"); } + if (mc->nvdimm_supported) { + spapr_create_nvdimm_dr_connectors(spapr); + } + /* Set up RTAS event infrastructure */ spapr_events_init(spapr); @@ -2948,14 +2970,15 @@ static void spapr_machine_init(MachineState *machine) uint64_t lowaddr = 0; spapr->kernel_size = load_elf(kernel_filename, NULL, - translate_kernel_address, NULL, + translate_kernel_address, spapr, NULL, &lowaddr, NULL, NULL, 1, PPC_ELF_MACHINE, 0, 0); if (spapr->kernel_size == ELF_LOAD_WRONG_ENDIAN) { spapr->kernel_size = load_elf(kernel_filename, NULL, - translate_kernel_address, NULL, NULL, + translate_kernel_address, spapr, NULL, &lowaddr, NULL, NULL, 0, - PPC_ELF_MACHINE, 0, 0); + PPC_ELF_MACHINE, + 0, 0); spapr->kernel_le = spapr->kernel_size > 0; } if (spapr->kernel_size < 0) { @@ -2969,7 +2992,7 @@ static void spapr_machine_init(MachineState *machine) /* Try to locate the initrd in the gap between the kernel * and the firmware. 
Add a bit of space just in case */ - spapr->initrd_base = (KERNEL_LOAD_ADDR + spapr->kernel_size + spapr->initrd_base = (spapr->kernel_addr + spapr->kernel_size + 0x1ffff) & ~0xffff; spapr->initrd_size = load_image_targphys(initrd_filename, spapr->initrd_base, @@ -3215,6 +3238,18 @@ static void spapr_set_vsmt(Object *obj, Visitor *v, const char *name, visit_type_uint32(v, name, (uint32_t *)opaque, errp); } +static void spapr_get_kernel_addr(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + visit_type_uint64(v, name, (uint64_t *)opaque, errp); +} + +static void spapr_set_kernel_addr(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + visit_type_uint64(v, name, (uint64_t *)opaque, errp); +} + static char *spapr_get_ic_mode(Object *obj, Error **errp) { SpaprMachineState *spapr = SPAPR_MACHINE(obj); @@ -3320,6 +3355,14 @@ static void spapr_instance_init(Object *obj) object_property_add_bool(obj, "vfio-no-msix-emulation", spapr_get_msix_emulation, NULL, NULL); + object_property_add(obj, "kernel-addr", "uint64", spapr_get_kernel_addr, + spapr_set_kernel_addr, NULL, &spapr->kernel_addr, + &error_abort); + object_property_set_description(obj, "kernel-addr", + stringify(KERNEL_LOAD_ADDR) + " for -kernel is the default", + NULL); + spapr->kernel_addr = KERNEL_LOAD_ADDR; /* The machine class defines the default interrupt controller mode */ spapr->irq = smc->irq; object_property_add_str(obj, "ic-mode", spapr_get_ic_mode, @@ -3430,7 +3473,8 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev, Error *local_err = NULL; SpaprMachineState *ms = SPAPR_MACHINE(hotplug_dev); PCDIMMDevice *dimm = PC_DIMM(dev); - uint64_t size, addr; + uint64_t size, addr, slot; + bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort); @@ -3439,14 +3483,24 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev, goto out; } - addr = object_property_get_uint(OBJECT(dimm), - PC_DIMM_ADDR_PROP, &local_err); - if (local_err) { - goto out_unplug; + if (!is_nvdimm) { + addr = object_property_get_uint(OBJECT(dimm), + PC_DIMM_ADDR_PROP, &local_err); + if (local_err) { + goto out_unplug; + } + spapr_add_lmbs(dev, addr, size, + spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), + &local_err); + } else { + slot = object_property_get_uint(OBJECT(dimm), + PC_DIMM_SLOT_PROP, &local_err); + if (local_err) { + goto out_unplug; + } + spapr_add_nvdimm(dev, slot, &local_err); } - spapr_add_lmbs(dev, addr, size, spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT), - &local_err); if (local_err) { goto out_unplug; } @@ -3464,6 +3518,8 @@ static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, { const SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(hotplug_dev); SpaprMachineState *spapr = SPAPR_MACHINE(hotplug_dev); + const MachineClass *mc = MACHINE_CLASS(smc); + bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); PCDIMMDevice *dimm = PC_DIMM(dev); Error *local_err = NULL; uint64_t size; @@ -3475,16 +3531,27 @@ static void spapr_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, return; } + if (is_nvdimm && !mc->nvdimm_supported) { + error_setg(errp, "NVDIMM hotplug not supported for this machine"); + return; + } + size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err); if (local_err) { error_propagate(errp, local_err); return; } - if (size % SPAPR_MEMORY_BLOCK_SIZE) { + if (!is_nvdimm && size % SPAPR_MEMORY_BLOCK_SIZE) { 
error_setg(errp, "Hotplugged memory size must be a multiple of " - "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB); + "%" PRIu64 " MB", SPAPR_MEMORY_BLOCK_SIZE / MiB); return; + } else if (is_nvdimm) { + spapr_nvdimm_validate_opts(NVDIMM(dev), size, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } } memdev = object_property_get_link(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, @@ -3624,6 +3691,12 @@ static void spapr_memory_unplug_request(HotplugHandler *hotplug_dev, int i; SpaprDrc *drc; + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + error_setg(&local_err, + "nvdimm device hot unplug is not supported yet."); + goto out; + } + size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &error_abort); nr_lmbs = size / SPAPR_MEMORY_BLOCK_SIZE; @@ -4418,6 +4491,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->update_dt_enabled = true; mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power9_v2.0"); mc->has_hotpluggable_cpus = true; + mc->nvdimm_supported = true; smc->resize_hpt_default = SPAPR_RESIZE_HPT_ENABLED; fwc->get_dev_path = spapr_get_fw_dev_path; nc->nmi_monitor_handler = spapr_nmi; @@ -4485,6 +4559,12 @@ static const TypeInfo spapr_machine_info = { }, }; +static void spapr_machine_latest_class_options(MachineClass *mc) +{ + mc->alias = "pseries"; + mc->is_default = 1; +} + #define DEFINE_SPAPR_MACHINE(suffix, verstr, latest) \ static void spapr_machine_##suffix##_class_init(ObjectClass *oc, \ void *data) \ @@ -4492,8 +4572,7 @@ static const TypeInfo spapr_machine_info = { MachineClass *mc = MACHINE_CLASS(oc); \ spapr_machine_##suffix##_class_options(mc); \ if (latest) { \ - mc->alias = "pseries"; \ - mc->is_default = 1; \ + spapr_machine_latest_class_options(mc); \ } \ } \ static const TypeInfo spapr_machine_##suffix##_info = { \ @@ -4528,6 +4607,7 @@ static void spapr_machine_4_2_class_options(MachineClass *mc) compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF; smc->default_caps.caps[SPAPR_CAP_FWNMI_MCE] = SPAPR_CAP_OFF; + mc->nvdimm_supported = false; } DEFINE_SPAPR_MACHINE(4_2, "4.2", false); diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index 17aeac3801..e373d342eb 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -22,6 +22,7 @@ #include "qemu/error-report.h" #include "hw/ppc/spapr.h" /* for RTAS return codes */ #include "hw/pci-host/spapr.h" /* spapr_phb_remove_pci_device_cb callback */ +#include "hw/ppc/spapr_nvdimm.h" #include "sysemu/device_tree.h" #include "sysemu/reset.h" #include "trace.h" @@ -455,21 +456,46 @@ void spapr_drc_reset(SpaprDrc *drc) } } -bool spapr_drc_needed(void *opaque) +static bool spapr_drc_unplug_requested_needed(void *opaque) +{ + return spapr_drc_unplug_requested(opaque); +} + +static const VMStateDescription vmstate_spapr_drc_unplug_requested = { + .name = "spapr_drc/unplug_requested", + .version_id = 1, + .minimum_version_id = 1, + .needed = spapr_drc_unplug_requested_needed, + .fields = (VMStateField []) { + VMSTATE_BOOL(unplug_requested, SpaprDrc), + VMSTATE_END_OF_LIST() + } +}; + +bool spapr_drc_transient(SpaprDrc *drc) { - SpaprDrc *drc = (SpaprDrc *)opaque; SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc); - /* If no dev is plugged in there is no need to migrate the DRC state */ + /* + * If no dev is plugged in there is no need to migrate the DRC state + * nor to reset the DRC at CAS. 
+ */ if (!drc->dev) { return false; } /* - * We need to migrate the state if it's not equal to the expected - * long-term state, which is the same as the coldplugged initial - * state */ - return (drc->state != drck->ready_state); + * We need to reset the DRC at CAS or to migrate the DRC state if it's + * not equal to the expected long-term state, which is the same as the + * coldplugged initial state, or if an unplug request is pending. + */ + return drc->state != drck->ready_state || + spapr_drc_unplug_requested(drc); +} + +static bool spapr_drc_needed(void *opaque) +{ + return spapr_drc_transient(opaque); } static const VMStateDescription vmstate_spapr_drc = { @@ -480,6 +506,10 @@ static const VMStateDescription vmstate_spapr_drc = { .fields = (VMStateField []) { VMSTATE_UINT32(state, SpaprDrc), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_spapr_drc_unplug_requested, + NULL } }; @@ -709,6 +739,17 @@ static void spapr_drc_phb_class_init(ObjectClass *k, void *data) drck->dt_populate = spapr_phb_dt_populate; } +static void spapr_drc_pmem_class_init(ObjectClass *k, void *data) +{ + SpaprDrcClass *drck = SPAPR_DR_CONNECTOR_CLASS(k); + + drck->typeshift = SPAPR_DR_CONNECTOR_TYPE_SHIFT_PMEM; + drck->typename = "PMEM"; + drck->drc_name_prefix = "PMEM "; + drck->release = NULL; + drck->dt_populate = spapr_pmem_dt_populate; +} + static const TypeInfo spapr_dr_connector_info = { .name = TYPE_SPAPR_DR_CONNECTOR, .parent = TYPE_DEVICE, @@ -759,6 +800,12 @@ static const TypeInfo spapr_drc_phb_info = { .class_init = spapr_drc_phb_class_init, }; +static const TypeInfo spapr_drc_pmem_info = { + .name = TYPE_SPAPR_DRC_PMEM, + .parent = TYPE_SPAPR_DRC_LOGICAL, + .class_init = spapr_drc_pmem_class_init, +}; + /* helper functions for external users */ SpaprDrc *spapr_drc_by_index(uint32_t index) @@ -1230,6 +1277,7 @@ static void spapr_drc_register_types(void) type_register_static(&spapr_drc_pci_info); type_register_static(&spapr_drc_lmb_info); type_register_static(&spapr_drc_phb_info); + type_register_static(&spapr_drc_pmem_info); spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator", rtas_set_indicator); diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c index 884e455f02..8b32b7eea5 100644 --- a/hw/ppc/spapr_events.c +++ b/hw/ppc/spapr_events.c @@ -196,6 +196,7 @@ struct rtas_event_log_v6_hp { #define RTAS_LOG_V6_HP_TYPE_SLOT 3 #define RTAS_LOG_V6_HP_TYPE_PHB 4 #define RTAS_LOG_V6_HP_TYPE_PCI 5 +#define RTAS_LOG_V6_HP_TYPE_PMEM 6 uint8_t hotplug_action; #define RTAS_LOG_V6_HP_ACTION_ADD 1 #define RTAS_LOG_V6_HP_ACTION_REMOVE 2 @@ -631,6 +632,9 @@ static void spapr_hotplug_req_event(uint8_t hp_id, uint8_t hp_action, case SPAPR_DR_CONNECTOR_TYPE_PHB: hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PHB; break; + case SPAPR_DR_CONNECTOR_TYPE_PMEM: + hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PMEM; + break; default: /* we shouldn't be signaling hotplug events for resources * that don't support them diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index b8bb66b5c0..6db3dbde9c 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1640,20 +1640,24 @@ static uint32_t cas_check_pvr(SpaprMachineState *spapr, PowerPCCPU *cpu, return best_compat; } -static bool spapr_hotplugged_dev_before_cas(void) +static bool spapr_transient_dev_before_cas(void) { - Object *drc_container, *obj; + Object *drc_container; ObjectProperty *prop; ObjectPropertyIterator iter; drc_container = container_get(object_get_root(), "/dr-connector"); object_property_iter_init(&iter, 
drc_container); while ((prop = object_property_iter_next(&iter))) { + SpaprDrc *drc; + if (!strstart(prop->type, "link<", NULL)) { continue; } - obj = object_property_get_link(drc_container, prop->name, NULL); - if (spapr_drc_needed(obj)) { + drc = SPAPR_DR_CONNECTOR(object_property_get_link(drc_container, + prop->name, NULL)); + + if (spapr_drc_transient(drc)) { return true; } } @@ -1830,7 +1834,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu, spapr_irq_update_active_intc(spapr); - if (spapr_hotplugged_dev_before_cas()) { + if (spapr_transient_dev_before_cas()) { spapr->cas_reboot = true; } diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c new file mode 100644 index 0000000000..74eeb8bb74 --- /dev/null +++ b/hw/ppc/spapr_nvdimm.c @@ -0,0 +1,475 @@ +/* + * QEMU PAPR Storage Class Memory Interfaces + * + * Copyright (c) 2019-2020, IBM Corporation. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/ppc/spapr_drc.h" +#include "hw/ppc/spapr_nvdimm.h" +#include "hw/mem/nvdimm.h" +#include "qemu/nvdimm-utils.h" +#include "hw/ppc/fdt.h" +#include "qemu/range.h" + +void spapr_nvdimm_validate_opts(NVDIMMDevice *nvdimm, uint64_t size, + Error **errp) +{ + char *uuidstr = NULL; + QemuUUID uuid; + + if (size % SPAPR_MINIMUM_SCM_BLOCK_SIZE) { + error_setg(errp, "NVDIMM memory size excluding the label area" + " must be a multiple of %" PRIu64 "MB", + SPAPR_MINIMUM_SCM_BLOCK_SIZE / MiB); + return; + } + + uuidstr = object_property_get_str(OBJECT(nvdimm), NVDIMM_UUID_PROP, NULL); + qemu_uuid_parse(uuidstr, &uuid); + g_free(uuidstr); + + if (qemu_uuid_is_null(&uuid)) { + error_setg(errp, "NVDIMM device requires the uuid to be set"); + return; + } +} + + +void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp) +{ + SpaprDrc *drc; + bool hotplugged = spapr_drc_hotplugged(dev); + Error *local_err = NULL; + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot); + g_assert(drc); + + spapr_drc_attach(drc, dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (hotplugged) { + spapr_hotplug_req_add_by_index(drc); + } +} + +int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, int *fdt_start_offset, Error **errp) +{ + NVDIMMDevice *nvdimm = NVDIMM(drc->dev); + + *fdt_start_offset = spapr_dt_nvdimm(fdt, 0, nvdimm); + + return 0; +} + +void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr) +{ + MachineState *machine = MACHINE(spapr); + int i; + + for (i = 0; i < machine->ram_slots; i++) { + spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_PMEM, i); + } +} + + +int spapr_dt_nvdimm(void *fdt, int parent_offset, + NVDIMMDevice *nvdimm) +{ + int child_offset; + char *buf; + SpaprDrc *drc; + uint32_t drc_idx; + uint32_t node = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_NODE_PROP, + &error_abort); + uint64_t slot = object_property_get_uint(OBJECT(nvdimm), PC_DIMM_SLOT_PROP, + &error_abort); + uint32_t associativity[] = { + cpu_to_be32(0x4), /* length */ + cpu_to_be32(0x0), cpu_to_be32(0x0), + cpu_to_be32(0x0), cpu_to_be32(node) + }; + uint64_t lsize = nvdimm->label_size; + uint64_t size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + NULL); + + drc = spapr_drc_by_id(TYPE_SPAPR_DRC_PMEM, slot); + g_assert(drc); + + drc_idx = spapr_drc_index(drc); + + buf = g_strdup_printf("ibm,pmemory@%x", drc_idx); + child_offset = fdt_add_subnode(fdt, parent_offset, buf); + g_free(buf); + + _FDT(child_offset); + + _FDT((fdt_setprop_cell(fdt, child_offset, "reg", drc_idx))); + _FDT((fdt_setprop_string(fdt, child_offset, "compatible", "ibm,pmemory"))); + _FDT((fdt_setprop_string(fdt, child_offset, "device_type", "ibm,pmemory"))); + + _FDT((fdt_setprop(fdt, child_offset, "ibm,associativity", associativity, + sizeof(associativity)))); + + buf = qemu_uuid_unparse_strdup(&nvdimm->uuid); + _FDT((fdt_setprop_string(fdt, child_offset, "ibm,unit-guid", buf))); + g_free(buf); + + _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,my-drc-index", drc_idx))); + + _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,block-size", + SPAPR_MINIMUM_SCM_BLOCK_SIZE))); + _FDT((fdt_setprop_u64(fdt, child_offset, "ibm,number-of-blocks", + size / SPAPR_MINIMUM_SCM_BLOCK_SIZE))); + _FDT((fdt_setprop_cell(fdt, child_offset, "ibm,metadata-size", lsize))); + + _FDT((fdt_setprop_string(fdt, child_offset, "ibm,pmem-application", + "operating-system"))); + _FDT(fdt_setprop(fdt, child_offset, 
"ibm,cache-flush-required", NULL, 0)); + + return child_offset; +} + +void spapr_dt_persistent_memory(void *fdt) +{ + int offset = fdt_subnode_offset(fdt, 0, "persistent-memory"); + GSList *iter, *nvdimms = nvdimm_get_device_list(); + + if (offset < 0) { + offset = fdt_add_subnode(fdt, 0, "persistent-memory"); + _FDT(offset); + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 0x1))); + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0x0))); + _FDT((fdt_setprop_string(fdt, offset, "device_type", + "ibm,persistent-memory"))); + } + + /* Create DT entries for cold plugged NVDIMM devices */ + for (iter = nvdimms; iter; iter = iter->next) { + NVDIMMDevice *nvdimm = iter->data; + + spapr_dt_nvdimm(fdt, offset, nvdimm); + } + g_slist_free(nvdimms); + + return; +} + +static target_ulong h_scm_read_metadata(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t offset = args[1]; + uint64_t len = args[2]; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + NVDIMMDevice *nvdimm; + NVDIMMClass *ddc; + uint64_t data = 0; + uint8_t buf[8] = { 0 }; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + if (len != 1 && len != 2 && + len != 4 && len != 8) { + return H_P3; + } + + nvdimm = NVDIMM(drc->dev); + if ((offset + len < offset) || + (nvdimm->label_size < len + offset)) { + return H_P2; + } + + ddc = NVDIMM_GET_CLASS(nvdimm); + ddc->read_label_data(nvdimm, buf, len, offset); + + switch (len) { + case 1: + data = ldub_p(buf); + break; + case 2: + data = lduw_be_p(buf); + break; + case 4: + data = ldl_be_p(buf); + break; + case 8: + data = ldq_be_p(buf); + break; + default: + g_assert_not_reached(); + } + + args[0] = data; + + return H_SUCCESS; +} + +static target_ulong h_scm_write_metadata(PowerPCCPU *cpu, + SpaprMachineState *spapr, + target_ulong opcode, + target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t offset = args[1]; + uint64_t data = args[2]; + uint64_t len = args[3]; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + NVDIMMDevice *nvdimm; + NVDIMMClass *ddc; + uint8_t buf[8] = { 0 }; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + if (len != 1 && len != 2 && + len != 4 && len != 8) { + return H_P4; + } + + nvdimm = NVDIMM(drc->dev); + if ((offset + len < offset) || + (nvdimm->label_size < len + offset)) { + return H_P2; + } + + switch (len) { + case 1: + if (data & 0xffffffffffffff00) { + return H_P2; + } + stb_p(buf, data); + break; + case 2: + if (data & 0xffffffffffff0000) { + return H_P2; + } + stw_be_p(buf, data); + break; + case 4: + if (data & 0xffffffff00000000) { + return H_P2; + } + stl_be_p(buf, data); + break; + case 8: + stq_be_p(buf, data); + break; + default: + g_assert_not_reached(); + } + + ddc = NVDIMM_GET_CLASS(nvdimm); + ddc->write_label_data(nvdimm, buf, len, offset); + + return H_SUCCESS; +} + +static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t starting_idx = args[1]; + uint64_t no_of_scm_blocks_to_bind = args[2]; + uint64_t target_logical_mem_addr = args[3]; + uint64_t continue_token = args[4]; + uint64_t size; + uint64_t total_no_of_scm_blocks; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + hwaddr addr; + NVDIMMDevice *nvdimm; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return 
H_PARAMETER; + } + + /* + * Currently continue token should be zero qemu has already bound + * everything and this hcall doesnt return H_BUSY. + */ + if (continue_token > 0) { + return H_P5; + } + + /* Currently qemu assigns the address. */ + if (target_logical_mem_addr != 0xffffffffffffffff) { + return H_OVERLAP; + } + + nvdimm = NVDIMM(drc->dev); + + size = object_property_get_uint(OBJECT(nvdimm), + PC_DIMM_SIZE_PROP, &error_abort); + + total_no_of_scm_blocks = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE; + + if (starting_idx > total_no_of_scm_blocks) { + return H_P2; + } + + if (((starting_idx + no_of_scm_blocks_to_bind) < starting_idx) || + ((starting_idx + no_of_scm_blocks_to_bind) > total_no_of_scm_blocks)) { + return H_P3; + } + + addr = object_property_get_uint(OBJECT(nvdimm), + PC_DIMM_ADDR_PROP, &error_abort); + + addr += starting_idx * SPAPR_MINIMUM_SCM_BLOCK_SIZE; + + /* Already bound, Return target logical address in R5 */ + args[1] = addr; + args[2] = no_of_scm_blocks_to_bind; + + return H_SUCCESS; +} + +static target_ulong h_scm_unbind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint32_t drc_index = args[0]; + uint64_t starting_scm_logical_addr = args[1]; + uint64_t no_of_scm_blocks_to_unbind = args[2]; + uint64_t continue_token = args[3]; + uint64_t size_to_unbind; + Range blockrange = range_empty; + Range nvdimmrange = range_empty; + SpaprDrc *drc = spapr_drc_by_index(drc_index); + NVDIMMDevice *nvdimm; + uint64_t size, addr; + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_PARAMETER; + } + + /* continue_token should be zero as this hcall doesn't return H_BUSY. */ + if (continue_token > 0) { + return H_P4; + } + + /* Check if starting_scm_logical_addr is block aligned */ + if (!QEMU_IS_ALIGNED(starting_scm_logical_addr, + SPAPR_MINIMUM_SCM_BLOCK_SIZE)) { + return H_P2; + } + + size_to_unbind = no_of_scm_blocks_to_unbind * SPAPR_MINIMUM_SCM_BLOCK_SIZE; + if (no_of_scm_blocks_to_unbind == 0 || no_of_scm_blocks_to_unbind != + size_to_unbind / SPAPR_MINIMUM_SCM_BLOCK_SIZE) { + return H_P3; + } + + nvdimm = NVDIMM(drc->dev); + size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + &error_abort); + addr = object_property_get_int(OBJECT(nvdimm), PC_DIMM_ADDR_PROP, + &error_abort); + + range_init_nofail(&nvdimmrange, addr, size); + range_init_nofail(&blockrange, starting_scm_logical_addr, size_to_unbind); + + if (!range_contains_range(&nvdimmrange, &blockrange)) { + return H_P3; + } + + args[1] = no_of_scm_blocks_to_unbind; + + /* let unplug take care of actual unbind */ + return H_SUCCESS; +} + +#define H_UNBIND_SCOPE_ALL 0x1 +#define H_UNBIND_SCOPE_DRC 0x2 + +static target_ulong h_scm_unbind_all(PowerPCCPU *cpu, SpaprMachineState *spapr, + target_ulong opcode, target_ulong *args) +{ + uint64_t target_scope = args[0]; + uint32_t drc_index = args[1]; + uint64_t continue_token = args[2]; + NVDIMMDevice *nvdimm; + uint64_t size; + uint64_t no_of_scm_blocks_unbound = 0; + + /* continue_token should be zero as this hcall doesn't return H_BUSY. 
*/ + if (continue_token > 0) { + return H_P4; + } + + if (target_scope == H_UNBIND_SCOPE_DRC) { + SpaprDrc *drc = spapr_drc_by_index(drc_index); + + if (!drc || !drc->dev || + spapr_drc_type(drc) != SPAPR_DR_CONNECTOR_TYPE_PMEM) { + return H_P2; + } + + nvdimm = NVDIMM(drc->dev); + size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + &error_abort); + + no_of_scm_blocks_unbound = size / SPAPR_MINIMUM_SCM_BLOCK_SIZE; + } else if (target_scope == H_UNBIND_SCOPE_ALL) { + GSList *list, *nvdimms; + + nvdimms = nvdimm_get_device_list(); + for (list = nvdimms; list; list = list->next) { + nvdimm = list->data; + size = object_property_get_int(OBJECT(nvdimm), PC_DIMM_SIZE_PROP, + &error_abort); + + no_of_scm_blocks_unbound += size / SPAPR_MINIMUM_SCM_BLOCK_SIZE; + } + g_slist_free(nvdimms); + } else { + return H_PARAMETER; + } + + args[1] = no_of_scm_blocks_unbound; + + /* let unplug take care of actual unbind */ + return H_SUCCESS; +} + +static void spapr_scm_register_types(void) +{ + /* qemu/scm specific hcalls */ + spapr_register_hypercall(H_SCM_READ_METADATA, h_scm_read_metadata); + spapr_register_hypercall(H_SCM_WRITE_METADATA, h_scm_write_metadata); + spapr_register_hypercall(H_SCM_BIND_MEM, h_scm_bind_mem); + spapr_register_hypercall(H_SCM_UNBIND_MEM, h_scm_unbind_mem); + spapr_register_hypercall(H_SCM_UNBIND_ALL, h_scm_unbind_all); +} + +type_init(spapr_scm_register_types) diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index 883fe28465..656fdd2216 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -345,6 +345,13 @@ static void rtas_ibm_os_term(PowerPCCPU *cpu, target_ulong args, uint32_t nret, target_ulong rets) { + target_ulong msgaddr = rtas_ld(args, 0); + char msg[512]; + + cpu_physical_memory_read(msgaddr, msg, sizeof(msg) - 1); + msg[sizeof(msg) - 1] = 0; + + error_report("OS terminated: %s", msg); qemu_system_guest_panicked(NULL); rtas_st(rets, 0, RTAS_OUT_SUCCESS); diff --git a/hw/ppc/virtex_ml507.c b/hw/ppc/virtex_ml507.c index 91dd00ee91..4eef70069f 100644 --- a/hw/ppc/virtex_ml507.c +++ b/hw/ppc/virtex_ml507.c @@ -188,6 +188,7 @@ static int xilinx_load_device_tree(hwaddr addr, if (r < 0) fprintf(stderr, "couldn't set /chosen/bootargs\n"); cpu_physical_memory_write(addr, fdt, fdt_size); + g_free(fdt); return fdt_size; } diff --git a/hw/sh4/sh_pci.c b/hw/sh4/sh_pci.c index 71afd23b67..08f2fc1dde 100644 --- a/hw/sh4/sh_pci.c +++ b/hw/sh4/sh_pci.c @@ -67,12 +67,8 @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val, pcic->mbr = val & 0xff000001; break; case 0x1c8: - if ((val & 0xfffc0000) != (pcic->iobr & 0xfffc0000)) { - memory_region_del_subregion(get_system_memory(), &pcic->isa); - pcic->iobr = val & 0xfffc0001; - memory_region_add_subregion(get_system_memory(), - pcic->iobr & 0xfffc0000, &pcic->isa); - } + pcic->iobr = val & 0xfffc0001; + memory_region_set_alias_offset(&pcic->isa, val & 0xfffc0000); break; case 0x220: pci_data_write(phb->bus, pcic->par, val, 4); @@ -147,8 +143,7 @@ static void sh_pci_device_realize(DeviceState *dev, Error **errp) get_system_io(), 0, 0x40000); sysbus_init_mmio(sbd, &s->memconfig_p4); sysbus_init_mmio(sbd, &s->memconfig_a7); - s->iobr = 0xfe240000; - memory_region_add_subregion(get_system_memory(), s->iobr, &s->isa); + memory_region_add_subregion(get_system_memory(), 0xfe240000, &s->isa); s->dev = pci_create_simple(phb->bus, PCI_DEVFN(0, 0), "sh_pci_host"); } diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c index 6c9ef59779..c57850a505 100644 --- a/hw/ssi/xilinx_spips.c +++ 
b/hw/ssi/xilinx_spips.c @@ -576,11 +576,11 @@ static int xilinx_spips_num_dummies(XilinxQSPIPS *qs, uint8_t command) case FAST_READ: case DOR: case QOR: + case FAST_READ_4: case DOR_4: case QOR_4: return 1; case DIOR: - case FAST_READ_4: case DIOR_4: return 2; case QIOR: diff --git a/hw/usb/hcd-ehci-sysbus.c b/hw/usb/hcd-ehci-sysbus.c index 8d4738565e..b22fb258be 100644 --- a/hw/usb/hcd-ehci-sysbus.c +++ b/hw/usb/hcd-ehci-sysbus.c @@ -33,6 +33,8 @@ static const VMStateDescription vmstate_ehci_sysbus = { static Property ehci_sysbus_properties[] = { DEFINE_PROP_UINT32("maxframes", EHCISysBusState, ehci.maxframes, 128), + DEFINE_PROP_BOOL("companion-enable", EHCISysBusState, ehci.companion_enable, + false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 8a94bd004a..1e6e85e86a 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -1870,21 +1870,6 @@ void ohci_sysbus_die(struct OHCIState *ohci) ohci_bus_stop(ohci); } -#define TYPE_SYSBUS_OHCI "sysbus-ohci" -#define SYSBUS_OHCI(obj) OBJECT_CHECK(OHCISysBusState, (obj), TYPE_SYSBUS_OHCI) - -typedef struct { - /*< private >*/ - SysBusDevice parent_obj; - /*< public >*/ - - OHCIState ohci; - char *masterbus; - uint32_t num_ports; - uint32_t firstport; - dma_addr_t dma_offset; -} OHCISysBusState; - static void ohci_realize_pxa(DeviceState *dev, Error **errp) { OHCISysBusState *s = SYSBUS_OHCI(dev); diff --git a/hw/usb/hcd-ohci.h b/hw/usb/hcd-ohci.h index 16e3f1e13a..5c8819aedf 100644 --- a/hw/usb/hcd-ohci.h +++ b/hw/usb/hcd-ohci.h @@ -22,6 +22,7 @@ #define HCD_OHCI_H #include "sysemu/dma.h" +#include "hw/usb.h" /* Number of Downstream Ports on the root hub: */ #define OHCI_MAX_PORTS 15 @@ -90,6 +91,21 @@ typedef struct OHCIState { void (*ohci_die)(struct OHCIState *ohci); } OHCIState; +#define TYPE_SYSBUS_OHCI "sysbus-ohci" +#define SYSBUS_OHCI(obj) OBJECT_CHECK(OHCISysBusState, (obj), TYPE_SYSBUS_OHCI) + +typedef struct { + /*< private >*/ + SysBusDevice parent_obj; + /*< public >*/ + + OHCIState ohci; + char *masterbus; + uint32_t num_ports; + uint32_t firstport; + dma_addr_t dma_offset; +} OHCISysBusState; + extern const VMStateDescription vmstate_ohci_state; void usb_ohci_init(OHCIState *ohci, DeviceState *dev, uint32_t num_ports, diff --git a/include/block/block.h b/include/block/block.h index 314ce63ed9..cd6b5b95aa 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -454,7 +454,7 @@ void bdrv_lock_medium(BlockDriverState *bs, bool locked); void bdrv_eject(BlockDriverState *bs, bool eject_flag); const char *bdrv_get_format_name(BlockDriverState *bs); BlockDriverState *bdrv_find_node(const char *node_name); -BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp); +BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp); XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp); BlockDriverState *bdrv_lookup_bs(const char *device, const char *node_name, diff --git a/include/block/qapi.h b/include/block/qapi.h index cd9410dee3..22c7807c89 100644 --- a/include/block/qapi.h +++ b/include/block/qapi.h @@ -29,7 +29,9 @@ #include "block/snapshot.h" BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk, - BlockDriverState *bs, Error **errp); + BlockDriverState *bs, + bool flat, + Error **errp); int bdrv_query_snapshot_info_list(BlockDriverState *bs, SnapshotInfoList **p_list, Error **errp); diff --git a/include/hw/arm/allwinner-a10.h b/include/hw/arm/allwinner-a10.h index 40d0b1d9c0..8af724548f 100644 --- a/include/hw/arm/allwinner-a10.h +++ b/include/hw/arm/allwinner-a10.h @@ 
-8,12 +8,16 @@ #include "hw/intc/allwinner-a10-pic.h" #include "hw/net/allwinner_emac.h" #include "hw/ide/ahci.h" +#include "hw/usb/hcd-ohci.h" +#include "hw/usb/hcd-ehci.h" #include "target/arm/cpu.h" #define AW_A10_SDRAM_BASE 0x40000000 +#define AW_A10_NUM_USB 2 + #define TYPE_AW_A10 "allwinner-a10" #define AW_A10(obj) OBJECT_CHECK(AwA10State, (obj), TYPE_AW_A10) @@ -28,6 +32,8 @@ typedef struct AwA10State { AwEmacState emac; AllwinnerAHCIState sata; MemoryRegion sram_a; + EHCISysBusState ehci[AW_A10_NUM_USB]; + OHCISysBusState ohci[AW_A10_NUM_USB]; } AwA10State; #endif diff --git a/include/hw/mem/nvdimm.h b/include/hw/mem/nvdimm.h index 523a9b3d4a..4807ca615b 100644 --- a/include/hw/mem/nvdimm.h +++ b/include/hw/mem/nvdimm.h @@ -25,6 +25,7 @@ #include "hw/mem/pc-dimm.h" #include "hw/acpi/bios-linker-loader.h" +#include "qemu/uuid.h" #define NVDIMM_DEBUG 0 #define nvdimm_debug(fmt, ...) \ @@ -49,6 +50,7 @@ TYPE_NVDIMM) #define NVDIMM_LABEL_SIZE_PROP "label-size" +#define NVDIMM_UUID_PROP "uuid" #define NVDIMM_UNARMED_PROP "unarmed" struct NVDIMMDevice { @@ -83,6 +85,11 @@ struct NVDIMMDevice { * the guest write persistence. */ bool unarmed; + + /* + * The PPC64 - spapr requires each nvdimm device have a uuid. + */ + QemuUUID uuid; }; typedef struct NVDIMMDevice NVDIMMDevice; diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index a1fba95c82..09110961a5 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -162,6 +162,7 @@ struct SpaprMachineState { void *fdt_blob; long kernel_size; bool kernel_le; + uint64_t kernel_addr; uint32_t initrd_base; long initrd_size; uint64_t rtc_offset; /* Now used only during incoming migration */ @@ -300,6 +301,7 @@ struct SpaprMachineState { #define H_P7 -60 #define H_P8 -61 #define H_P9 -62 +#define H_OVERLAP -68 #define H_UNSUPPORTED_FLAG -256 #define H_MULTI_THREADS_ACTIVE -9005 @@ -507,8 +509,13 @@ struct SpaprMachineState { #define H_INT_ESB 0x3C8 #define H_INT_SYNC 0x3CC #define H_INT_RESET 0x3D0 +#define H_SCM_READ_METADATA 0x3E4 +#define H_SCM_WRITE_METADATA 0x3E8 +#define H_SCM_BIND_MEM 0x3EC +#define H_SCM_UNBIND_MEM 0x3F0 +#define H_SCM_UNBIND_ALL 0x3FC -#define MAX_HCALL_OPCODE H_INT_RESET +#define MAX_HCALL_OPCODE H_SCM_UNBIND_ALL /* The hcalls above are standardized in PAPR and implemented by pHyp * as well. 
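The five H_SCM_* opcodes added above are the ones spapr_scm_register_types() wires up in spapr_nvdimm.c, and MAX_HCALL_OPCODE moves up to cover them. A minimal standalone sketch of the arithmetic behind H_SCM_BIND_MEM follows; the 1 GiB size, 0x20000000 base address and 256 MiB block size are illustrative assumptions, not values taken from the patch:

/*
 * Sketch only: mirrors the bound checks and address math in
 * h_scm_bind_mem(). The device geometry below is hypothetical.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t block_size   = 256ULL * 1024 * 1024;   /* assumed SPAPR_MINIMUM_SCM_BLOCK_SIZE */
    uint64_t dimm_size    = 1ULL << 30;             /* PC_DIMM_SIZE_PROP */
    uint64_t dimm_addr    = 0x20000000ULL;          /* PC_DIMM_ADDR_PROP */
    uint64_t total_blocks = dimm_size / block_size; /* 4 */
    uint64_t starting_idx = 2;                      /* guest-supplied block index */
    uint64_t nblocks      = 2;                      /* guest-supplied block count */

    if (starting_idx > total_blocks) {
        return 1;                                   /* h_scm_bind_mem: H_P2 */
    }
    if (starting_idx + nblocks < starting_idx ||
        starting_idx + nblocks > total_blocks) {
        return 1;                                   /* h_scm_bind_mem: H_P3 */
    }

    /* QEMU assigns the address itself; this is what comes back in R5 */
    printf("bound at 0x%" PRIx64 "\n", dimm_addr + starting_idx * block_size);
    return 0;
}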
diff --git a/include/hw/ppc/spapr_drc.h b/include/hw/ppc/spapr_drc.h index 83f03cc577..21af8deac1 100644 --- a/include/hw/ppc/spapr_drc.h +++ b/include/hw/ppc/spapr_drc.h @@ -78,6 +78,13 @@ #define SPAPR_DRC_PHB(obj) OBJECT_CHECK(SpaprDrc, (obj), \ TYPE_SPAPR_DRC_PHB) +#define TYPE_SPAPR_DRC_PMEM "spapr-drc-pmem" +#define SPAPR_DRC_PMEM_GET_CLASS(obj) \ + OBJECT_GET_CLASS(SpaprDrcClass, obj, TYPE_SPAPR_DRC_PMEM) +#define SPAPR_DRC_PMEM_CLASS(klass) \ + OBJECT_CLASS_CHECK(SpaprDrcClass, klass, TYPE_SPAPR_DRC_PMEM) +#define SPAPR_DRC_PMEM(obj) OBJECT_CHECK(SpaprDrc, (obj), \ + TYPE_SPAPR_DRC_PMEM) /* * Various hotplug types managed by SpaprDrc * @@ -95,6 +102,7 @@ typedef enum { SPAPR_DR_CONNECTOR_TYPE_SHIFT_VIO = 3, SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI = 4, SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB = 8, + SPAPR_DR_CONNECTOR_TYPE_SHIFT_PMEM = 9, } SpaprDrcTypeShift; typedef enum { @@ -104,6 +112,7 @@ typedef enum { SPAPR_DR_CONNECTOR_TYPE_VIO = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_VIO, SPAPR_DR_CONNECTOR_TYPE_PCI = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI, SPAPR_DR_CONNECTOR_TYPE_LMB = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_LMB, + SPAPR_DR_CONNECTOR_TYPE_PMEM = 1 << SPAPR_DR_CONNECTOR_TYPE_SHIFT_PMEM, } SpaprDrcType; /* @@ -269,7 +278,9 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask); void spapr_drc_attach(SpaprDrc *drc, DeviceState *d, Error **errp); void spapr_drc_detach(SpaprDrc *drc); -bool spapr_drc_needed(void *opaque); + +/* Returns true if a hot plug/unplug request is pending */ +bool spapr_drc_transient(SpaprDrc *drc); static inline bool spapr_drc_unplug_requested(SpaprDrc *drc) { diff --git a/include/hw/ppc/spapr_nvdimm.h b/include/hw/ppc/spapr_nvdimm.h new file mode 100644 index 0000000000..b3330cc485 --- /dev/null +++ b/include/hw/ppc/spapr_nvdimm.h @@ -0,0 +1,37 @@ +/* + * QEMU PowerPC PAPR SCM backend definitions + * + * Copyright (c) 2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ + +#ifndef HW_SPAPR_NVDIMM_H +#define HW_SPAPR_NVDIMM_H + +#include "hw/mem/nvdimm.h" +#include "hw/ppc/spapr.h" + +/* + * The nvdimm size should be aligned to SCM block size. + * The SCM block size should be aligned to SPAPR_MEMORY_BLOCK_SIZE + * inorder to have SCM regions not to overlap with dimm memory regions. + * The SCM devices can have variable block sizes. For now, fixing the + * block size to the minimum value. 
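 *
 * (Illustration only, not part of the patch: together with the size
 *  check in spapr_nvdimm_validate_opts(), this means the NVDIMM size
 *  excluding the label area must be a whole number of SCM blocks.
 *  Assuming SPAPR_MEMORY_BLOCK_SIZE is 256 MiB, a 1 GiB payload is
 *  accepted and exported as 1 GiB / 256 MiB = 4 "ibm,number-of-blocks",
 *  while a 1000 MiB payload would be rejected at device validation.)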
+ */ +#define SPAPR_MINIMUM_SCM_BLOCK_SIZE SPAPR_MEMORY_BLOCK_SIZE + +/* Have an explicit check for alignment */ +QEMU_BUILD_BUG_ON(SPAPR_MINIMUM_SCM_BLOCK_SIZE % SPAPR_MEMORY_BLOCK_SIZE); + +int spapr_pmem_dt_populate(SpaprDrc *drc, SpaprMachineState *spapr, + void *fdt, int *fdt_start_offset, Error **errp); +int spapr_dt_nvdimm(void *fdt, int parent_offset, NVDIMMDevice *nvdimm); +void spapr_dt_persistent_memory(void *fdt); +void spapr_nvdimm_validate_opts(NVDIMMDevice *nvdimm, uint64_t size, + Error **errp); +void spapr_add_nvdimm(DeviceState *dev, uint64_t slot, Error **errp); +void spapr_create_nvdimm_dr_connectors(SpaprMachineState *spapr); + +#endif diff --git a/include/qemu/nvdimm-utils.h b/include/qemu/nvdimm-utils.h new file mode 100644 index 0000000000..4b8b198ba7 --- /dev/null +++ b/include/qemu/nvdimm-utils.h @@ -0,0 +1,7 @@ +#ifndef NVDIMM_UTILS_H +#define NVDIMM_UTILS_H + +#include "qemu/osdep.h" + +GSList *nvdimm_get_device_list(void); +#endif diff --git a/linux-user/elfload.c b/linux-user/elfload.c index f3080a1635..b1a895f24c 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -475,8 +475,8 @@ static uint32_t get_elf_hwcap(void) GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3); GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); GET_FEATURE(ARM_FEATURE_VFP4, ARM_HWCAP_ARM_VFPv4); - GET_FEATURE_ID(arm_div, ARM_HWCAP_ARM_IDIVA); - GET_FEATURE_ID(thumb_div, ARM_HWCAP_ARM_IDIVT); + GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA); + GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT); /* All QEMU's VFPv3 CPUs have 32 registers, see VFP_DREG in translate.c. * Note that the ARM_HWCAP_ARM_VFPv3D16 bit is always the inverse of * ARM_HWCAP_ARM_VFPD32 (and so always clear for QEMU); it is unrelated diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index b237613e0d..53bc3f76c4 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -620,7 +620,7 @@ void hmp_info_block(Monitor *mon, const QDict *qdict) } /* Print node information */ - blockdev_list = qmp_query_named_block_nodes(NULL); + blockdev_list = qmp_query_named_block_nodes(false, false, NULL); for (blockdev = blockdev_list; blockdev; blockdev = blockdev->next) { assert(blockdev->value->has_node_name); if (device && strcmp(device, blockdev->value->node_name)) { diff --git a/qapi/block-core.json b/qapi/block-core.json index 37d7ea7295..85e27bb61f 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -1758,6 +1758,9 @@ # # Get the named block driver list # +# @flat: Omit the nested data about backing image ("backing-image" key) if true. 
+# Default is false (Since 5.0) +# # Returns: the list of BlockDeviceInfo # # Since: 2.0 @@ -1811,7 +1814,9 @@ # } } ] } # ## -{ 'command': 'query-named-block-nodes', 'returns': [ 'BlockDeviceInfo' ] } +{ 'command': 'query-named-block-nodes', + 'returns': [ 'BlockDeviceInfo' ], + 'data': { '*flat': 'bool' } } ## # @XDbgBlockGraphNodeType: diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index d7fbc6b1f4..c9c54de1df 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -39,9 +39,9 @@ SRST ERST DEF("convert", img_convert, - "convert [--object objectdef] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename") + "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file] [-o options] [-l snapshot_param] [-S sparse_size] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename") SRST -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME ERST DEF("create", img_create, diff --git a/qemu-img.c b/qemu-img.c index 2b4562b9d9..804630a368 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -70,6 +70,7 @@ enum { OPTION_PREALLOCATION = 265, OPTION_SHRINK = 266, OPTION_SALVAGE = 267, + OPTION_TARGET_IS_ZERO = 268, }; typedef enum OutputFormat { @@ -1984,10 +1985,9 @@ static int convert_do_copy(ImgConvertState *s) int64_t sector_num = 0; /* Check whether we have zero initialisation or can get it efficiently */ - if (s->target_is_new && s->min_sparse && !s->target_has_backing) { + if (!s->has_zero_init && s->target_is_new && s->min_sparse && + !s->target_has_backing) { s->has_zero_init = bdrv_has_zero_init(blk_bs(s->target)); - } else { - s->has_zero_init = false; } if (!s->has_zero_init && !s->target_has_backing && @@ -2086,6 +2086,7 @@ static int img_convert(int argc, char **argv) {"force-share", no_argument, 0, 'U'}, {"target-image-opts", no_argument, 0, OPTION_TARGET_IMAGE_OPTS}, {"salvage", no_argument, 0, OPTION_SALVAGE}, + {"target-is-zero", no_argument, 0, OPTION_TARGET_IS_ZERO}, {0, 0, 0, 0} }; c = getopt_long(argc, argv, ":hf:O:B:Cco:l:S:pt:T:qnm:WU", @@ -2209,6 +2210,14 @@ static int img_convert(int argc, char **argv) case OPTION_TARGET_IMAGE_OPTS: tgt_image_opts = true; break; + case OPTION_TARGET_IS_ZERO: + /* + * The user asserting that the target is blank has the + * same effect as the target driver supporting zero + * initialisation. 
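 *
 * (Illustration only, not part of the patch, with hypothetical file
 *  names: the checks added further down require -n and forbid a backing
 *  file on the target, so a valid invocation looks like
 *
 *      qemu-img convert -n --target-is-zero -O raw src.qcow2 dst.raw
 *
 *  where the caller guarantees that dst.raw already exists and currently
 *  reads back as all zeroes.)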
+ */ + s.has_zero_init = true; + break; } } @@ -2247,6 +2256,11 @@ static int img_convert(int argc, char **argv) warn_report("This will become an error in future QEMU versions."); } + if (s.has_zero_init && !skip_create) { + error_report("--target-is-zero requires use of -n flag"); + goto fail_getopt; + } + s.src_num = argc - optind - 1; out_filename = s.src_num >= 1 ? argv[argc - 1] : NULL; @@ -2380,6 +2394,12 @@ static int img_convert(int argc, char **argv) } s.target_has_backing = (bool) out_baseimg; + if (s.has_zero_init && s.target_has_backing) { + error_report("Cannot use --target-is-zero when the destination " + "image has a backing file"); + goto out; + } + if (s.src_num > 1 && out_baseimg) { error_report("Having a backing file for the target makes no sense when " "concatenating multiple input images"); @@ -2503,7 +2523,7 @@ static int img_convert(int argc, char **argv) } } - if (s.target_has_backing) { + if (s.target_has_backing && s.target_is_new) { /* Errors are treated as "backing length unknown" (which means * s.target_backing_sectors has to be negative, which it will * be automatically). The backing file length is used only diff --git a/qtest.c b/qtest.c index 12432f99cf..587dcbb4b5 100644 --- a/qtest.c +++ b/qtest.c @@ -27,7 +27,8 @@ #include "qemu/error-report.h" #include "qemu/module.h" #include "qemu/cutils.h" -#ifdef TARGET_PPC64 +#include "config-devices.h" +#ifdef CONFIG_PSERIES #include "hw/ppc/spapr_rtas.h" #endif @@ -628,7 +629,7 @@ static void qtest_process_command(CharBackend *chr, gchar **words) #else qtest_sendf(chr, "OK little\n"); #endif -#ifdef TARGET_PPC64 +#ifdef CONFIG_PSERIES } else if (strcmp(words[0], "rtas") == 0) { uint64_t res, args, ret; unsigned long nargs, nret; diff --git a/target/arm/cpu.c b/target/arm/cpu.c index de733aceeb..2eadf4dcb8 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -1009,11 +1009,10 @@ static void arm_cpu_dump_state(CPUState *cs, FILE *f, int flags) if (flags & CPU_DUMP_FPU) { int numvfpregs = 0; - if (arm_feature(env, ARM_FEATURE_VFP)) { - numvfpregs += 16; - } - if (arm_feature(env, ARM_FEATURE_VFP3)) { - numvfpregs += 16; + if (cpu_isar_feature(aa32_simd_r32, cpu)) { + numvfpregs = 32; + } else if (arm_feature(env, ARM_FEATURE_VFP)) { + numvfpregs = 16; } for (i = 0; i < numvfpregs; i++) { uint64_t v = *aa32_vfp_dreg(env, i); @@ -1586,7 +1585,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * Presence of EL2 itself is ARM_FEATURE_EL2, and of the * Security Extensions is ARM_FEATURE_EL3. 
*/ - assert(!tcg_enabled() || no_aa32 || cpu_isar_feature(arm_div, cpu)); + assert(!tcg_enabled() || no_aa32 || + cpu_isar_feature(aa32_arm_div, cpu)); set_feature(env, ARM_FEATURE_LPAE); set_feature(env, ARM_FEATURE_V7); } @@ -1612,7 +1612,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) if (arm_feature(env, ARM_FEATURE_V6)) { set_feature(env, ARM_FEATURE_V5); if (!arm_feature(env, ARM_FEATURE_M)) { - assert(!tcg_enabled() || no_aa32 || cpu_isar_feature(jazelle, cpu)); + assert(!tcg_enabled() || no_aa32 || + cpu_isar_feature(aa32_jazelle, cpu)); set_feature(env, ARM_FEATURE_AUXCR); } } @@ -1716,8 +1717,9 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) cpu); #endif } else { - cpu->id_aa64dfr0 &= ~0xf00; - cpu->id_dfr0 &= ~(0xf << 24); + cpu->isar.id_aa64dfr0 = + FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0); + cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0); cpu->pmceid0 = 0; cpu->pmceid1 = 0; } @@ -1870,10 +1872,11 @@ static void arm926_initfn(Object *obj) */ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); /* - * Similarly, we need to set MVFR0 fields to enable double precision - * and short vector support even though ARMv5 doesn't have this register. + * Similarly, we need to set MVFR0 fields to enable vfp and short vector + * support even though ARMv5 doesn't have this register. */ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); + cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1); cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1); } @@ -1912,10 +1915,11 @@ static void arm1026_initfn(Object *obj) */ cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); /* - * Similarly, we need to set MVFR0 fields to enable double precision - * and short vector support even though ARMv5 doesn't have this register. + * Similarly, we need to set MVFR0 fields to enable vfp and short vector + * support even though ARMv5 doesn't have this register. 
*/ cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); + cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1); cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1); { @@ -1955,11 +1959,11 @@ static void arm1136_r2_initfn(Object *obj) cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0x2; + cpu->isar.id_dfr0 = 0x2; cpu->id_afr0 = 0x3; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222110; + cpu->isar.id_mmfr0 = 0x01130003; + cpu->isar.id_mmfr1 = 0x10030302; + cpu->isar.id_mmfr2 = 0x01222110; cpu->isar.id_isar0 = 0x00140011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11231111; @@ -1987,11 +1991,11 @@ static void arm1136_initfn(Object *obj) cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0x2; + cpu->isar.id_dfr0 = 0x2; cpu->id_afr0 = 0x3; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222110; + cpu->isar.id_mmfr0 = 0x01130003; + cpu->isar.id_mmfr1 = 0x10030302; + cpu->isar.id_mmfr2 = 0x01222110; cpu->isar.id_isar0 = 0x00140011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11231111; @@ -2020,11 +2024,11 @@ static void arm1176_initfn(Object *obj) cpu->reset_sctlr = 0x00050078; cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x33; + cpu->isar.id_dfr0 = 0x33; cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x01130003; - cpu->id_mmfr1 = 0x10030302; - cpu->id_mmfr2 = 0x01222100; + cpu->isar.id_mmfr0 = 0x01130003; + cpu->isar.id_mmfr1 = 0x10030302; + cpu->isar.id_mmfr2 = 0x01222100; cpu->isar.id_isar0 = 0x0140011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11231121; @@ -2050,11 +2054,11 @@ static void arm11mpcore_initfn(Object *obj) cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ cpu->id_pfr0 = 0x111; cpu->id_pfr1 = 0x1; - cpu->id_dfr0 = 0; + cpu->isar.id_dfr0 = 0; cpu->id_afr0 = 0x2; - cpu->id_mmfr0 = 0x01100103; - cpu->id_mmfr1 = 0x10020302; - cpu->id_mmfr2 = 0x01222000; + cpu->isar.id_mmfr0 = 0x01100103; + cpu->isar.id_mmfr1 = 0x10020302; + cpu->isar.id_mmfr2 = 0x01222000; cpu->isar.id_isar0 = 0x00100011; cpu->isar.id_isar1 = 0x12002111; cpu->isar.id_isar2 = 0x11221011; @@ -2082,12 +2086,12 @@ static void cortex_m3_initfn(Object *obj) cpu->pmsav7_dregion = 8; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; - cpu->id_dfr0 = 0x00100000; + cpu->isar.id_dfr0 = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00000030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x00000000; - cpu->id_mmfr3 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01141110; cpu->isar.id_isar1 = 0x02111000; cpu->isar.id_isar2 = 0x21112231; @@ -2113,12 +2117,12 @@ static void cortex_m4_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000000; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000200; - cpu->id_dfr0 = 0x00100000; + cpu->isar.id_dfr0 = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00000030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x00000000; - cpu->id_mmfr3 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00000030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x00000000; + cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01141110; cpu->isar.id_isar1 = 0x02111000; cpu->isar.id_isar2 = 0x21112231; @@ -2144,12 +2148,12 @@ static void cortex_m7_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000040; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 
0x00000200; - cpu->id_dfr0 = 0x00100000; + cpu->isar.id_dfr0 = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00100030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x01000000; - cpu->id_mmfr3 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00100030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01101110; cpu->isar.id_isar1 = 0x02112000; cpu->isar.id_isar2 = 0x20232231; @@ -2177,12 +2181,12 @@ static void cortex_m33_initfn(Object *obj) cpu->isar.mvfr2 = 0x00000040; cpu->id_pfr0 = 0x00000030; cpu->id_pfr1 = 0x00000210; - cpu->id_dfr0 = 0x00200000; + cpu->isar.id_dfr0 = 0x00200000; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x00101F40; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x01000000; - cpu->id_mmfr3 = 0x00000000; + cpu->isar.id_mmfr0 = 0x00101F40; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01000000; + cpu->isar.id_mmfr3 = 0x00000000; cpu->isar.id_isar0 = 0x01101110; cpu->isar.id_isar1 = 0x02212000; cpu->isar.id_isar2 = 0x20232232; @@ -2229,12 +2233,12 @@ static void cortex_r5_initfn(Object *obj) cpu->midr = 0x411fc153; /* r1p3 */ cpu->id_pfr0 = 0x0131; cpu->id_pfr1 = 0x001; - cpu->id_dfr0 = 0x010400; + cpu->isar.id_dfr0 = 0x010400; cpu->id_afr0 = 0x0; - cpu->id_mmfr0 = 0x0210030; - cpu->id_mmfr1 = 0x00000000; - cpu->id_mmfr2 = 0x01200000; - cpu->id_mmfr3 = 0x0211; + cpu->isar.id_mmfr0 = 0x0210030; + cpu->isar.id_mmfr1 = 0x00000000; + cpu->isar.id_mmfr2 = 0x01200000; + cpu->isar.id_mmfr3 = 0x0211; cpu->isar.id_isar0 = 0x02101111; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232141; @@ -2284,18 +2288,18 @@ static void cortex_a8_initfn(Object *obj) cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x400; + cpu->isar.id_dfr0 = 0x400; cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x31100003; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01202000; - cpu->id_mmfr3 = 0x11; + cpu->isar.id_mmfr0 = 0x31100003; + cpu->isar.id_mmfr1 = 0x20000000; + cpu->isar.id_mmfr2 = 0x01202000; + cpu->isar.id_mmfr3 = 0x11; cpu->isar.id_isar0 = 0x00101111; cpu->isar.id_isar1 = 0x12112111; cpu->isar.id_isar2 = 0x21232031; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x00111142; - cpu->dbgdidr = 0x15141000; + cpu->isar.dbgdidr = 0x15141000; cpu->clidr = (1 << 27) | (2 << 24) | 3; cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ @@ -2357,18 +2361,18 @@ static void cortex_a9_initfn(Object *obj) cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x1031; cpu->id_pfr1 = 0x11; - cpu->id_dfr0 = 0x000; + cpu->isar.id_dfr0 = 0x000; cpu->id_afr0 = 0; - cpu->id_mmfr0 = 0x00100103; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01230000; - cpu->id_mmfr3 = 0x00002111; + cpu->isar.id_mmfr0 = 0x00100103; + cpu->isar.id_mmfr1 = 0x20000000; + cpu->isar.id_mmfr2 = 0x01230000; + cpu->isar.id_mmfr3 = 0x00002111; cpu->isar.id_isar0 = 0x00101111; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232041; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x00111142; - cpu->dbgdidr = 0x35141000; + cpu->isar.dbgdidr = 0x35141000; cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. 
*/ @@ -2422,12 +2426,12 @@ static void cortex_a7_initfn(Object *obj) cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x02010555; + cpu->isar.id_dfr0 = 0x02010555; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01240000; - cpu->id_mmfr3 = 0x02102211; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01240000; + cpu->isar.id_mmfr3 = 0x02102211; /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but * table 4-41 gives 0x02101110, which includes the arm div insns. */ @@ -2436,7 +2440,7 @@ static void cortex_a7_initfn(Object *obj) cpu->isar.id_isar2 = 0x21232041; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; - cpu->dbgdidr = 0x3515f005; + cpu->isar.dbgdidr = 0x3515f005; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -2468,18 +2472,18 @@ static void cortex_a15_initfn(Object *obj) cpu->reset_sctlr = 0x00c50078; cpu->id_pfr0 = 0x00001131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x02010555; + cpu->isar.id_dfr0 = 0x02010555; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10201105; - cpu->id_mmfr1 = 0x20000000; - cpu->id_mmfr2 = 0x01240000; - cpu->id_mmfr3 = 0x02102211; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x20000000; + cpu->isar.id_mmfr2 = 0x01240000; + cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232041; cpu->isar.id_isar3 = 0x11112131; cpu->isar.id_isar4 = 0x10011142; - cpu->dbgdidr = 0x3515f021; + cpu->isar.dbgdidr = 0x3515f021; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -2709,13 +2713,14 @@ static void arm_max_initfn(Object *obj) t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */ cpu->isar.mvfr2 = t; - t = cpu->id_mmfr3; + t = cpu->isar.id_mmfr3; t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */ - cpu->id_mmfr3 = t; + cpu->isar.id_mmfr3 = t; - t = cpu->id_mmfr4; + t = cpu->isar.id_mmfr4; t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */ - cpu->id_mmfr4 = t; + t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ + cpu->isar.id_mmfr4 = t; } #endif } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index e943ffe8a9..65171cb30e 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -853,6 +853,11 @@ struct ARMCPU { * prefix means a constant register. * Some of these registers are split out into a substructure that * is shared with the translators to control the ISA. + * + * Note that if you add an ID register to the ARMISARegisters struct + * you need to also update the 32-bit and 64-bit versions of the + * kvm_arm_get_host_cpu_features() function to correctly populate the + * field by reading the value from the KVM vCPU. 
*/ struct ARMISARegisters { uint32_t id_isar0; @@ -862,9 +867,16 @@ struct ARMCPU { uint32_t id_isar4; uint32_t id_isar5; uint32_t id_isar6; + uint32_t id_mmfr0; + uint32_t id_mmfr1; + uint32_t id_mmfr2; + uint32_t id_mmfr3; + uint32_t id_mmfr4; uint32_t mvfr0; uint32_t mvfr1; uint32_t mvfr2; + uint32_t id_dfr0; + uint32_t dbgdidr; uint64_t id_aa64isar0; uint64_t id_aa64isar1; uint64_t id_aa64pfr0; @@ -872,6 +884,8 @@ struct ARMCPU { uint64_t id_aa64mmfr0; uint64_t id_aa64mmfr1; uint64_t id_aa64mmfr2; + uint64_t id_aa64dfr0; + uint64_t id_aa64dfr1; } isar; uint32_t midr; uint32_t revidr; @@ -880,20 +894,11 @@ struct ARMCPU { uint32_t reset_sctlr; uint32_t id_pfr0; uint32_t id_pfr1; - uint32_t id_dfr0; uint64_t pmceid0; uint64_t pmceid1; uint32_t id_afr0; - uint32_t id_mmfr0; - uint32_t id_mmfr1; - uint32_t id_mmfr2; - uint32_t id_mmfr3; - uint32_t id_mmfr4; - uint64_t id_aa64dfr0; - uint64_t id_aa64dfr1; uint64_t id_aa64afr0; uint64_t id_aa64afr1; - uint32_t dbgdidr; uint32_t clidr; uint64_t mp_affinity; /* MP ID without feature bits */ /* The elements of this array are the CCSIDR values for each cache, @@ -1821,6 +1826,16 @@ FIELD(ID_AA64MMFR2, BBM, 52, 4) FIELD(ID_AA64MMFR2, EVT, 56, 4) FIELD(ID_AA64MMFR2, E0PD, 60, 4) +FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) +FIELD(ID_AA64DFR0, TRACEVER, 4, 4) +FIELD(ID_AA64DFR0, PMUVER, 8, 4) +FIELD(ID_AA64DFR0, BRPS, 12, 4) +FIELD(ID_AA64DFR0, WRPS, 20, 4) +FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) +FIELD(ID_AA64DFR0, PMSVER, 32, 4) +FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) +FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) + FIELD(ID_DFR0, COPDBG, 0, 4) FIELD(ID_DFR0, COPSDBG, 4, 4) FIELD(ID_DFR0, MMAPDBG, 8, 4) @@ -1830,6 +1845,13 @@ FIELD(ID_DFR0, MPROFDBG, 20, 4) FIELD(ID_DFR0, PERFMON, 24, 4) FIELD(ID_DFR0, TRACEFILT, 28, 4) +FIELD(DBGDIDR, SE_IMP, 12, 1) +FIELD(DBGDIDR, NSUHD_IMP, 14, 1) +FIELD(DBGDIDR, VERSION, 16, 4) +FIELD(DBGDIDR, CTX_CMPS, 20, 4) +FIELD(DBGDIDR, BRPS, 24, 4) +FIELD(DBGDIDR, WRPS, 28, 4) + FIELD(MVFR0, SIMDREG, 0, 4) FIELD(MVFR0, FPSP, 4, 4) FIELD(MVFR0, FPDP, 8, 4) @@ -3325,19 +3347,35 @@ static inline uint64_t *aa64_vfp_qreg(CPUARMState *env, unsigned regno) extern const uint64_t pred_esz_masks[4]; /* + * Naming convention for isar_feature functions: + * Functions which test 32-bit ID registers should have _aa32_ in + * their name. Functions which test 64-bit ID registers should have + * _aa64_ in their name. These must only be used in code where we + * know for certain that the CPU has AArch32 or AArch64 respectively + * or where the correct answer for a CPU which doesn't implement that + * CPU state is "false" (eg when generating A32 or A64 code, if adding + * system registers that are specific to that CPU state, for "should + * we let this system register bit be set" tests where the 32-bit + * flavour of the register doesn't have the bit, and so on). + * Functions which simply ask "does this feature exist at all" have + * _any_ in their name, and always return the logical OR of the _aa64_ + * and the _aa32_ function. + */ + +/* * 32-bit feature tests via id registers. 
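 *
 * (Illustration only, not part of the patch: the hunks below also move
 *  the MVFR* tests from FIELD_EX64 to FIELD_EX32, matching the uint32_t
 *  storage of those registers, and fix isar_feature_aa32_pan() and
 *  isar_feature_aa32_ats1e1() so that
 *
 *      FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN)
 *
 *  reads the PAN field from the register that actually carries it,
 *  rather than from mvfr0.)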
*/ -static inline bool isar_feature_thumb_div(const ARMISARegisters *id) +static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; } -static inline bool isar_feature_arm_div(const ARMISARegisters *id) +static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; } -static inline bool isar_feature_jazelle(const ARMISARegisters *id) +static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) { return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; } @@ -3412,21 +3450,21 @@ static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; } -static inline bool isar_feature_aa32_fp_d32(const ARMISARegisters *id) +static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id) { /* Return true if D16-D31 are implemented */ - return FIELD_EX64(id->mvfr0, MVFR0, SIMDREG) >= 2; + return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2; } static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr0, MVFR0, FPSHVEC) > 0; + return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0; } static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id) { /* Return true if CPU supports double precision floating point */ - return FIELD_EX64(id->mvfr0, MVFR0, FPDP) > 0; + return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0; } /* @@ -3436,42 +3474,66 @@ static inline bool isar_feature_aa32_fpdp(const ARMISARegisters *id) */ static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 0; + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0; } static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr1, MVFR1, FPHP) > 1; + return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1; } static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 1; + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1; } static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 2; + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2; } static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 3; + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3; } static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr2, MVFR2, FPMISC) >= 4; + return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4; } static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) != 0; + return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; } static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id) { - return FIELD_EX64(id->mvfr0, ID_MMFR3, PAN) >= 2; + return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; +} + +static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id) +{ + /* 0xf means "non-standard IMPDEF PMU" */ + return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && + FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; +} + +static inline bool isar_feature_aa32_hpd(const 
ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; +} + +static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; } /* @@ -3653,6 +3715,41 @@ static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; } +static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) +{ + return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && + FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id) +{ + return FIELD_EX32(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && + FIELD_EX32(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; +} + +/* + * Feature tests for "does this exist in either 32-bit or 64-bit?" + */ +static inline bool isar_feature_any_fp16(const ARMISARegisters *id) +{ + return isar_feature_aa64_fp16(id) || isar_feature_aa32_fp16_arith(id); +} + +static inline bool isar_feature_any_predinv(const ARMISARegisters *id) +{ + return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id); +} + +static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id); +} + +static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id) +{ + return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id); +} + /* * Forward to the above feature tests given an ARMCPU pointer. */ diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index f0d98bc79d..0929401a4d 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -121,12 +121,12 @@ static void aarch64_a57_initfn(Object *obj) cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x03010066; + cpu->isar.id_dfr0 = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01260000; - cpu->id_mmfr3 = 0x02102211; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232042; @@ -135,10 +135,10 @@ static void aarch64_a57_initfn(Object *obj) cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_isar6 = 0; cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64dfr0 = 0x10305106; cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->dbgdidr = 0x3516d000; + cpu->isar.dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ @@ -175,12 +175,12 @@ static void aarch64_a53_initfn(Object *obj) cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x03010066; + cpu->isar.id_dfr0 = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10101105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01260000; - cpu->id_mmfr3 = 0x02102211; + cpu->isar.id_mmfr0 = 0x10101105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232042; @@ -189,10 +189,10 @@ static void aarch64_a53_initfn(Object *obj) cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_isar6 = 0; cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->id_aa64dfr0 = 
0x10305106; + cpu->isar.id_aa64dfr0 = 0x10305106; cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ - cpu->dbgdidr = 0x3516d000; + cpu->isar.dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ @@ -228,12 +228,12 @@ static void aarch64_a72_initfn(Object *obj) cpu->reset_sctlr = 0x00c50838; cpu->id_pfr0 = 0x00000131; cpu->id_pfr1 = 0x00011011; - cpu->id_dfr0 = 0x03010066; + cpu->isar.id_dfr0 = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->id_mmfr0 = 0x10201105; - cpu->id_mmfr1 = 0x40000000; - cpu->id_mmfr2 = 0x01260000; - cpu->id_mmfr3 = 0x02102211; + cpu->isar.id_mmfr0 = 0x10201105; + cpu->isar.id_mmfr1 = 0x40000000; + cpu->isar.id_mmfr2 = 0x01260000; + cpu->isar.id_mmfr3 = 0x02102211; cpu->isar.id_isar0 = 0x02101110; cpu->isar.id_isar1 = 0x13112111; cpu->isar.id_isar2 = 0x21232042; @@ -241,10 +241,10 @@ static void aarch64_a72_initfn(Object *obj) cpu->isar.id_isar4 = 0x00011142; cpu->isar.id_isar5 = 0x00011121; cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->id_aa64dfr0 = 0x10305106; + cpu->isar.id_aa64dfr0 = 0x10305106; cpu->isar.id_aa64isar0 = 0x00011120; cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->dbgdidr = 0x3516d000; + cpu->isar.dbgdidr = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ @@ -699,9 +699,21 @@ static void aarch64_max_initfn(Object *obj) u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1); cpu->isar.id_isar6 = u; - u = cpu->id_mmfr3; + u = cpu->isar.id_mmfr3; u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */ - cpu->id_mmfr3 = u; + cpu->isar.id_mmfr3 = u; + + u = cpu->isar.id_mmfr4; + u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ + cpu->isar.id_mmfr4 = u; + + u = cpu->isar.id_aa64dfr0; + u = FIELD_DP64(u, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */ + cpu->isar.id_aa64dfr0 = u; + + u = cpu->isar.id_dfr0; + u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */ + cpu->isar.id_dfr0 = u; /* * FIXME: We do not yet support ARMv8.2-fp16 for AArch32 yet, diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index 2e3e90c6a5..2ff72d47d1 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -16,8 +16,8 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn) { CPUARMState *env = &cpu->env; uint64_t bcr = env->cp15.dbgbcr[lbn]; - int brps = extract32(cpu->dbgdidr, 24, 4); - int ctx_cmps = extract32(cpu->dbgdidr, 20, 4); + int brps = arm_num_brps(cpu); + int ctx_cmps = arm_num_ctx_cmps(cpu); int bt; uint32_t contextidr; uint64_t hcr_el2; @@ -29,7 +29,7 @@ static bool linked_bp_matches(ARMCPU *cpu, int lbn) * case DBGWCR<n>_EL1.LBN must indicate that breakpoint). * We choose the former. 
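 *
 * (Illustration only, not part of the patch text: brps and ctx_cmps are
 *  now taken from arm_num_brps() and arm_num_ctx_cmps(), which return
 *  the actual number of breakpoints and context comparators, whereas the
 *  raw DBGDIDR/ID_AA64DFR0 fields they replace hold "number minus one";
 *  hence the check below becomes "lbn >= brps", and the
 *  register-definition loops in helper.c later drop their "+ 1".)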
*/ - if (lbn > brps || lbn < (brps - ctx_cmps)) { + if (lbn >= brps || lbn < (brps - ctx_cmps)) { return false; } diff --git a/target/arm/helper-sve.h b/target/arm/helper-sve.h index 9e79182ab4..2f47279155 100644 --- a/target/arm/helper-sve.h +++ b/target/arm/helper-sve.h @@ -1574,3 +1574,5 @@ DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr, tl, i32) + +DEF_HELPER_FLAGS_4(sve2_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) diff --git a/target/arm/helper.c b/target/arm/helper.c index 366dbcf460..79db169e04 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -25,6 +25,7 @@ #include "hw/semihosting/semihost.h" #include "sysemu/cpus.h" #include "sysemu/kvm.h" +#include "sysemu/tcg.h" #include "qemu/range.h" #include "qapi/qapi-commands-machine-target.h" #include "qapi/error.h" @@ -49,10 +50,10 @@ static void switch_mode(CPUARMState *env, int mode); static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) { - int nregs; + ARMCPU *cpu = env_archcpu(env); + int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; /* VFP data registers are always little-endian. */ - nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; if (reg < nregs) { stq_le_p(buf, *aa32_vfp_dreg(env, reg)); return 8; @@ -77,9 +78,9 @@ static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg) static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) { - int nregs; + ARMCPU *cpu = env_archcpu(env); + int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; - nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16; if (reg < nregs) { *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); return 8; @@ -905,8 +906,7 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri, /* VFPv3 and upwards with NEON implement 32 double precision * registers (D0-D31). */ - if (!arm_feature(env, ARM_FEATURE_NEON) || - !arm_feature(env, ARM_FEATURE_VFP3)) { + if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) { /* D32DIS [30] is RAO/WI if D16-31 are not implemented. */ value |= (1 << 30); } @@ -1016,11 +1016,17 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { #define PMCRN_MASK 0xf800 #define PMCRN_SHIFT 11 #define PMCRLC 0x40 -#define PMCRDP 0x10 +#define PMCRDP 0x20 +#define PMCRX 0x10 #define PMCRD 0x8 #define PMCRC 0x4 #define PMCRP 0x2 #define PMCRE 0x1 +/* + * Mask of PMCR bits writeable by guest (not including WO bits like C, P, + * which can be written as 1 to trigger behaviour but which stay RAZ). 
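 *
 * (Illustration only, not part of the patch: with the corrected defines
 *  above this mask works out to 0x40 | 0x20 | 0x10 | 0x8 | 0x1 = 0x79,
 *  i.e. the previously open-coded 0x39 (E, D, X and DP) plus PMCRLC, so
 *  guests can now also write PMCR.LC.)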
+ */ +#define PMCR_WRITEABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE) #define PMXEVTYPER_P 0x80000000 #define PMXEVTYPER_U 0x40000000 @@ -1123,6 +1129,30 @@ static int64_t instructions_ns_per(uint64_t icount) } #endif +static bool pmu_8_1_events_supported(CPUARMState *env) +{ + /* For events which are supported in any v8.1 PMU */ + return cpu_isar_feature(any_pmu_8_1, env_archcpu(env)); +} + +static bool pmu_8_4_events_supported(CPUARMState *env) +{ + /* For events which are supported in any v8.1 PMU */ + return cpu_isar_feature(any_pmu_8_4, env_archcpu(env)); +} + +static uint64_t zero_event_get_count(CPUARMState *env) +{ + /* For events which on QEMU never fire, so their count is always zero */ + return 0; +} + +static int64_t zero_event_ns_per(uint64_t cycles) +{ + /* An event which never fires can never overflow */ + return -1; +} + static const pm_event pm_events[] = { { .number = 0x000, /* SW_INCR */ .supported = event_always_supported, @@ -1139,8 +1169,23 @@ static const pm_event pm_events[] = { .supported = event_always_supported, .get_count = cycles_get_count, .ns_per_count = cycles_ns_per, - } + }, #endif + { .number = 0x023, /* STALL_FRONTEND */ + .supported = pmu_8_1_events_supported, + .get_count = zero_event_get_count, + .ns_per_count = zero_event_ns_per, + }, + { .number = 0x024, /* STALL_BACKEND */ + .supported = pmu_8_1_events_supported, + .get_count = zero_event_get_count, + .ns_per_count = zero_event_ns_per, + }, + { .number = 0x03c, /* STALL */ + .supported = pmu_8_4_events_supported, + .get_count = zero_event_get_count, + .ns_per_count = zero_event_ns_per, + }, }; /* @@ -1149,7 +1194,7 @@ static const pm_event pm_events[] = { * should first be updated to something sparse instead of the current * supported_event_map[] array. */ -#define MAX_EVENT_ID 0x11 +#define MAX_EVENT_ID 0x3c #define UNSUPPORTED_EVENT UINT16_MAX static uint16_t supported_event_map[MAX_EVENT_ID + 1]; @@ -1536,9 +1581,8 @@ static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri, } } - /* only the DP, X, D and E bits are writable */ - env->cp15.c9_pmcr &= ~0x39; - env->cp15.c9_pmcr |= (value & 0x39); + env->cp15.c9_pmcr &= ~PMCR_WRITEABLE_MASK; + env->cp15.c9_pmcr |= (value & PMCR_WRITEABLE_MASK); pmu_op_finish(env); } @@ -6251,26 +6295,16 @@ static void define_debug_regs(ARMCPU *cpu) ARMCPRegInfo dbgdidr = { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = cpu->dbgdidr, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, }; /* Note that all these register fields hold "number of Xs minus 1". */ - brps = extract32(cpu->dbgdidr, 24, 4); - wrps = extract32(cpu->dbgdidr, 28, 4); - ctx_cmps = extract32(cpu->dbgdidr, 20, 4); + brps = arm_num_brps(cpu); + wrps = arm_num_wrps(cpu); + ctx_cmps = arm_num_ctx_cmps(cpu); assert(ctx_cmps <= brps); - /* The DBGDIDR and ID_AA64DFR0_EL1 define various properties - * of the debug registers such as number of breakpoints; - * check that if they both exist then they agree. 
- */ - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - assert(extract32(cpu->id_aa64dfr0, 12, 4) == brps); - assert(extract32(cpu->id_aa64dfr0, 20, 4) == wrps); - assert(extract32(cpu->id_aa64dfr0, 28, 4) == ctx_cmps); - } - define_one_arm_cp_reg(cpu, &dbgdidr); define_arm_cp_regs(cpu, debug_cp_reginfo); @@ -6278,7 +6312,7 @@ static void define_debug_regs(ARMCPU *cpu) define_arm_cp_regs(cpu, debug_lpae_cp_reginfo); } - for (i = 0; i < brps + 1; i++) { + for (i = 0; i < brps; i++) { ARMCPRegInfo dbgregs[] = { { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4, @@ -6297,7 +6331,7 @@ static void define_debug_regs(ARMCPU *cpu) define_arm_cp_regs(cpu, dbgregs); } - for (i = 0; i < wrps + 1; i++) { + for (i = 0; i < wrps; i++) { ARMCPRegInfo dbgregs[] = { { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH, .cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6, @@ -6317,6 +6351,96 @@ static void define_debug_regs(ARMCPU *cpu) } } +static void define_pmu_regs(ARMCPU *cpu) +{ + /* + * v7 performance monitor control register: same implementor + * field as main ID register, and we implement four counters in + * addition to the cycle count register. + */ + unsigned int i, pmcrn = 4; + ARMCPRegInfo pmcr = { + .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, + .type = ARM_CP_IO | ARM_CP_ALIAS, + .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), + .accessfn = pmreg_access, .writefn = pmcr_write, + .raw_writefn = raw_write, + }; + ARMCPRegInfo pmcr64 = { + .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), + .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) | + PMCRLC, + .writefn = pmcr_write, .raw_writefn = raw_write, + }; + define_one_arm_cp_reg(cpu, &pmcr); + define_one_arm_cp_reg(cpu, &pmcr64); + for (i = 0; i < pmcrn; i++) { + char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); + char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); + char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); + char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); + ARMCPRegInfo pmev_regs[] = { + { .name = pmevcntr_name, .cp = 15, .crn = 14, + .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, + .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, + .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, + .accessfn = pmreg_access }, + { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, + .raw_readfn = pmevcntr_rawread, + .raw_writefn = pmevcntr_rawwrite }, + { .name = pmevtyper_name, .cp = 15, .crn = 14, + .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, + .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, + .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, + .accessfn = pmreg_access }, + { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), + .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, + .type = ARM_CP_IO, + .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, + .raw_writefn = pmevtyper_rawwrite }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, pmev_regs); + g_free(pmevcntr_name); + 
g_free(pmevcntr_el0_name); + g_free(pmevtyper_name); + g_free(pmevtyper_el0_name); + } + if (cpu_isar_feature(aa32_pmu_8_1, cpu)) { + ARMCPRegInfo v81_pmu_regs[] = { + { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = extract64(cpu->pmceid0, 32, 32) }, + { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, + .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = extract64(cpu->pmceid1, 32, 32) }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, v81_pmu_regs); + } + if (cpu_isar_feature(any_pmu_8_4, cpu)) { + static const ARMCPRegInfo v84_pmmir = { + .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6, + .access = PL1_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, + .resetvalue = 0 + }; + define_one_arm_cp_reg(cpu, &v84_pmmir); + } +} + /* We don't know until after realize whether there's a GICv3 * attached, and that is what registers the gicv3 sysregs. * So we have to fill in the GIC fields in ID_PFR/ID_PFR1_EL1/ID_AA64PFR0_EL1 @@ -6737,6 +6861,27 @@ static const ARMCPRegInfo ats1cp_reginfo[] = { }; #endif +/* + * ACTLR2 and HACTLR2 map to ACTLR_EL1[63:32] and + * ACTLR_EL2[63:32]. They exist only if the ID_MMFR4.AC2 field + * is non-zero, which is never for ARMv7, optionally in ARMv8 + * and mandatorily for ARMv8.2 and up. + * ACTLR2 is banked for S and NS if EL3 is AArch32. Since QEMU's + * implementation is RAZ/WI we can ignore this detail, as we + * do for ACTLR. + */ +static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { + { .name = "ACTLR2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 3, + .access = PL1_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + { .name = "HACTLR2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, + .access = PL2_RW, .type = ARM_CP_CONST, + .resetvalue = 0 }, + REGINFO_SENTINEL +}; + void register_cp_regs_for_features(ARMCPU *cpu) { /* Register all the coprocessor registers based on feature bits */ @@ -6775,7 +6920,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_dfr0 }, + .resetvalue = cpu->isar.id_dfr0 }, { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -6785,22 +6930,22 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_mmfr0 }, + .resetvalue = cpu->isar.id_mmfr0 }, { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_mmfr1 }, + .resetvalue = cpu->isar.id_mmfr1 }, { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_mmfr2 }, + .resetvalue = cpu->isar.id_mmfr2 }, { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_mmfr3 
}, + .resetvalue = cpu->isar.id_mmfr3 }, { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, @@ -6835,7 +6980,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->id_mmfr4 }, + .resetvalue = cpu->isar.id_mmfr4 }, { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, @@ -6859,67 +7004,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, pmovsset_cp_reginfo); } if (arm_feature(env, ARM_FEATURE_V7)) { - /* v7 performance monitor control register: same implementor - * field as main ID register, and we implement four counters in - * addition to the cycle count register. - */ - unsigned int i, pmcrn = 4; - ARMCPRegInfo pmcr = { - .name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0, - .access = PL0_RW, - .type = ARM_CP_IO | ARM_CP_ALIAS, - .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), - .accessfn = pmreg_access, .writefn = pmcr_write, - .raw_writefn = raw_write, - }; - ARMCPRegInfo pmcr64 = { - .name = "PMCR_EL0", .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, - .access = PL0_RW, .accessfn = pmreg_access, - .type = ARM_CP_IO, - .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), - .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT), - .writefn = pmcr_write, .raw_writefn = raw_write, - }; - define_one_arm_cp_reg(cpu, &pmcr); - define_one_arm_cp_reg(cpu, &pmcr64); - for (i = 0; i < pmcrn; i++) { - char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); - char *pmevcntr_el0_name = g_strdup_printf("PMEVCNTR%d_EL0", i); - char *pmevtyper_name = g_strdup_printf("PMEVTYPER%d", i); - char *pmevtyper_el0_name = g_strdup_printf("PMEVTYPER%d_EL0", i); - ARMCPRegInfo pmev_regs[] = { - { .name = pmevcntr_name, .cp = 15, .crn = 14, - .crm = 8 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, - .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, - .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, - .accessfn = pmreg_access }, - { .name = pmevcntr_el0_name, .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 8 | (3 & (i >> 3)), - .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, - .type = ARM_CP_IO, - .readfn = pmevcntr_readfn, .writefn = pmevcntr_writefn, - .raw_readfn = pmevcntr_rawread, - .raw_writefn = pmevcntr_rawwrite }, - { .name = pmevtyper_name, .cp = 15, .crn = 14, - .crm = 12 | (3 & (i >> 3)), .opc1 = 0, .opc2 = i & 7, - .access = PL0_RW, .type = ARM_CP_IO | ARM_CP_ALIAS, - .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, - .accessfn = pmreg_access }, - { .name = pmevtyper_el0_name, .state = ARM_CP_STATE_AA64, - .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 12 | (3 & (i >> 3)), - .opc2 = i & 7, .access = PL0_RW, .accessfn = pmreg_access, - .type = ARM_CP_IO, - .readfn = pmevtyper_readfn, .writefn = pmevtyper_writefn, - .raw_writefn = pmevtyper_rawwrite }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, pmev_regs); - g_free(pmevcntr_name); - g_free(pmevcntr_el0_name); - g_free(pmevtyper_name); - g_free(pmevtyper_el0_name); - } ARMCPRegInfo clidr = { .name = "CLIDR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1, @@ -6930,24 +7014,10 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_one_arm_cp_reg(cpu, &clidr); 
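Note: the PMCEID2/PMCEID3 registration that used to be guarded just below by an open-coded ID_DFR0.PERFMON test is now performed from define_pmu_regs() behind cpu_isar_feature(aa32_pmu_8_1, cpu). The feature-test helper itself lives in target/arm/cpu.h and is not part of the hunks shown here; as a rough sketch of what that test amounts to, it is essentially the same PERFMON field check, with 0xf denoting a non-standard IMPDEF PMU:

/* Illustrative sketch only; the real helper is defined in target/arm/cpu.h. */
static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
{
    /* A v8.1-compatible PMU: PERFMON >= 4, excluding 0xf ("IMPDEF PMU") */
    return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
           FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
}

The 8.4 variant used for PMMIR presumably follows the same pattern with PERFMON >= 5.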
define_arm_cp_regs(cpu, v7_cp_reginfo); define_debug_regs(cpu); + define_pmu_regs(cpu); } else { define_arm_cp_regs(cpu, not_v7_cp_reginfo); } - if (FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) >= 4 && - FIELD_EX32(cpu->id_dfr0, ID_DFR0, PERFMON) != 0xf) { - ARMCPRegInfo v81_pmu_regs[] = { - { .name = "PMCEID2", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .resetvalue = extract64(cpu->pmceid0, 32, 32) }, - { .name = "PMCEID3", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 5, - .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, - .resetvalue = extract64(cpu->pmceid1, 32, 32) }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, v81_pmu_regs); - } if (arm_feature(env, ARM_FEATURE_V8)) { /* AArch64 ID registers, which all have impdef reset values. * Note that within the ID register ranges the unused slots @@ -7005,12 +7075,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->id_aa64dfr0 }, + .resetvalue = cpu->isar.id_aa64dfr0 }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->id_aa64dfr1 }, + .resetvalue = cpu->isar.id_aa64dfr1 }, { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -7358,8 +7428,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) } else { define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo); define_arm_cp_regs(cpu, vmsa_cp_reginfo); - /* TTCBR2 is introduced with ARMv8.2-A32HPD. */ - if (FIELD_EX32(cpu->id_mmfr4, ID_MMFR4, HPDS) != 0) { + /* TTCBR2 is introduced with ARMv8.2-AA32HPD. */ + if (cpu_isar_feature(aa32_hpd, cpu)) { define_one_arm_cp_reg(cpu, &ttbcr2_reginfo); } } @@ -7396,7 +7466,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_LPAE)) { define_arm_cp_regs(cpu, lpae_cp_reginfo); } - if (cpu_isar_feature(jazelle, cpu)) { + if (cpu_isar_feature(aa32_jazelle, cpu)) { define_arm_cp_regs(cpu, jazelle_regs); } /* Slightly awkwardly, the OMAP and StrongARM cores need all of @@ -7573,15 +7643,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) REGINFO_SENTINEL }; define_arm_cp_regs(cpu, auxcr_reginfo); - if (arm_feature(env, ARM_FEATURE_V8)) { - /* HACTLR2 maps to ACTLR_EL2[63:32] and is not in ARMv7 */ - ARMCPRegInfo hactlr2_reginfo = { - .name = "HACTLR2", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 4, .crn = 1, .crm = 0, .opc2 = 3, - .access = PL2_RW, .type = ARM_CP_CONST, - .resetvalue = 0 - }; - define_one_arm_cp_reg(cpu, &hactlr2_reginfo); + if (cpu_isar_feature(aa32_ac2, cpu)) { + define_arm_cp_regs(cpu, actlr2_hactlr2_reginfo); } } @@ -7721,14 +7784,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) #endif /*CONFIG_USER_ONLY*/ #endif - /* - * While all v8.0 cpus support aarch64, QEMU does have configurations - * that do not set ID_AA64ISAR1, e.g. user-only qemu-arm -cpu max, - * which will set ID_ISAR6. - */ - if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64) - ? 
cpu_isar_feature(aa64_predinv, cpu) - : cpu_isar_feature(aa32_predinv, cpu)) { + if (cpu_isar_feature(any_predinv, cpu)) { define_arm_cp_regs(cpu, predinv_reginfo); } @@ -7755,7 +7811,7 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) } else if (arm_feature(env, ARM_FEATURE_NEON)) { gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 51, "arm-neon.xml", 0); - } else if (arm_feature(env, ARM_FEATURE_VFP3)) { + } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, 35, "arm-vfp3.xml", 0); } else if (arm_feature(env, ARM_FEATURE_VFP)) { @@ -8858,7 +8914,7 @@ static void take_aarch32_exception(CPUARMState *env, int new_mode, env->elr_el[2] = env->regs[15]; } else { /* CPSR.PAN is normally preserved preserved unless... */ - if (cpu_isar_feature(aa64_pan, env_archcpu(env))) { + if (cpu_isar_feature(aa32_pan, env_archcpu(env))) { switch (new_el) { case 3: if (!arm_is_secure_below_el3(env)) { @@ -10234,58 +10290,82 @@ static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs) } #endif /* !CONFIG_USER_ONLY */ -ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va, - ARMMMUIdx mmu_idx) +static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx) { - uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; - bool tbi, tbid, epd, hpd, using16k, using64k; - int select, tsz; + if (regime_has_2_ranges(mmu_idx)) { + return extract64(tcr, 37, 2); + } else if (mmu_idx == ARMMMUIdx_Stage2) { + return 0; /* VTCR_EL2 */ + } else { + return extract32(tcr, 20, 1); + } +} - /* - * Bit 55 is always between the two regions, and is canonical for - * determining if address tagging is enabled. - */ - select = extract64(va, 55, 1); +static int aa64_va_parameter_tbid(uint64_t tcr, ARMMMUIdx mmu_idx) +{ + if (regime_has_2_ranges(mmu_idx)) { + return extract64(tcr, 51, 2); + } else if (mmu_idx == ARMMMUIdx_Stage2) { + return 0; /* VTCR_EL2 */ + } else { + return extract32(tcr, 29, 1); + } +} + +ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, + ARMMMUIdx mmu_idx, bool data) +{ + uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; + bool epd, hpd, using16k, using64k; + int select, tsz, tbi; if (!regime_has_2_ranges(mmu_idx)) { + select = 0; tsz = extract32(tcr, 0, 6); using64k = extract32(tcr, 14, 1); using16k = extract32(tcr, 15, 1); if (mmu_idx == ARMMMUIdx_Stage2) { /* VTCR_EL2 */ - tbi = tbid = hpd = false; + hpd = false; } else { - tbi = extract32(tcr, 20, 1); hpd = extract32(tcr, 24, 1); - tbid = extract32(tcr, 29, 1); } epd = false; - } else if (!select) { - tsz = extract32(tcr, 0, 6); - epd = extract32(tcr, 7, 1); - using64k = extract32(tcr, 14, 1); - using16k = extract32(tcr, 15, 1); - tbi = extract64(tcr, 37, 1); - hpd = extract64(tcr, 41, 1); - tbid = extract64(tcr, 51, 1); } else { - int tg = extract32(tcr, 30, 2); - using16k = tg == 1; - using64k = tg == 3; - tsz = extract32(tcr, 16, 6); - epd = extract32(tcr, 23, 1); - tbi = extract64(tcr, 38, 1); - hpd = extract64(tcr, 42, 1); - tbid = extract64(tcr, 52, 1); + /* + * Bit 55 is always between the two regions, and is canonical for + * determining if address tagging is enabled. 
+ */ + select = extract64(va, 55, 1); + if (!select) { + tsz = extract32(tcr, 0, 6); + epd = extract32(tcr, 7, 1); + using64k = extract32(tcr, 14, 1); + using16k = extract32(tcr, 15, 1); + hpd = extract64(tcr, 41, 1); + } else { + int tg = extract32(tcr, 30, 2); + using16k = tg == 1; + using64k = tg == 3; + tsz = extract32(tcr, 16, 6); + epd = extract32(tcr, 23, 1); + hpd = extract64(tcr, 42, 1); + } } tsz = MIN(tsz, 39); /* TODO: ARMv8.4-TTST */ tsz = MAX(tsz, 16); /* TODO: ARMv8.2-LVA */ + /* Present TBI as a composite with TBID. */ + tbi = aa64_va_parameter_tbi(tcr, mmu_idx); + if (!data) { + tbi &= ~aa64_va_parameter_tbid(tcr, mmu_idx); + } + tbi = (tbi >> select) & 1; + return (ARMVAParameters) { .tsz = tsz, .select = select, .tbi = tbi, - .tbid = tbid, .epd = epd, .hpd = hpd, .using16k = using16k, @@ -10293,16 +10373,6 @@ ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va, }; } -ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, - ARMMMUIdx mmu_idx, bool data) -{ - ARMVAParameters ret = aa64_va_parameters_both(env, va, mmu_idx); - - /* Present TBI as a composite with TBID. */ - ret.tbi &= (data || !ret.tbid); - return ret; -} - #ifndef CONFIG_USER_ONLY static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, ARMMMUIdx mmu_idx) @@ -10388,7 +10458,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, TCR *tcr = regime_tcr(env, mmu_idx); int ap, ns, xn, pxn; uint32_t el = regime_el(env, mmu_idx); - bool ttbr1_valid; uint64_t descaddrmask; bool aarch64 = arm_el_is_aa64(env, el); bool guarded = false; @@ -10403,14 +10472,11 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, param = aa64_va_parameters(env, address, mmu_idx, access_type != MMU_INST_FETCH); level = 0; - ttbr1_valid = regime_has_2_ranges(mmu_idx); addrsize = 64 - 8 * param.tbi; inputsize = 64 - param.tsz; } else { param = aa32_va_parameters(env, address, mmu_idx); level = 1; - /* There is no TTBR1 for EL2 */ - ttbr1_valid = (el != 2); addrsize = (mmu_idx == ARMMMUIdx_Stage2 ? 40 : 32); inputsize = addrsize - param.tsz; } @@ -10427,7 +10493,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address, if (inputsize < addrsize) { target_ulong top_bits = sextract64(address, inputsize, addrsize - inputsize); - if (-top_bits != param.select || (param.select && !ttbr1_valid)) { + if (-top_bits != param.select) { /* The gap between the two regions is a Translation fault */ fault_type = ARMFault_Translation; goto do_fault; @@ -12136,21 +12202,15 @@ static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, { uint32_t flags = rebuild_hflags_aprofile(env); ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx); - ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1); + uint64_t tcr = regime_tcr(env, mmu_idx)->raw_tcr; uint64_t sctlr; int tbii, tbid; flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1); /* Get control bits for tagged addresses. 
*/ - if (regime_has_2_ranges(mmu_idx)) { - ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1); - tbid = (p1.tbi << 1) | p0.tbi; - tbii = tbid & ~((p1.tbid << 1) | p0.tbid); - } else { - tbid = p0.tbi; - tbii = tbid & !p0.tbid; - } + tbid = aa64_va_parameter_tbi(tcr, mmu_idx); + tbii = tbid & ~aa64_va_parameter_tbid(tcr, mmu_idx); flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii); flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid); diff --git a/target/arm/helper.h b/target/arm/helper.h index aa3d8cd08f..fcbf504121 100644 --- a/target/arm/helper.h +++ b/target/arm/helper.h @@ -303,14 +303,8 @@ DEF_HELPER_2(neon_abd_s16, i32, i32, i32) DEF_HELPER_2(neon_abd_u32, i32, i32, i32) DEF_HELPER_2(neon_abd_s32, i32, i32, i32) -DEF_HELPER_2(neon_shl_u8, i32, i32, i32) -DEF_HELPER_2(neon_shl_s8, i32, i32, i32) DEF_HELPER_2(neon_shl_u16, i32, i32, i32) DEF_HELPER_2(neon_shl_s16, i32, i32, i32) -DEF_HELPER_2(neon_shl_u32, i32, i32, i32) -DEF_HELPER_2(neon_shl_s32, i32, i32, i32) -DEF_HELPER_2(neon_shl_u64, i64, i64, i64) -DEF_HELPER_2(neon_shl_s64, i64, i64, i64) DEF_HELPER_2(neon_rshl_u8, i32, i32, i32) DEF_HELPER_2(neon_rshl_s8, i32, i32, i32) DEF_HELPER_2(neon_rshl_u16, i32, i32, i32) @@ -348,8 +342,6 @@ DEF_HELPER_2(neon_sub_u8, i32, i32, i32) DEF_HELPER_2(neon_sub_u16, i32, i32, i32) DEF_HELPER_2(neon_mul_u8, i32, i32, i32) DEF_HELPER_2(neon_mul_u16, i32, i32, i32) -DEF_HELPER_2(neon_mul_p8, i32, i32, i32) -DEF_HELPER_2(neon_mull_p8, i64, i32, i32) DEF_HELPER_2(neon_tst_u8, i32, i32, i32) DEF_HELPER_2(neon_tst_u16, i32, i32, i32) @@ -569,9 +561,6 @@ DEF_HELPER_FLAGS_3(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_FLAGS_3(crc32c, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32) DEF_HELPER_2(dc_zva, void, env, i64) -DEF_HELPER_FLAGS_2(neon_pmull_64_lo, TCG_CALL_NO_RWG_SE, i64, i64, i64) -DEF_HELPER_FLAGS_2(neon_pmull_64_hi, TCG_CALL_NO_RWG_SE, i64, i64, i64) - DEF_HELPER_FLAGS_5(gvec_qrdmlah_s16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_5(gvec_qrdmlsh_s16, TCG_CALL_NO_RWG, @@ -697,6 +686,16 @@ DEF_HELPER_FLAGS_2(frint64_s, TCG_CALL_NO_RWG, f32, f32, ptr) DEF_HELPER_FLAGS_2(frint32_d, TCG_CALL_NO_RWG, f64, f64, ptr) DEF_HELPER_FLAGS_2(frint64_d, TCG_CALL_NO_RWG, f64, f64, ptr) +DEF_HELPER_FLAGS_4(gvec_sshl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_sshl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ushl_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_ushl_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(gvec_pmul_b, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) +DEF_HELPER_FLAGS_4(gvec_pmull_q, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + +DEF_HELPER_FLAGS_4(neon_pmull_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) + #ifdef TARGET_AARCH64 #include "helper-a64.h" #include "helper-sve.h" diff --git a/target/arm/internals.h b/target/arm/internals.h index 58c4d707c5..9f96a2359f 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -931,6 +931,48 @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) } } +/** + * arm_num_brps: Return number of implemented breakpoints. + * Note that the ID register BRPS field is "number of bps - 1", + * and we return the actual number of breakpoints. 
+ */ +static inline int arm_num_brps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1; + } else { + return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; + } +} + +/** + * arm_num_wrps: Return number of implemented watchpoints. + * Note that the ID register WRPS field is "number of wps - 1", + * and we return the actual number of watchpoints. + */ +static inline int arm_num_wrps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1; + } else { + return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; + } +} + +/** + * arm_num_ctx_cmps: Return number of implemented context comparators. + * Note that the ID register CTX_CMPS field is "number of cmps - 1", + * and we return the actual number of comparators. + */ +static inline int arm_num_ctx_cmps(ARMCPU *cpu) +{ + if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { + return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1; + } else { + return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; + } +} + /* Note make_memop_idx reserves 4 bits for mmu_idx, and MO_BSWAP is bit 3. * Thus a TCGMemOpIdx, without any MO_ALIGN bits, fits in 8 bits. */ @@ -1091,7 +1133,7 @@ static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features, if ((features >> ARM_FEATURE_THUMB2) & 1) { valid |= CPSR_IT; } - if (isar_feature_jazelle(id)) { + if (isar_feature_aa32_jazelle(id)) { valid |= CPSR_J; } if (isar_feature_aa32_pan(id)) { @@ -1127,15 +1169,12 @@ typedef struct ARMVAParameters { unsigned tsz : 8; unsigned select : 1; bool tbi : 1; - bool tbid : 1; bool epd : 1; bool hpd : 1; bool using16k : 1; bool using64k : 1; } ARMVAParameters; -ARMVAParameters aa64_va_parameters_both(CPUARMState *env, uint64_t va, - ARMMMUIdx mmu_idx); ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ARMMMUIdx mmu_idx, bool data); diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c index 3a8b437eef..7981ae3bc4 100644 --- a/target/arm/kvm32.c +++ b/target/arm/kvm32.c @@ -97,6 +97,9 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ahcf->isar.id_isar6 = 0; } + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM_CP15_REG32(0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_MVFR0); @@ -108,6 +111,28 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * Fortunately there is not yet anything in there that affects migration. */ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM_CP15_REG32(0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM_CP15_REG32(0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM_CP15_REG32(0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM_CP15_REG32(0, 0, 1, 7)); + if (read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM_CP15_REG32(0, 0, 2, 6))) { + /* + * Older kernels don't support reading ID_MMFR4 (a new in v8 + * register); assume it's zero. + */ + ahcf->isar.id_mmfr4 = 0; + } + + /* + * There is no way to read DBGDIDR, because currently 32-bit KVM + * doesn't implement debug at all. Leave it at zero. 
+ */ + kvm_arm_destroy_scratch_host_vcpu(fdarray); if (err < 0) { diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index 3bae9e4a66..0ad96c3500 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -541,6 +541,10 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) } else { err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, ARM64_SYS_REG(3, 0, 0, 4, 1)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, + ARM64_SYS_REG(3, 0, 0, 5, 0)); + err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, + ARM64_SYS_REG(3, 0, 0, 5, 1)); err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, ARM64_SYS_REG(3, 0, 0, 6, 0)); err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, @@ -559,6 +563,16 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * than skipping the reads and leaving 0, as we must avoid * considering the values in every case. */ + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + ARM64_SYS_REG(3, 0, 0, 1, 2)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + ARM64_SYS_REG(3, 0, 0, 1, 4)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + ARM64_SYS_REG(3, 0, 0, 1, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + ARM64_SYS_REG(3, 0, 0, 1, 6)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + ARM64_SYS_REG(3, 0, 0, 1, 7)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, ARM64_SYS_REG(3, 0, 0, 2, 0)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, @@ -571,6 +585,8 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 2, 4)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, ARM64_SYS_REG(3, 0, 0, 2, 5)); + err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + ARM64_SYS_REG(3, 0, 0, 2, 6)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, ARM64_SYS_REG(3, 0, 0, 2, 7)); @@ -580,6 +596,36 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ARM64_SYS_REG(3, 0, 0, 3, 1)); err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, ARM64_SYS_REG(3, 0, 0, 3, 2)); + + /* + * DBGDIDR is a bit complicated because the kernel doesn't + * provide an accessor for it in 64-bit mode, which is what this + * scratch VM is in, and there's no architected "64-bit sysreg + * which reads the same as the 32-bit register" the way there is + * for other ID registers. Instead we synthesize a value from the + * AArch64 ID_AA64DFR0, the same way the kernel code in + * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. + * We only do this if the CPU supports AArch32 at EL1. 
+ */ + if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); + int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + int ctx_cmps = + FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + int version = 6; /* ARMv8 debug architecture */ + bool has_el3 = + !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + uint32_t dbgdidr = 0; + + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); + dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); + dbgdidr |= (1 << 15); /* RES1 bit */ + ahcf->isar.dbgdidr = dbgdidr; + } } sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0; diff --git a/target/arm/neon_helper.c b/target/arm/neon_helper.c index 4259056723..c7a8438b42 100644 --- a/target/arm/neon_helper.c +++ b/target/arm/neon_helper.c @@ -615,24 +615,9 @@ NEON_VOP(abd_u32, neon_u32, 1) } else { \ dest = src1 << tmp; \ }} while (0) -NEON_VOP(shl_u8, neon_u8, 4) NEON_VOP(shl_u16, neon_u16, 2) -NEON_VOP(shl_u32, neon_u32, 1) #undef NEON_FN -uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; - if (shift >= 64 || shift <= -64) { - val = 0; - } else if (shift < 0) { - val >>= -shift; - } else { - val <<= shift; - } - return val; -} - #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ @@ -645,27 +630,9 @@ uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop) } else { \ dest = src1 << tmp; \ }} while (0) -NEON_VOP(shl_s8, neon_s8, 4) NEON_VOP(shl_s16, neon_s16, 2) -NEON_VOP(shl_s32, neon_s32, 1) #undef NEON_FN -uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop) -{ - int8_t shift = (int8_t)shiftop; - int64_t val = valop; - if (shift >= 64) { - val = 0; - } else if (shift <= -64) { - val >>= 63; - } else if (shift < 0) { - val >>= -shift; - } else { - val <<= shift; - } - return val; -} - #define NEON_FN(dest, src1, src2) do { \ int8_t tmp; \ tmp = (int8_t)src2; \ @@ -1162,60 +1129,6 @@ NEON_VOP(mul_u8, neon_u8, 4) NEON_VOP(mul_u16, neon_u16, 2) #undef NEON_FN -/* Polynomial multiplication is like integer multiplication except the - partial products are XORed, not added. */ -uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2) -{ - uint32_t mask; - uint32_t result; - result = 0; - while (op1) { - mask = 0; - if (op1 & 1) - mask |= 0xff; - if (op1 & (1 << 8)) - mask |= (0xff << 8); - if (op1 & (1 << 16)) - mask |= (0xff << 16); - if (op1 & (1 << 24)) - mask |= (0xff << 24); - result ^= op2 & mask; - op1 = (op1 >> 1) & 0x7f7f7f7f; - op2 = (op2 << 1) & 0xfefefefe; - } - return result; -} - -uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2) -{ - uint64_t result = 0; - uint64_t mask; - uint64_t op2ex = op2; - op2ex = (op2ex & 0xff) | - ((op2ex & 0xff00) << 8) | - ((op2ex & 0xff0000) << 16) | - ((op2ex & 0xff000000) << 24); - while (op1) { - mask = 0; - if (op1 & 1) { - mask |= 0xffff; - } - if (op1 & (1 << 8)) { - mask |= (0xffffU << 16); - } - if (op1 & (1 << 16)) { - mask |= (0xffffULL << 32); - } - if (op1 & (1 << 24)) { - mask |= (0xffffULL << 48); - } - result ^= op2ex & mask; - op1 = (op1 >> 1) & 0x7f7f7f7f; - op2ex <<= 1; - } - return result; -} - #define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? 
-1 : 0 NEON_VOP(tst_u8, neon_u8, 4) NEON_VOP(tst_u16, neon_u16, 2) @@ -2207,33 +2120,3 @@ void HELPER(neon_zip16)(void *vd, void *vm) rm[0] = m0; rd[0] = d0; } - -/* Helper function for 64 bit polynomial multiply case: - * perform PolynomialMult(op1, op2) and return either the top or - * bottom half of the 128 bit result. - */ -uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2) -{ - int bitnum; - uint64_t res = 0; - - for (bitnum = 0; bitnum < 64; bitnum++) { - if (op1 & (1ULL << bitnum)) { - res ^= op2 << bitnum; - } - } - return res; -} -uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2) -{ - int bitnum; - uint64_t res = 0; - - /* bit 0 of op1 can't influence the high 64 bits at all */ - for (bitnum = 1; bitnum < 64; bitnum++) { - if (op1 & (1ULL << bitnum)) { - res ^= op2 >> (64 - bitnum); - } - } - return res; -} diff --git a/target/arm/pauth_helper.c b/target/arm/pauth_helper.c index 9746e32bf8..b909630317 100644 --- a/target/arm/pauth_helper.c +++ b/target/arm/pauth_helper.c @@ -320,7 +320,8 @@ static uint64_t pauth_addpac(CPUARMState *env, uint64_t ptr, uint64_t modifier, static uint64_t pauth_original_ptr(uint64_t ptr, ARMVAParameters param) { - uint64_t extfield = -param.select; + /* Note that bit 55 is used whether or not the regime has 2 ranges. */ + uint64_t extfield = sextract64(ptr, 55, 1); int bot_pac_bit = 64 - param.tsz; int top_pac_bit = 64 - 8 * param.tbi; diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index 7c26c3bfeb..596bf4cf73 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -6895,6 +6895,7 @@ static void disas_simd_ext(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_resh); + clear_vec_high(s, true, rd); } /* TBL/TBX @@ -6963,6 +6964,7 @@ static void disas_simd_tb(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_resh); + clear_vec_high(s, true, rd); } /* ZIP/UZP/TRN @@ -7052,6 +7054,7 @@ static void disas_simd_zip_trn(DisasContext *s, uint32_t insn) tcg_temp_free_i64(tcg_resl); write_vec_element(s, tcg_resh, rd, 1, MO_64); tcg_temp_free_i64(tcg_resh); + clear_vec_high(s, true, rd); } /* @@ -7409,6 +7412,9 @@ static void handle_simd_inse(DisasContext *s, int rd, int rn, write_vec_element(s, tmp, rd, dst_index, size); tcg_temp_free_i64(tmp); + + /* INS is considered a 128-bit write for SVE. */ + clear_vec_high(s, true, rd); } @@ -7438,6 +7444,9 @@ static void handle_simd_insg(DisasContext *s, int rd, int rn, int imm5) idx = extract32(imm5, 1 + size, 4 - size); write_vec_element(s, cpu_reg(s, rn), rd, idx, size); + + /* INS is considered a 128-bit write for SVE. 
*/ + clear_vec_high(s, true, rd); } /* @@ -8735,9 +8744,9 @@ static void handle_3same_64(DisasContext *s, int opcode, bool u, break; case 0x8: /* SSHL, USHL */ if (u) { - gen_helper_neon_shl_u64(tcg_rd, tcg_rn, tcg_rm); + gen_ushl_i64(tcg_rd, tcg_rn, tcg_rm); } else { - gen_helper_neon_shl_s64(tcg_rd, tcg_rn, tcg_rm); + gen_sshl_i64(tcg_rd, tcg_rn, tcg_rm); } break; case 0x9: /* SQSHL, UQSHL */ @@ -10533,10 +10542,6 @@ static void handle_3rd_widening(DisasContext *s, int is_q, int is_u, int size, gen_helper_neon_addl_saturate_s32(tcg_passres, cpu_env, tcg_passres, tcg_passres); break; - case 14: /* PMULL */ - assert(size == 0); - gen_helper_neon_mull_p8(tcg_passres, tcg_op1, tcg_op2); - break; default: g_assert_not_reached(); } @@ -10648,30 +10653,6 @@ static void handle_3rd_narrowing(DisasContext *s, int is_q, int is_u, int size, clear_vec_high(s, is_q, rd); } -static void handle_pmull_64(DisasContext *s, int is_q, int rd, int rn, int rm) -{ - /* PMULL of 64 x 64 -> 128 is an odd special case because it - * is the only three-reg-diff instruction which produces a - * 128-bit wide result from a single operation. However since - * it's possible to calculate the two halves more or less - * separately we just use two helper calls. - */ - TCGv_i64 tcg_op1 = tcg_temp_new_i64(); - TCGv_i64 tcg_op2 = tcg_temp_new_i64(); - TCGv_i64 tcg_res = tcg_temp_new_i64(); - - read_vec_element(s, tcg_op1, rn, is_q, MO_64); - read_vec_element(s, tcg_op2, rm, is_q, MO_64); - gen_helper_neon_pmull_64_lo(tcg_res, tcg_op1, tcg_op2); - write_vec_element(s, tcg_res, rd, 0, MO_64); - gen_helper_neon_pmull_64_hi(tcg_res, tcg_op1, tcg_op2); - write_vec_element(s, tcg_res, rd, 1, MO_64); - - tcg_temp_free_i64(tcg_op1); - tcg_temp_free_i64(tcg_op2); - tcg_temp_free_i64(tcg_res); -} - /* AdvSIMD three different * 31 30 29 28 24 23 22 21 20 16 15 12 11 10 9 5 4 0 * +---+---+---+-----------+------+---+------+--------+-----+------+------+ @@ -10724,11 +10705,21 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) handle_3rd_narrowing(s, is_q, is_u, size, opcode, rd, rn, rm); break; case 14: /* PMULL, PMULL2 */ - if (is_u || size == 1 || size == 2) { + if (is_u) { unallocated_encoding(s); return; } - if (size == 3) { + switch (size) { + case 0: /* PMULL.P8 */ + if (!fp_access_check(s)) { + return; + } + /* The Q field specifies lo/hi half input for this insn. */ + gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, + gen_helper_neon_pmull_h); + break; + + case 3: /* PMULL.P64 */ if (!dc_isar_feature(aa64_pmull, s)) { unallocated_encoding(s); return; @@ -10736,10 +10727,16 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) if (!fp_access_check(s)) { return; } - handle_pmull_64(s, is_q, rd, rn, rm); - return; + /* The Q field specifies lo/hi half input for this insn. */ + gen_gvec_op3_ool(s, true, rd, rn, rm, is_q, + gen_helper_gvec_pmull_q); + break; + + default: + unallocated_encoding(s); + break; } - goto is_widening; + return; case 9: /* SQDMLAL, SQDMLAL2 */ case 11: /* SQDMLSL, SQDMLSL2 */ case 13: /* SQDMULL, SQDMULL2 */ @@ -10760,7 +10757,6 @@ static void disas_simd_three_reg_diff(DisasContext *s, uint32_t insn) unallocated_encoding(s); return; } - is_widening: if (!fp_access_check(s)) { return; } @@ -11132,6 +11128,10 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) is_q ? 16 : 8, vec_full_reg_size(s), (u ? uqsub_op : sqsub_op) + size); return; + case 0x08: /* SSHL, USHL */ + gen_gvec_op3(s, is_q, rd, rn, rm, + u ? 
&ushl_op[size] : &sshl_op[size]); + return; case 0x0c: /* SMAX, UMAX */ if (u) { gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_umax, size); @@ -11156,9 +11156,10 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) case 0x13: /* MUL, PMUL */ if (!u) { /* MUL */ gen_gvec_fn3(s, is_q, rd, rn, rm, tcg_gen_gvec_mul, size); - return; + } else { /* PMUL */ + gen_gvec_op3_ool(s, is_q, rd, rn, rm, 0, gen_helper_gvec_pmul_b); } - break; + return; case 0x12: /* MLA, MLS */ if (u) { gen_gvec_op3(s, is_q, rd, rn, rm, &mls_op[size]); @@ -11247,16 +11248,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genfn = fns[size][u]; break; } - case 0x8: /* SSHL, USHL */ - { - static NeonGenTwoOpFn * const fns[3][2] = { - { gen_helper_neon_shl_s8, gen_helper_neon_shl_u8 }, - { gen_helper_neon_shl_s16, gen_helper_neon_shl_u16 }, - { gen_helper_neon_shl_s32, gen_helper_neon_shl_u32 }, - }; - genfn = fns[size][u]; - break; - } case 0x9: /* SQSHL, UQSHL */ { static NeonGenTwoOpEnvFn * const fns[3][2] = { @@ -11298,11 +11289,6 @@ static void disas_simd_3same_int(DisasContext *s, uint32_t insn) genfn = fns[size][u]; break; } - case 0x13: /* MUL, PMUL */ - assert(u); /* PMUL */ - assert(size == 0); - genfn = gen_helper_neon_mul_p8; - break; case 0x16: /* SQDMULH, SQRDMULH */ { static NeonGenTwoOpEnvFn * const fns[2][2] = { diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c index bf90ac0e5b..ba46e2557a 100644 --- a/target/arm/translate-vfp.inc.c +++ b/target/arm/translate-vfp.inc.c @@ -201,7 +201,7 @@ static bool trans_VSEL(DisasContext *s, arg_VSEL *a) } /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && + if (dp && !dc_isar_feature(aa32_simd_r32, s) && ((a->vm | a->vn | a->vd) & 0x10)) { return false; } @@ -334,7 +334,7 @@ static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a) } /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && + if (dp && !dc_isar_feature(aa32_simd_r32, s) && ((a->vm | a->vn | a->vd) & 0x10)) { return false; } @@ -420,7 +420,7 @@ static bool trans_VRINT(DisasContext *s, arg_VRINT *a) } /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && + if (dp && !dc_isar_feature(aa32_simd_r32, s) && ((a->vm | a->vd) & 0x10)) { return false; } @@ -484,7 +484,7 @@ static bool trans_VCVT(DisasContext *s, arg_VCVT *a) } /* UNDEF accesses to D16-D31 if they don't exist */ - if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } @@ -556,7 +556,7 @@ static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a) uint32_t offset; /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { return false; } @@ -615,7 +615,7 @@ static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a) uint32_t offset; /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { return false; } @@ -662,7 +662,7 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a) } /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) { return false; } @@ -912,7 +912,7 @@ static bool 
trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a) */ /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } @@ -978,7 +978,7 @@ static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a) TCGv_i64 tmp; /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } @@ -1101,7 +1101,7 @@ static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) { return false; } @@ -1309,7 +1309,7 @@ static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn, TCGv_ptr fpst; /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) { return false; } @@ -1458,7 +1458,7 @@ static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm) TCGv_i64 f0, fd; /* UNDEF accesses to D16-D31 if they don't exist */ - if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) { return false; } @@ -1822,7 +1822,8 @@ static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && + ((a->vd | a->vn | a->vm) & 0x10)) { return false; } @@ -1921,7 +1922,7 @@ static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a) vd = a->vd; /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) { return false; } @@ -2065,7 +2066,7 @@ static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } @@ -2138,7 +2139,7 @@ static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } @@ -2204,7 +2205,7 @@ static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } @@ -2264,7 +2265,7 @@ static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } @@ -2325,7 +2326,7 @@ static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist. 
*/ - if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } @@ -2384,7 +2385,7 @@ static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) { return false; } @@ -2412,7 +2413,7 @@ static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a) TCGv_i32 vm; /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } @@ -2440,7 +2441,7 @@ static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a) TCGv_i32 vd; /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } @@ -2494,7 +2495,7 @@ static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a) TCGv_ptr fpst; /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } @@ -2534,7 +2535,7 @@ static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } @@ -2627,7 +2628,7 @@ static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a) } /* UNDEF accesses to D16-D31 if they don't exist. */ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) { return false; } @@ -2723,7 +2724,7 @@ static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a) TCGv_ptr fpst; /* UNDEF accesses to D16-D31 if they don't exist. 
*/ - if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) { + if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) { return false; } diff --git a/target/arm/translate.c b/target/arm/translate.c index 20f89ace2f..79880adaad 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -42,7 +42,7 @@ #define ENABLE_ARCH_5 arm_dc_feature(s, ARM_FEATURE_V5) /* currently all emulated v5 cores are also v5TE, so don't bother */ #define ENABLE_ARCH_5TE arm_dc_feature(s, ARM_FEATURE_V5) -#define ENABLE_ARCH_5J dc_isar_feature(jazelle, s) +#define ENABLE_ARCH_5J dc_isar_feature(aa32_jazelle, s) #define ENABLE_ARCH_6 arm_dc_feature(s, ARM_FEATURE_V6) #define ENABLE_ARCH_6K arm_dc_feature(s, ARM_FEATURE_V6K) #define ENABLE_ARCH_6T2 arm_dc_feature(s, ARM_FEATURE_THUMB2) @@ -2612,7 +2612,7 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn) #define VFP_SREG(insn, bigbit, smallbit) \ ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1)) #define VFP_DREG(reg, insn, bigbit, smallbit) do { \ - if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \ + if (dc_isar_feature(aa32_simd_r32, s)) { \ reg = (((insn) >> (bigbit)) & 0x0f) \ | (((insn) >> ((smallbit) - 4)) & 0x10); \ } else { \ @@ -3575,13 +3575,13 @@ static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift, if (u) { switch (size) { case 1: gen_helper_neon_shl_u16(var, var, shift); break; - case 2: gen_helper_neon_shl_u32(var, var, shift); break; + case 2: gen_ushl_i32(var, var, shift); break; default: abort(); } } else { switch (size) { case 1: gen_helper_neon_shl_s16(var, var, shift); break; - case 2: gen_helper_neon_shl_s32(var, var, shift); break; + case 2: gen_sshl_i32(var, var, shift); break; default: abort(); } } @@ -4384,6 +4384,280 @@ const GVecGen3 cmtst_op[4] = { .vece = MO_64 }, }; +void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) +{ + TCGv_i32 lval = tcg_temp_new_i32(); + TCGv_i32 rval = tcg_temp_new_i32(); + TCGv_i32 lsh = tcg_temp_new_i32(); + TCGv_i32 rsh = tcg_temp_new_i32(); + TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 max = tcg_const_i32(32); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_ext8s_i32(lsh, shift); + tcg_gen_neg_i32(rsh, lsh); + tcg_gen_shl_i32(lval, src, lsh); + tcg_gen_shr_i32(rval, src, rsh); + tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero); + tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst); + + tcg_temp_free_i32(lval); + tcg_temp_free_i32(rval); + tcg_temp_free_i32(lsh); + tcg_temp_free_i32(rsh); + tcg_temp_free_i32(zero); + tcg_temp_free_i32(max); +} + +void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) +{ + TCGv_i64 lval = tcg_temp_new_i64(); + TCGv_i64 rval = tcg_temp_new_i64(); + TCGv_i64 lsh = tcg_temp_new_i64(); + TCGv_i64 rsh = tcg_temp_new_i64(); + TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 max = tcg_const_i64(64); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. 
+ */ + tcg_gen_ext8s_i64(lsh, shift); + tcg_gen_neg_i64(rsh, lsh); + tcg_gen_shl_i64(lval, src, lsh); + tcg_gen_shr_i64(rval, src, rsh); + tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero); + tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst); + + tcg_temp_free_i64(lval); + tcg_temp_free_i64(rval); + tcg_temp_free_i64(lsh); + tcg_temp_free_i64(rsh); + tcg_temp_free_i64(zero); + tcg_temp_free_i64(max); +} + +static void gen_ushl_vec(unsigned vece, TCGv_vec dst, + TCGv_vec src, TCGv_vec shift) +{ + TCGv_vec lval = tcg_temp_new_vec_matching(dst); + TCGv_vec rval = tcg_temp_new_vec_matching(dst); + TCGv_vec lsh = tcg_temp_new_vec_matching(dst); + TCGv_vec rsh = tcg_temp_new_vec_matching(dst); + TCGv_vec msk, max; + + tcg_gen_neg_vec(vece, rsh, shift); + if (vece == MO_8) { + tcg_gen_mov_vec(lsh, shift); + } else { + msk = tcg_temp_new_vec_matching(dst); + tcg_gen_dupi_vec(vece, msk, 0xff); + tcg_gen_and_vec(vece, lsh, shift, msk); + tcg_gen_and_vec(vece, rsh, rsh, msk); + tcg_temp_free_vec(msk); + } + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_shlv_vec(vece, lval, src, lsh); + tcg_gen_shrv_vec(vece, rval, src, rsh); + + max = tcg_temp_new_vec_matching(dst); + tcg_gen_dupi_vec(vece, max, 8 << vece); + + /* + * The choice of LT (signed) and GEU (unsigned) are biased toward + * the instructions of the x86_64 host. For MO_8, the whole byte + * is significant so we must use an unsigned compare; otherwise we + * have already masked to a byte and so a signed compare works. + * Other tcg hosts have a full set of comparisons and do not care. + */ + if (vece == MO_8) { + tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max); + tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max); + tcg_gen_andc_vec(vece, lval, lval, lsh); + tcg_gen_andc_vec(vece, rval, rval, rsh); + } else { + tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max); + tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max); + tcg_gen_and_vec(vece, lval, lval, lsh); + tcg_gen_and_vec(vece, rval, rval, rsh); + } + tcg_gen_or_vec(vece, dst, lval, rval); + + tcg_temp_free_vec(max); + tcg_temp_free_vec(lval); + tcg_temp_free_vec(rval); + tcg_temp_free_vec(lsh); + tcg_temp_free_vec(rsh); +} + +static const TCGOpcode ushl_list[] = { + INDEX_op_neg_vec, INDEX_op_shlv_vec, + INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0 +}; + +const GVecGen3 ushl_op[4] = { + { .fniv = gen_ushl_vec, + .fno = gen_helper_gvec_ushl_b, + .opt_opc = ushl_list, + .vece = MO_8 }, + { .fniv = gen_ushl_vec, + .fno = gen_helper_gvec_ushl_h, + .opt_opc = ushl_list, + .vece = MO_16 }, + { .fni4 = gen_ushl_i32, + .fniv = gen_ushl_vec, + .opt_opc = ushl_list, + .vece = MO_32 }, + { .fni8 = gen_ushl_i64, + .fniv = gen_ushl_vec, + .opt_opc = ushl_list, + .vece = MO_64 }, +}; + +void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift) +{ + TCGv_i32 lval = tcg_temp_new_i32(); + TCGv_i32 rval = tcg_temp_new_i32(); + TCGv_i32 lsh = tcg_temp_new_i32(); + TCGv_i32 rsh = tcg_temp_new_i32(); + TCGv_i32 zero = tcg_const_i32(0); + TCGv_i32 max = tcg_const_i32(31); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. 
+ */ + tcg_gen_ext8s_i32(lsh, shift); + tcg_gen_neg_i32(rsh, lsh); + tcg_gen_shl_i32(lval, src, lsh); + tcg_gen_umin_i32(rsh, rsh, max); + tcg_gen_sar_i32(rval, src, rsh); + tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero); + tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval); + + tcg_temp_free_i32(lval); + tcg_temp_free_i32(rval); + tcg_temp_free_i32(lsh); + tcg_temp_free_i32(rsh); + tcg_temp_free_i32(zero); + tcg_temp_free_i32(max); +} + +void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift) +{ + TCGv_i64 lval = tcg_temp_new_i64(); + TCGv_i64 rval = tcg_temp_new_i64(); + TCGv_i64 lsh = tcg_temp_new_i64(); + TCGv_i64 rsh = tcg_temp_new_i64(); + TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 max = tcg_const_i64(63); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_ext8s_i64(lsh, shift); + tcg_gen_neg_i64(rsh, lsh); + tcg_gen_shl_i64(lval, src, lsh); + tcg_gen_umin_i64(rsh, rsh, max); + tcg_gen_sar_i64(rval, src, rsh); + tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero); + tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval); + + tcg_temp_free_i64(lval); + tcg_temp_free_i64(rval); + tcg_temp_free_i64(lsh); + tcg_temp_free_i64(rsh); + tcg_temp_free_i64(zero); + tcg_temp_free_i64(max); +} + +static void gen_sshl_vec(unsigned vece, TCGv_vec dst, + TCGv_vec src, TCGv_vec shift) +{ + TCGv_vec lval = tcg_temp_new_vec_matching(dst); + TCGv_vec rval = tcg_temp_new_vec_matching(dst); + TCGv_vec lsh = tcg_temp_new_vec_matching(dst); + TCGv_vec rsh = tcg_temp_new_vec_matching(dst); + TCGv_vec tmp = tcg_temp_new_vec_matching(dst); + + /* + * Rely on the TCG guarantee that out of range shifts produce + * unspecified results, not undefined behaviour (i.e. no trap). + * Discard out-of-range results after the fact. + */ + tcg_gen_neg_vec(vece, rsh, shift); + if (vece == MO_8) { + tcg_gen_mov_vec(lsh, shift); + } else { + tcg_gen_dupi_vec(vece, tmp, 0xff); + tcg_gen_and_vec(vece, lsh, shift, tmp); + tcg_gen_and_vec(vece, rsh, rsh, tmp); + } + + /* Bound rsh so out of bound right shift gets -1. */ + tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1); + tcg_gen_umin_vec(vece, rsh, rsh, tmp); + tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp); + + tcg_gen_shlv_vec(vece, lval, src, lsh); + tcg_gen_sarv_vec(vece, rval, src, rsh); + + /* Select in-bound left shift. */ + tcg_gen_andc_vec(vece, lval, lval, tmp); + + /* Select between left and right shift. 
*/ + if (vece == MO_8) { + tcg_gen_dupi_vec(vece, tmp, 0); + tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval); + } else { + tcg_gen_dupi_vec(vece, tmp, 0x80); + tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval); + } + + tcg_temp_free_vec(lval); + tcg_temp_free_vec(rval); + tcg_temp_free_vec(lsh); + tcg_temp_free_vec(rsh); + tcg_temp_free_vec(tmp); +} + +static const TCGOpcode sshl_list[] = { + INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec, + INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0 +}; + +const GVecGen3 sshl_op[4] = { + { .fniv = gen_sshl_vec, + .fno = gen_helper_gvec_sshl_b, + .opt_opc = sshl_list, + .vece = MO_8 }, + { .fniv = gen_sshl_vec, + .fno = gen_helper_gvec_sshl_h, + .opt_opc = sshl_list, + .vece = MO_16 }, + { .fni4 = gen_sshl_i32, + .fniv = gen_sshl_vec, + .opt_opc = sshl_list, + .vece = MO_32 }, + { .fni8 = gen_sshl_i64, + .fniv = gen_sshl_vec, + .opt_opc = sshl_list, + .vece = MO_64 }, +}; + static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat, TCGv_vec a, TCGv_vec b) { @@ -4733,16 +5007,17 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) case NEON_3R_VMUL: /* VMUL */ if (u) { - /* Polynomial case allows only P8 and is handled below. */ + /* Polynomial case allows only P8. */ if (size != 0) { return 1; } + tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, + 0, gen_helper_gvec_pmul_b); } else { tcg_gen_gvec_mul(size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size); - return 0; } - break; + return 0; case NEON_3R_VML: /* VMLA, VMLS */ tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size, @@ -4787,6 +5062,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) vec_size, vec_size); } return 0; + + case NEON_3R_VSHL: + /* Note the operation is vshl vd,vm,vn */ + tcg_gen_gvec_3(rd_ofs, rm_ofs, rn_ofs, vec_size, vec_size, + u ? &ushl_op[size] : &sshl_op[size]); + return 0; } if (size == 3) { @@ -4795,13 +5076,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) neon_load_reg64(cpu_V0, rn + pass); neon_load_reg64(cpu_V1, rm + pass); switch (op) { - case NEON_3R_VSHL: - if (u) { - gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0); - } else { - gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0); - } - break; case NEON_3R_VQSHL: if (u) { gen_helper_neon_qshl_u64(cpu_V0, cpu_env, @@ -4836,7 +5110,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) } pairwise = 0; switch (op) { - case NEON_3R_VSHL: case NEON_3R_VQSHL: case NEON_3R_VRSHL: case NEON_3R_VQRSHL: @@ -4916,9 +5189,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) case NEON_3R_VHSUB: GEN_NEON_INTEGER_OP(hsub); break; - case NEON_3R_VSHL: - GEN_NEON_INTEGER_OP(shl); - break; case NEON_3R_VQSHL: GEN_NEON_INTEGER_OP_ENV(qshl); break; @@ -4937,10 +5207,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) tmp2 = neon_load_reg(rd, pass); gen_neon_add(size, tmp, tmp2); break; - case NEON_3R_VMUL: - /* VMUL.P8; other cases already eliminated. 
*/ - gen_helper_neon_mul_p8(tmp, tmp, tmp2); - break; case NEON_3R_VPMAX: GEN_NEON_INTEGER_OP(pmax); break; @@ -5327,9 +5593,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) } } else { if (input_unsigned) { - gen_helper_neon_shl_u64(cpu_V0, in, tmp64); + gen_ushl_i64(cpu_V0, in, tmp64); } else { - gen_helper_neon_shl_s64(cpu_V0, in, tmp64); + gen_sshl_i64(cpu_V0, in, tmp64); } } tmp = tcg_temp_new_i32(); @@ -5600,27 +5866,20 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) return 1; } - /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply) - * outside the loop below as it only performs a single pass. - */ - if (op == 14 && size == 2) { - TCGv_i64 tcg_rn, tcg_rm, tcg_rd; - - if (!dc_isar_feature(aa32_pmull, s)) { - return 1; + /* Handle polynomial VMULL in a single pass. */ + if (op == 14) { + if (size == 0) { + /* VMULL.P8 */ + tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16, + 0, gen_helper_neon_pmull_h); + } else { + /* VMULL.P64 */ + if (!dc_isar_feature(aa32_pmull, s)) { + return 1; + } + tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, 16, 16, + 0, gen_helper_gvec_pmull_q); } - tcg_rn = tcg_temp_new_i64(); - tcg_rm = tcg_temp_new_i64(); - tcg_rd = tcg_temp_new_i64(); - neon_load_reg64(tcg_rn, rn); - neon_load_reg64(tcg_rm, rm); - gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm); - neon_store_reg64(tcg_rd, rd); - gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm); - neon_store_reg64(tcg_rd, rd + 1); - tcg_temp_free_i64(tcg_rn); - tcg_temp_free_i64(tcg_rm); - tcg_temp_free_i64(tcg_rd); return 0; } @@ -5698,11 +5957,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn) /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */ gen_neon_mull(cpu_V0, tmp, tmp2, size, u); break; - case 14: /* Polynomial VMULL */ - gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2); - tcg_temp_free_i32(tmp2); - tcg_temp_free_i32(tmp); - break; default: /* 15 is RESERVED: caught earlier */ abort(); } @@ -9845,8 +10099,8 @@ static bool op_div(DisasContext *s, arg_rrr *a, bool u) TCGv_i32 t1, t2; if (s->thumb - ? !dc_isar_feature(thumb_div, s) - : !dc_isar_feature(arm_div, s)) { + ? !dc_isar_feature(aa32_thumb_div, s) + : !dc_isar_feature(aa32_arm_div, s)) { return false; } diff --git a/target/arm/translate.h b/target/arm/translate.h index 5b167c416a..d9ea0c99cc 100644 --- a/target/arm/translate.h +++ b/target/arm/translate.h @@ -278,6 +278,8 @@ uint64_t vfp_expand_imm(int size, uint8_t imm8); extern const GVecGen3 mla_op[4]; extern const GVecGen3 mls_op[4]; extern const GVecGen3 cmtst_op[4]; +extern const GVecGen3 sshl_op[4]; +extern const GVecGen3 ushl_op[4]; extern const GVecGen2i ssra_op[4]; extern const GVecGen2i usra_op[4]; extern const GVecGen2i sri_op[4]; @@ -287,6 +289,10 @@ extern const GVecGen4 sqadd_op[4]; extern const GVecGen4 uqsub_op[4]; extern const GVecGen4 sqsub_op[4]; void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b); +void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b); +void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); +void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b); /* * Forward to the isar_feature_* tests given a DisasContext pointer. 
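For orientation (this is an illustration, not code from the patch): the inline TCG expansions above and the gvec_ushl_*/gvec_sshl_* helpers added to vec_helper.c below implement the same per-lane rule. Only the low byte of each shift element is significant; a positive count shifts left, a negative count shifts right, and a count at or beyond the lane width yields 0 for the unsigned form or a full sign fill for the signed form. A minimal scalar sketch of one 32-bit lane, with made-up names and assuming the usual arithmetic behaviour of >> on signed values, could look like this:

#include <stdint.h>

/* Illustrative model of one 32-bit lane of USHL (unsigned shift). */
static uint32_t model_ushl_s(uint32_t n, uint32_t m)
{
    int8_t sh = (int8_t)m;               /* only the low byte of the shift matters */

    if (sh >= 0) {
        return sh < 32 ? n << sh : 0;    /* out-of-range left shift gives 0 */
    }
    return sh > -32 ? n >> -sh : 0;      /* out-of-range right shift gives 0 */
}

/* Illustrative model of one 32-bit lane of SSHL (signed shift). */
static int32_t model_sshl_s(int32_t n, uint32_t m)
{
    int8_t sh = (int8_t)m;

    if (sh >= 0) {
        return sh < 32 ? (int32_t)((uint32_t)n << sh) : 0;
    }
    return n >> (sh > -32 ? -sh : 31);   /* clamp the count; fills with the sign bit */
}

Branch-free SIMD code cannot take the per-lane if/else above, which is why the TCG versions compute both the left-shift and the right-shift result and then pick the valid one with movcond, cmpsel or mask operations, and why the comments only require TCG to make out-of-range shifts unspecified rather than trapping. The polynomial multiply helpers added below widen a scalar definition to whole vectors in the same spirit; as a reminder, a carry-less product XORs its partial products, so 0x07 times 0x03 is (x^2+x+1)(x+1) = x^3+1 = 0x09 rather than the integer 21.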
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c index dedef62403..8017bd88c4 100644 --- a/target/arm/vec_helper.c +++ b/target/arm/vec_helper.c @@ -1046,3 +1046,214 @@ void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm, do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc, get_flush_inputs_to_zero(&env->vfp.fp_status_f16)); } + +void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + int8_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz; ++i) { + int8_t mm = m[i]; + int8_t nn = n[i]; + int8_t res = 0; + if (mm >= 0) { + if (mm < 8) { + res = nn << mm; + } + } else { + res = nn >> (mm > -8 ? -mm : 7); + } + d[i] = res; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + int16_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 2; ++i) { + int8_t mm = m[i]; /* only 8 bits of shift are significant */ + int16_t nn = n[i]; + int16_t res = 0; + if (mm >= 0) { + if (mm < 16) { + res = nn << mm; + } + } else { + res = nn >> (mm > -16 ? -mm : 15); + } + d[i] = res; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint8_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz; ++i) { + int8_t mm = m[i]; + uint8_t nn = n[i]; + uint8_t res = 0; + if (mm >= 0) { + if (mm < 8) { + res = nn << mm; + } + } else { + if (mm > -8) { + res = nn >> -mm; + } + } + d[i] = res; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, opr_sz = simd_oprsz(desc); + uint16_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 2; ++i) { + int8_t mm = m[i]; /* only 8 bits of shift are significant */ + uint16_t nn = n[i]; + uint16_t res = 0; + if (mm >= 0) { + if (mm < 16) { + res = nn << mm; + } + } else { + if (mm > -16) { + res = nn >> -mm; + } + } + d[i] = res; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* + * 8x8->8 polynomial multiply. + * + * Polynomial multiplication is like integer multiplication except the + * partial products are XORed, not added. + * + * TODO: expose this as a generic vector operation, as it is a common + * crypto building block. + */ +void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, j, opr_sz = simd_oprsz(desc); + uint64_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 8; ++i) { + uint64_t nn = n[i]; + uint64_t mm = m[i]; + uint64_t rr = 0; + + for (j = 0; j < 8; ++j) { + uint64_t mask = (nn & 0x0101010101010101ull) * 0xff; + rr ^= mm & mask; + mm = (mm << 1) & 0xfefefefefefefefeull; + nn >>= 1; + } + d[i] = rr; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* + * 64x64->128 polynomial multiply. + * Because of the lanes are not accessed in strict columns, + * this probably cannot be turned into a generic helper. + */ +void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc) +{ + intptr_t i, j, opr_sz = simd_oprsz(desc); + intptr_t hi = simd_data(desc); + uint64_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 8; i += 2) { + uint64_t nn = n[i + hi]; + uint64_t mm = m[i + hi]; + uint64_t rhi = 0; + uint64_t rlo = 0; + + /* Bit 0 can only influence the low 64-bit result. 
*/ + if (nn & 1) { + rlo = mm; + } + + for (j = 1; j < 64; ++j) { + uint64_t mask = -((nn >> j) & 1); + rlo ^= (mm << j) & mask; + rhi ^= (mm >> (64 - j)) & mask; + } + d[i] = rlo; + d[i + 1] = rhi; + } + clear_tail(d, opr_sz, simd_maxsz(desc)); +} + +/* + * 8x8->16 polynomial multiply. + * + * The byte inputs are expanded to (or extracted from) half-words. + * Note that neon and sve2 get the inputs from different positions. + * This allows 4 bytes to be processed in parallel with uint64_t. + */ + +static uint64_t expand_byte_to_half(uint64_t x) +{ + return (x & 0x000000ff) + | ((x & 0x0000ff00) << 8) + | ((x & 0x00ff0000) << 16) + | ((x & 0xff000000) << 24); +} + +static uint64_t pmull_h(uint64_t op1, uint64_t op2) +{ + uint64_t result = 0; + int i; + + for (i = 0; i < 8; ++i) { + uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff; + result ^= op2 & mask; + op1 >>= 1; + op2 <<= 1; + } + return result; +} + +void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + int hi = simd_data(desc); + uint64_t *d = vd, *n = vn, *m = vm; + uint64_t nn = n[hi], mm = m[hi]; + + d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm)); + nn >>= 32; + mm >>= 32; + d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm)); + + clear_tail(d, 16, simd_maxsz(desc)); +} + +#ifdef TARGET_AARCH64 +void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc) +{ + int shift = simd_data(desc) * 8; + intptr_t i, opr_sz = simd_oprsz(desc); + uint64_t *d = vd, *n = vn, *m = vm; + + for (i = 0; i < opr_sz / 8; ++i) { + uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull; + uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull; + + d[i] = pmull_h(nn, mm); + } +} +#endif diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c index 0ae7d4f34a..930d6e747f 100644 --- a/target/arm/vfp_helper.c +++ b/target/arm/vfp_helper.c @@ -185,7 +185,7 @@ uint32_t vfp_get_fpscr(CPUARMState *env) void HELPER(vfp_set_fpscr)(CPUARMState *env, uint32_t val) { /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. 
*/ - if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) { + if (!cpu_isar_feature(any_fp16, env_archcpu(env))) { val &= ~FPCR_FZ16; } diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 3a1eb76004..b283042515 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -23,8 +23,6 @@ #include "qemu/int128.h" #include "exec/cpu-defs.h" #include "cpu-qom.h" -#include "exec/cpu-defs.h" -#include "cpu-qom.h" /* #define PPC_EMULATE_32BITS_HYPV */ @@ -962,117 +960,88 @@ struct ppc_radix_page_info { #define PPC_CPU_INDIRECT_OPCODES_LEN 0x20 struct CPUPPCState { - /* - * First are the most commonly used resources during translated - * code execution - */ - /* general purpose registers */ - target_ulong gpr[32]; - /* Storage for GPR MSB, used by the SPE extension */ - target_ulong gprh[32]; - /* LR */ + /* Most commonly used resources during translated code execution first */ + target_ulong gpr[32]; /* general purpose registers */ + target_ulong gprh[32]; /* storage for GPR MSB, used by the SPE extension */ target_ulong lr; - /* CTR */ target_ulong ctr; - /* condition register */ - uint32_t crf[8]; + uint32_t crf[8]; /* condition register */ #if defined(TARGET_PPC64) - /* CFAR */ target_ulong cfar; #endif - /* XER (with SO, OV, CA split out) */ - target_ulong xer; + target_ulong xer; /* XER (with SO, OV, CA split out) */ target_ulong so; target_ulong ov; target_ulong ca; target_ulong ov32; target_ulong ca32; - /* Reservation address */ - target_ulong reserve_addr; - /* Reservation value */ - target_ulong reserve_val; - target_ulong reserve_val2; - - /* Those ones are used in supervisor mode only */ - /* machine state register */ - target_ulong msr; - /* temporary general purpose registers */ - target_ulong tgpr[4]; /* Used to speed-up TLB assist handlers */ - /* Floating point execution context */ - float_status fp_status; - /* floating point status and control register */ - target_ulong fpscr; + target_ulong reserve_addr; /* Reservation address */ + target_ulong reserve_val; /* Reservation value */ + target_ulong reserve_val2; - /* Next instruction pointer */ - target_ulong nip; + /* These are used in supervisor mode only */ + target_ulong msr; /* machine state register */ + target_ulong tgpr[4]; /* temporary general purpose registers, */ + /* used to speed-up TLB assist handlers */ - /* High part of 128-bit helper return. 
*/ - uint64_t retxh; + target_ulong nip; /* next instruction pointer */ + uint64_t retxh; /* high part of 128-bit helper return */ /* when a memory exception occurs, the access type is stored here */ int access_type; - /* MMU context - only relevant for full system emulation */ #if !defined(CONFIG_USER_ONLY) + /* MMU context, only relevant for full system emulation */ #if defined(TARGET_PPC64) - /* PowerPC 64 SLB area */ - ppc_slb_t slb[MAX_SLB_ENTRIES]; - /* tcg TLB needs flush (deferred slb inval instruction typically) */ + ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */ #endif - /* segment registers */ - target_ulong sr[32]; - /* BATs */ - uint32_t nb_BATs; + target_ulong sr[32]; /* segment registers */ + uint32_t nb_BATs; /* number of BATs */ target_ulong DBAT[2][8]; target_ulong IBAT[2][8]; /* PowerPC TLB registers (for 4xx, e500 and 60x software driven TLBs) */ - int32_t nb_tlb; /* Total number of TLB */ + int32_t nb_tlb; /* Total number of TLB */ int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */ - int nb_ways; /* Number of ways in the TLB set */ - int last_way; /* Last used way used to allocate TLB in a LRU way */ + int nb_ways; /* Number of ways in the TLB set */ + int last_way; /* Last used way used to allocate TLB in a LRU way */ int id_tlbs; /* If 1, MMU has separated TLBs for instructions & data */ - int nb_pids; /* Number of available PID registers */ - int tlb_type; /* Type of TLB we're dealing with */ - ppc_tlb_t tlb; /* TLB is optional. Allocate them only if needed */ - /* 403 dedicated access protection registers */ - target_ulong pb[4]; - bool tlb_dirty; /* Set to non-zero when modifying TLB */ - bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */ + int nb_pids; /* Number of available PID registers */ + int tlb_type; /* Type of TLB we're dealing with */ + ppc_tlb_t tlb; /* TLB is optional. Allocate them only if needed */ + target_ulong pb[4]; /* 403 dedicated access protection registers */ + bool tlb_dirty; /* Set to non-zero when modifying TLB */ + bool kvm_sw_tlb; /* non-zero if KVM SW TLB API is active */ uint32_t tlb_need_flush; /* Delayed flush needed */ #define TLB_NEED_LOCAL_FLUSH 0x1 #define TLB_NEED_GLOBAL_FLUSH 0x2 #endif /* Other registers */ - /* Special purpose registers */ - target_ulong spr[1024]; + target_ulong spr[1024]; /* special purpose registers */ ppc_spr_t spr_cb[1024]; - /* Vector status and control register, minus VSCR_SAT. */ + /* Vector status and control register, minus VSCR_SAT */ uint32_t vscr; /* VSX registers (including FP and AVR) */ ppc_vsr_t vsr[64] QEMU_ALIGNED(16); - /* Non-zero if and only if VSCR_SAT should be set. 
*/ + /* Non-zero if and only if VSCR_SAT should be set */ ppc_vsr_t vscr_sat QEMU_ALIGNED(16); /* SPE registers */ uint64_t spe_acc; uint32_t spe_fscr; - /* - * SPE and Altivec can share a status since they will never be - * used simultaneously - */ + /* SPE and Altivec share status as they'll never be used simultaneously */ float_status vec_status; + float_status fp_status; /* Floating point execution context */ + target_ulong fpscr; /* Floating point status and control register */ /* Internal devices resources */ - /* Time base and decrementer */ - ppc_tb_t *tb_env; - /* Device control registers */ - ppc_dcr_t *dcr_env; + ppc_tb_t *tb_env; /* Time base and decrementer */ + ppc_dcr_t *dcr_env; /* Device control registers */ int dcache_line_size; int icache_line_size; - /* Those resources are used during exception processing */ + /* These resources are used during exception processing */ /* CPU model definition */ target_ulong msr_mask; powerpc_mmu_t mmu_model; @@ -1091,58 +1060,49 @@ struct CPUPPCState { uint32_t pending_interrupts; #if !defined(CONFIG_USER_ONLY) /* - * This is the IRQ controller, which is implementation dependent - * and only relevant when emulating a complete machine. Note that - * this isn't used by recent Book3s compatible CPUs (POWER7 and - * newer). + * This is the IRQ controller, which is implementation dependent and only + * relevant when emulating a complete machine. Note that this isn't used + * by recent Book3s compatible CPUs (POWER7 and newer). */ uint32_t irq_input_state; void **irq_inputs; - /* Exception vectors */ - target_ulong excp_vectors[POWERPC_EXCP_NB]; + + target_ulong excp_vectors[POWERPC_EXCP_NB]; /* Exception vectors */ target_ulong excp_prefix; target_ulong ivor_mask; target_ulong ivpr_mask; target_ulong hreset_vector; hwaddr mpic_iack; - /* true when the external proxy facility mode is enabled */ - bool mpic_proxy; - /* - * set when the processor has an HV mode, thus HV priv - * instructions and SPRs are diallowed if MSR:HV is 0 - */ - bool has_hv_mode; - + bool mpic_proxy; /* true if the external proxy facility mode is enabled */ + bool has_hv_mode; /* set when the processor has an HV mode, thus HV priv */ + /* instructions and SPRs are diallowed if MSR:HV is 0 */ /* - * On P7/P8/P9, set when in PM state, we need to handle resume in - * a special way (such as routing some resume causes to 0x100, ie, - * sreset), so flag this here. + * On P7/P8/P9, set when in PM state so we need to handle resume in a + * special way (such as routing some resume causes to 0x100, i.e. sreset). */ bool resume_as_sreset; #endif - /* Those resources are used only in QEMU core */ - target_ulong hflags; /* hflags is a MSR & HFLAGS_MASK */ + /* These resources are used only in QEMU core */ + target_ulong hflags; /* hflags is MSR & HFLAGS_MASK */ target_ulong hflags_nmsr; /* specific hflags, not coming from MSR */ - int immu_idx; /* precomputed MMU index to speed up insn access */ - int dmmu_idx; /* precomputed MMU index to speed up data accesses */ + int immu_idx; /* precomputed MMU index to speed up insn accesses */ + int dmmu_idx; /* precomputed MMU index to speed up data accesses */ /* Power management */ int (*check_pow)(CPUPPCState *env); #if !defined(CONFIG_USER_ONLY) - void *load_info; /* Holds boot loading state. */ + void *load_info; /* holds boot loading state */ #endif /* booke timers */ /* - * Specifies bit locations of the Time Base used to signal a fixed - * timer exception on a transition from 0 to 1. 
(watchdog or - * fixed-interval timer) + * Specifies bit locations of the Time Base used to signal a fixed timer + * exception on a transition from 0 to 1 (watchdog or fixed-interval timer) * - * 0 selects the least significant bit. - * 63 selects the most significant bit. + * 0 selects the least significant bit, 63 selects the most significant bit */ uint8_t fit_period[4]; uint8_t wdt_period[4]; diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index dc383242f7..ae43b08eb5 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -293,7 +293,7 @@ static void float_invalid_op_vxvc(CPUPPCState *env, bool set_fpcc, env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC; /* Update the floating-point enabled exception summary */ env->fpscr |= FP_FEX; - /* Exception is differed */ + /* Exception is deferred */ } } @@ -644,7 +644,7 @@ static void do_float_check_status(CPUPPCState *env, uintptr_t raddr) if (cs->exception_index == POWERPC_EXCP_PROGRAM && (env->error_code & POWERPC_EXCP_FP)) { - /* Differred floating-point exception after target FPR update */ + /* Deferred floating-point exception after target FPR update */ if (fp_exceptions_enabled(env)) { raise_exception_err_ra(env, cs->exception_index, env->error_code, raddr); diff --git a/target/ppc/translate/fp-impl.inc.c b/target/ppc/translate/fp-impl.inc.c index d8e27bf4d5..9f7868ee28 100644 --- a/target/ppc/translate/fp-impl.inc.c +++ b/target/ppc/translate/fp-impl.inc.c @@ -781,7 +781,7 @@ static void gen_mtfsb1(DisasContext *ctx) tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } - /* We can raise a differed exception */ + /* We can raise a deferred exception */ gen_helper_float_check_status(cpu_env); } @@ -817,7 +817,7 @@ static void gen_mtfsf(DisasContext *ctx) tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } - /* We can raise a differed exception */ + /* We can raise a deferred exception */ gen_helper_float_check_status(cpu_env); tcg_temp_free_i64(t1); } @@ -850,7 +850,7 @@ static void gen_mtfsfi(DisasContext *ctx) tcg_gen_trunc_tl_i32(cpu_crf[1], cpu_fpscr); tcg_gen_shri_i32(cpu_crf[1], cpu_crf[1], FPSCR_OX); } - /* We can raise a differed exception */ + /* We can raise a deferred exception */ gen_helper_float_check_status(cpu_env); } diff --git a/tests/qemu-iotests/122 b/tests/qemu-iotests/122 index dfa350936f..f7a3ae684a 100755 --- a/tests/qemu-iotests/122 +++ b/tests/qemu-iotests/122 @@ -276,6 +276,20 @@ $QEMU_IMG convert -O $IMGFMT -n "$TEST_IMG" "$TEST_IMG".orig $QEMU_IMG compare "$TEST_IMG" "$TEST_IMG".orig +echo +echo '=== -n -B to an image without a backing file ===' +echo + +# Base for the output +TEST_IMG="$TEST_IMG".base _make_test_img 64M + +# Output that does have $TEST_IMG.base set as its (implicit) backing file +TEST_IMG="$TEST_IMG".orig _make_test_img 64M + +# Convert with -n, which should not confuse -B with "target BDS has a +# backing file" +$QEMU_IMG convert -O $IMGFMT -B "$TEST_IMG".base -n "$TEST_IMG" "$TEST_IMG".orig + # success, all done echo '*** done' rm -f $seq.full diff --git a/tests/qemu-iotests/122.out b/tests/qemu-iotests/122.out index 849b6cc2ef..1a35951a80 100644 --- a/tests/qemu-iotests/122.out +++ b/tests/qemu-iotests/122.out @@ -228,4 +228,9 @@ Formatting 'TEST_DIR/t.IMGFMT.orig', fmt=IMGFMT size=67108864 wrote 65536/65536 bytes at offset 0 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) Images are identical. 
+ +=== -n -B to an image without a backing file === + +Formatting 'TEST_DIR/t.IMGFMT.base', fmt=IMGFMT size=67108864 +Formatting 'TEST_DIR/t.IMGFMT.orig', fmt=IMGFMT size=67108864 *** done diff --git a/tests/qemu-iotests/139 b/tests/qemu-iotests/139 index 6b1a444364..7120d3142b 100755 --- a/tests/qemu-iotests/139 +++ b/tests/qemu-iotests/139 @@ -344,9 +344,6 @@ class TestBlockdevDel(iotests.QMPTestCase): @iotests.skip_if_unsupported(['quorum']) def testQuorum(self): - if not iotests.supports_quorum(): - return - self.addQuorum('quorum0', 'node0', 'node1') # We cannot remove the children of a Quorum device self.delBlockDriverState('node0', expect_error = True) diff --git a/tests/qemu-iotests/147 b/tests/qemu-iotests/147 index f4b0a11dba..d7a9f31089 100755 --- a/tests/qemu-iotests/147 +++ b/tests/qemu-iotests/147 @@ -134,7 +134,7 @@ class BuiltinNBD(NBDBlockdevAddBase): self.server.add_drive_raw('if=none,id=nbd-export,' + 'file=%s,' % test_img + 'format=%s,' % imgfmt + - 'cache=%s' % cachemode + + 'cache=%s,' % cachemode + 'aio=%s' % aiomode) self.server.launch() diff --git a/tests/qemu-iotests/259 b/tests/qemu-iotests/259 new file mode 100755 index 0000000000..62e29af05f --- /dev/null +++ b/tests/qemu-iotests/259 @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# +# Test generic image creation fallback (by using NBD) +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +# creator +owner=mreitz@redhat.com + +seq=$(basename $0) +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. 
./common.filter + +_supported_fmt raw +_supported_proto nbd +_supported_os Linux + + +_make_test_img 64M + +echo +echo '--- Testing creation ---' + +$QEMU_IMG create -f qcow2 "$TEST_IMG" 64M | _filter_img_create +$QEMU_IMG info "$TEST_IMG" | _filter_img_info + +echo +echo '--- Testing creation for which the node would need to grow ---' + +# NBD does not support resizing, so this will fail +$QEMU_IMG create -f qcow2 -o preallocation=metadata "$TEST_IMG" 64M 2>&1 \ + | _filter_img_create + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/259.out b/tests/qemu-iotests/259.out new file mode 100644 index 0000000000..ffed19c2a0 --- /dev/null +++ b/tests/qemu-iotests/259.out @@ -0,0 +1,14 @@ +QA output created by 259 +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 + +--- Testing creation --- +Formatting 'TEST_DIR/t.IMGFMT', fmt=qcow2 size=67108864 +image: TEST_DIR/t.IMGFMT +file format: qcow2 +virtual size: 64 MiB (67108864 bytes) +disk size: unavailable + +--- Testing creation for which the node would need to grow --- +qemu-img: TEST_DIR/t.IMGFMT: Could not resize image: Image format driver does not support resize +Formatting 'TEST_DIR/t.IMGFMT', fmt=qcow2 size=67108864 preallocation=metadata +*** done diff --git a/tests/qemu-iotests/279 b/tests/qemu-iotests/279 index 6682376808..30d29b1cb2 100755 --- a/tests/qemu-iotests/279 +++ b/tests/qemu-iotests/279 @@ -38,6 +38,8 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 _supported_fmt qcow qcow2 vmdk qed _supported_proto file _supported_os Linux +_unsupported_imgopts "subformat=monolithicFlat" \ + "subformat=twoGbMaxExtentFlat" \ TEST_IMG="$TEST_IMG.base" _make_test_img 64M TEST_IMG="$TEST_IMG.mid" _make_test_img -b "$TEST_IMG.base" @@ -45,11 +47,12 @@ _make_test_img -b "$TEST_IMG.mid" echo echo '== qemu-img info --backing-chain ==' -_img_info --backing-chain | _filter_img_info +_img_info --backing-chain | _filter_img_info | grep -v 'backing file format' echo echo '== qemu-img info --backing-chain --image-opts ==' -TEST_IMG="driver=qcow2,file.driver=file,file.filename=$TEST_IMG" _img_info --backing-chain --image-opts | _filter_img_info +TEST_IMG="driver=$IMGFMT,file.driver=file,file.filename=$TEST_IMG" _img_info --backing-chain --image-opts \ + | _filter_img_info | grep -v 'backing file format' # success, all done echo "*** done" diff --git a/tests/qemu-iotests/284 b/tests/qemu-iotests/284 new file mode 100755 index 0000000000..071e89b33e --- /dev/null +++ b/tests/qemu-iotests/284 @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# +# Test ref count checks on encrypted images +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +# creator +owner=berrange@redhat.com + +seq=`basename $0` +echo "QA output created by $seq" + +status=1 # failure is the default! 
+ +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter + +_supported_fmt qcow2 +_supported_proto generic +_supported_os Linux + + +size=1M + +SECRET="secret,id=sec0,data=astrochicken" + +IMGSPEC="driver=$IMGFMT,file.filename=$TEST_IMG,encrypt.key-secret=sec0" +QEMU_IO_OPTIONS=$QEMU_IO_OPTIONS_NO_FMT + +_run_test() +{ + IMGOPTSSYNTAX=true + OLD_TEST_IMG="$TEST_IMG" + TEST_IMG="driver=$IMGFMT,file.filename=$TEST_IMG,encrypt.key-secret=sec0" + QEMU_IMG_EXTRA_ARGS="--image-opts --object $SECRET" + + echo + echo "== cluster size $csize" + echo "== checking image refcounts ==" + _check_test_img + + echo + echo "== writing some data ==" + $QEMU_IO -c "write -P 0x9 0 1" $QEMU_IMG_EXTRA_ARGS $TEST_IMG | _filter_qemu_io | _filter_testdir + echo + echo "== rechecking image refcounts ==" + _check_test_img + + echo + echo "== writing some more data ==" + $QEMU_IO -c "write -P 0x9 $csize 1" $QEMU_IMG_EXTRA_ARGS $TEST_IMG | _filter_qemu_io | _filter_testdir + echo + echo "== rechecking image refcounts ==" + _check_test_img + + TEST_IMG="$OLD_TEST_IMG" + QEMU_IMG_EXTRA_ARGS= + IMGOPTSSYNTAX= +} + + +echo +echo "testing LUKS qcow2 encryption" +echo + +for csize in 512 2048 32768 +do + _make_test_img --object $SECRET -o "encrypt.format=luks,encrypt.key-secret=sec0,encrypt.iter-time=10,cluster_size=$csize" $size + _run_test + _cleanup_test_img +done + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/284.out b/tests/qemu-iotests/284.out new file mode 100644 index 0000000000..48216f5742 --- /dev/null +++ b/tests/qemu-iotests/284.out @@ -0,0 +1,62 @@ +QA output created by 284 + +testing LUKS qcow2 encryption + +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 encrypt.format=luks encrypt.key-secret=sec0 encrypt.iter-time=10 + +== cluster size 512 +== checking image refcounts == +No errors were found on the image. + +== writing some data == +wrote 1/1 bytes at offset 0 +1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== rechecking image refcounts == +No errors were found on the image. + +== writing some more data == +wrote 1/1 bytes at offset 512 +1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== rechecking image refcounts == +No errors were found on the image. +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 encrypt.format=luks encrypt.key-secret=sec0 encrypt.iter-time=10 + +== cluster size 2048 +== checking image refcounts == +No errors were found on the image. + +== writing some data == +wrote 1/1 bytes at offset 0 +1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== rechecking image refcounts == +No errors were found on the image. + +== writing some more data == +wrote 1/1 bytes at offset 2048 +1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== rechecking image refcounts == +No errors were found on the image. +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 encrypt.format=luks encrypt.key-secret=sec0 encrypt.iter-time=10 + +== cluster size 32768 +== checking image refcounts == +No errors were found on the image. + +== writing some data == +wrote 1/1 bytes at offset 0 +1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== rechecking image refcounts == +No errors were found on the image. 
+ +== writing some more data == +wrote 1/1 bytes at offset 32768 +1 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) + +== rechecking image refcounts == +No errors were found on the image. +*** done diff --git a/tests/qemu-iotests/286 b/tests/qemu-iotests/286 new file mode 100755 index 0000000000..f14445ba4a --- /dev/null +++ b/tests/qemu-iotests/286 @@ -0,0 +1,76 @@ +#!/usr/bin/env bash +# +# Test qemu-img snapshot -l +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +seq=$(basename "$0") +echo "QA output created by $seq" + +status=1 # failure is the default! + +_cleanup() +{ + _cleanup_test_img +} +trap "_cleanup; exit \$status" 0 1 2 3 15 + +# get standard environment, filters and checks +. ./common.rc +. ./common.filter +. ./common.qemu + +_supported_fmt qcow2 +_supported_proto file +# Internal snapshots are (currently) impossible with refcount_bits=1, +# and generally impossible with external data files +_unsupported_imgopts 'refcount_bits=1[^0-9]' data_file + +_make_test_img 64M + +# Should be so long as to take up the whole field width +sn_name=abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz + +# More memory will give us a larger VM state, i.e. one above 1 MB. +# This way, we get a number with a decimal point. +qemu_comm_method=monitor _launch_qemu -m 512 "$TEST_IMG" + +_send_qemu_cmd $QEMU_HANDLE "savevm $sn_name" '(qemu)' +_send_qemu_cmd $QEMU_HANDLE 'quit' '(qemu)' +wait=yes _cleanup_qemu + +# Check that all fields are separated by spaces. +# We first collapse all space sequences into one space each; +# then we turn every space-separated field into a '.'; +# and finally, we name the '.'s so the output is not just a confusing +# sequence of dots. 
+ +echo 'Output structure:' +$QEMU_IMG snapshot -l "$TEST_IMG" | tail -n 1 | tr -s ' ' \ + | sed -e 's/\S\+/./g' \ + | sed -e 's/\./(snapshot ID)/' \ + -e 's/\./(snapshot name)/' \ + -e 's/\./(VM state size value)/' \ + -e 's/\./(VM state size unit)/' \ + -e 's/\./(snapshot date)/' \ + -e 's/\./(snapshot time)/' \ + -e 's/\./(VM clock)/' + +# success, all done +echo "*** done" +rm -f $seq.full +status=0 diff --git a/tests/qemu-iotests/286.out b/tests/qemu-iotests/286.out new file mode 100644 index 0000000000..39ff07e12c --- /dev/null +++ b/tests/qemu-iotests/286.out @@ -0,0 +1,8 @@ +QA output created by 286 +Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 +QEMU X.Y.Z monitor - type 'help' for more information +(qemu) savevm abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz +(qemu) quit +Output structure: +(snapshot ID) (snapshot name) (VM state size value) (VM state size unit) (snapshot date) (snapshot time) (VM clock) +*** done diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index 1904223020..0317667695 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -273,6 +273,7 @@ 256 rw auto quick 257 rw 258 rw quick +259 rw auto quick 260 rw quick 261 rw 262 rw quick migration @@ -290,3 +291,5 @@ 280 rw migration quick 281 rw quick 283 auto quick +284 rw +286 rw quick diff --git a/util/Makefile.objs b/util/Makefile.objs index 11262aafaf..6b38b67cf1 100644 --- a/util/Makefile.objs +++ b/util/Makefile.objs @@ -20,6 +20,7 @@ util-obj-y += envlist.o path.o module.o util-obj-y += host-utils.o util-obj-y += bitmap.o bitops.o hbitmap.o util-obj-y += fifo8.o +util-obj-y += nvdimm-utils.o util-obj-y += cacheinfo.o util-obj-y += error.o qemu-error.o util-obj-y += qemu-print.o diff --git a/util/nvdimm-utils.c b/util/nvdimm-utils.c new file mode 100644 index 0000000000..5cc768ca47 --- /dev/null +++ b/util/nvdimm-utils.c @@ -0,0 +1,29 @@ +#include "qemu/nvdimm-utils.h" +#include "hw/mem/nvdimm.h" + +static int nvdimm_device_list(Object *obj, void *opaque) +{ + GSList **list = opaque; + + if (object_dynamic_cast(obj, TYPE_NVDIMM)) { + *list = g_slist_append(*list, DEVICE(obj)); + } + + object_child_foreach(obj, nvdimm_device_list, opaque); + return 0; +} + +/* + * inquire NVDIMM devices and link them into the list which is + * returned to the caller. + * + * Note: it is the caller's responsibility to free the list to avoid + * memory leak. + */ +GSList *nvdimm_get_device_list(void) +{ + GSList *list = NULL; + + object_child_foreach(qdev_get_machine(), nvdimm_device_list, &list); + return list; +} |
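As the comment above notes, nvdimm_get_device_list() hands ownership of the returned list (though not of the devices it points to) to the caller. Purely as an illustration, with a made-up function name, a caller could walk and release the list like this:

#include "qemu/nvdimm-utils.h"
#include "hw/mem/nvdimm.h"

/* Illustrative only: visit every NVDIMM device, then drop the list. */
static void walk_nvdimms_example(void)
{
    GSList *list = nvdimm_get_device_list();
    GSList *l;

    for (l = list; l; l = l->next) {
        NVDIMMDevice *nvdimm = NVDIMM(l->data);
        /* ... inspect or configure *nvdimm here ... */
        (void)nvdimm;
    }

    /* Shallow free: the devices themselves stay owned by the QOM tree. */
    g_slist_free(list);
}

Only the list cells are freed here; dropping the devices themselves would be wrong, since the list merely borrows pointers collected from the machine's QOM composition tree.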