diff options
Diffstat (limited to 'gitlab/issues/target_missing/host_missing/accel_missing/2379.toml')
| -rw-r--r-- | gitlab/issues/target_missing/host_missing/accel_missing/2379.toml | 134 |
1 file changed, 134 insertions, 0 deletions
diff --git a/gitlab/issues/target_missing/host_missing/accel_missing/2379.toml b/gitlab/issues/target_missing/host_missing/accel_missing/2379.toml new file mode 100644 index 000000000..1ac676cb6 --- /dev/null +++ b/gitlab/issues/target_missing/host_missing/accel_missing/2379.toml @@ -0,0 +1,134 @@ +id = 2379 +title = "virHashRemoveAll remove all jobs in priv->blockjobs but not set disk->priv->blockjob is null for qemuDomainObjPrivateDataClear and qemuProcessStop" +state = "closed" +created_at = "2024-06-03T07:08:44.020Z" +closed_at = "2024-06-03T09:13:43.342Z" +labels = [] +url = "https://gitlab.com/qemu-project/qemu/-/issues/2379" +host-os = "- OS/kernel version:" +host-arch = "- QEMU flavor:" +qemu-version = "- QEMU command line:" +guest-os = "- OS/kernel version:" +guest-arch = "## Description of problem" +description = """it call virHashRemoveAll to remove all jobs in priv->blockjobs but the disk privateData blockjob is not null for qemuDomainObjPrivateDataClear and qemuProcessStop. when virHashRemoveAll is caled, accessing priv->blockjob cause segfault in others.""" +reproduce = """1. virsh blockcopy testvm vda /root/disk/centos7-copy.qcow2 --wait --verbose --pivot + migrate disk of vm +2. poweoff in guest vm +3. libvirt core dump""" +additional = """--Type <RET> for more, q to quit, c to continue without paging-- +\tProgram terminated with signal SIGSEGV, Segmentation fault. 
+\t#0 qemuBlockJobUnregister (vm=0x7f823c045050, job=0x7f827c03ca90) at ../src/qemu/qemu_blockjob.c:211 +\t211 if (job == diskPriv->blockjob) { +\t[Current thread is 1 (Thread 0x7f8283640640 (LWP 152))] +\t(gdb) bt +\t#0 qemuBlockJobUnregister (vm=0x7f823c045050, job=0x7f827c03ca90) at ../src/qemu/qemu_blockjob.c:211 +\t#1 qemuBlockJobEventProcessConcluded (asyncJob=VIR_ASYNC_JOB_MIGRATION_OUT, vm=<optimized out>, driver=<optimized out>, +\t\tjob=0x7f827c03ca90) at ../src/qemu/qemu_blockjob.c:1678 +\t#2 qemuBlockJobEventProcess (asyncJob=VIR_ASYNC_JOB_MIGRATION_OUT, job=0x7f827c03ca90, vm=<optimized out>, +\t\tdriver=<optimized out>) at ../src/qemu/qemu_blockjob.c:1703 +\t#3 qemuBlockJobUpdate (vm=<optimized out>, job=0x7f827c03ca90, asyncJob=1) at ../src/qemu/qemu_blockjob.c:1756 +\t#4 0x00007f828050c95f in qemuMigrationSrcNBDStorageCopyReady (vm=0x7f823c045050, asyncJob=VIR_ASYNC_JOB_MIGRATION_OUT) +\t\tat ../src/qemu/qemu_migration.c:605 +\t#5 0x00007f8280518ca5 in qemuMigrationSrcNBDStorageCopy (flags=587, nbdURI=<optimized out>, tlsHostname=0x7f823c2b51d0 "", +\t\ttlsAlias=<optimized out>, dconn=0x7f823c014790, migrate_disks=0x7f827c006660, nmigrate_disks=2, speed=<optimized out>, +\t\thost=0x7f827c0156a0 "10.253.160.196", mig=0x7f827c027a30, vm=0x7f823c045050, driver=0x7f823c01ac40) +\t\tat ../src/qemu/qemu_migration.c:1202 +\t#6 qemuMigrationSrcRun (driver=0x7f823c01ac40, vm=0x7f823c045050, persist_xml=<optimized out>, cookiein=<optimized out>, +\t\tcookieinlen=<optimized out>, cookieout=0x7f828363f500, cookieoutlen=0x7f828363f4d4, flags=587, resource=1024, +\t\tspec=0x7f828363f330, dconn=0x7f823c014790, graphicsuri=0x0, nmigrate_disks=2, migrate_disks=0x7f827c006660, +\t\tmigParams=0x7f827c00d890, nbdURI=0x0) at ../src/qemu/qemu_migration.c:4167 +\t#7 0x00007f828051a5dd in qemuMigrationSrcPerformNative (driver=0x7f823c01ac40, vm=0x7f823c045050, +\t\tpersist_xml=0x7f827c020660 "<domain type=\\"kvm\\">\\n <name>default_vm-8altm</name>\\n 
<uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <metadata>\\n <nodeagent xmlns=\\"http://kubevirt.io/node-agent.io\\">\\n <vmid>13fb0e90-2930-"..., +\t\turi=<optimized out>, +\t\tcookiein=0x7f827c0519e0 "<qemu-migration>\\n <name>default_vm-8altm</name>\\n <uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <hostname>ceasphere-node-1</hostname>\\n <hostuuid>5b0a0842-6535-27c1-b2e7-89c4ac4fd785</hostuuid>"..., +\t\tcookieinlen=876, cookieout=0x7f828363f500, cookieoutlen=0x7f828363f4d4, flags=587, resource=1024, dconn=0x7f823c014790, +\t\tgraphicsuri=0x0, nmigrate_disks=2, migrate_disks=0x7f827c006660, migParams=0x7f827c00d890, nbdURI=0x0) +\t\tat ../src/qemu/qemu_migration.c:4506 +\t#8 0x00007f828051c3e3 in qemuMigrationSrcPerformPeer2Peer3 (flags=<optimized out>, useParams=true, bandwidth=<optimized out>, +\t\tmigParams=0x7f827c00d890, nbdURI=0x0, nbdPort=0, migrate_disks=0x7f827c006660, nmigrate_disks=<optimized out>, +\t\tlistenAddress=<optimized out>, graphicsuri=0x0, uri=<optimized out>, dname=0x0, +\t\tpersist_xml=0x7f827c020660 "<domain type=\\"kvm\\">\\n <name>default_vm-8altm</name>\\n <uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <metadata>\\n <nodeagent xmlns=\\"http://kubevirt.io/node-agent.io\\">\\n <vmid>13fb0e90-2930-"..., +\t\txmlin=<optimized out>, vm=0x7f823c045050, +\t\tdconnuri=0x7f827c00c2b0 "qemu+unix:///system?socket=/var/run/kubevirt/migrationproxy/13fb0e90-2930-4f0b-959a-cc40346e7d64-source.sock", dconn=0x7f823c014790, sconn=0x7f826c00e890, driver=0x7f823c01ac40) at ../src/qemu/qemu_migration.c:4925 +\t#9 qemuMigrationSrcPerformPeer2Peer (v3proto=<synthetic pointer>, resource=<optimized out>, dname=0x0, flags=587, +\t\tmigParams=0x7f827c00d890, nbdURI=0x0, nbdPort=0, migrate_disks=0x7f827c006660, nmigrate_disks=<optimized out>, +\t\tlistenAddress=<optimized out>, graphicsuri=0x0, uri=<optimized out>, +\t\tdconnuri=0x7f827c00c2b0 
"qemu+unix:///system?socket=/var/run/kubevirt/migrationproxy/13fb0e90-2930-4f0b-959a-cc40346e7d64-source.sock", +\t--Type <RET> for more, q to quit, c to continue without paging-- +\t\tpersist_xml=0x7f827c020660 "<domain type=\\"kvm\\">\\n <name>default_vm-8altm</name>\\n <uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <metadata>\\n <nodeagent xmlns=\\"http://kubevirt.io/node-agent.io\\">\\n <vmid>13fb0e90-2930-"..., +\t\txmlin=<optimized out>, vm=0x7f823c045050, sconn=0x7f826c00e890, driver=0x7f823c01ac40) at ../src/qemu/qemu_migration.c:5230 +\t#10 qemuMigrationSrcPerformJob (driver=0x7f823c01ac40, conn=0x7f826c00e890, vm=0x7f823c045050, xmlin=<optimized out>, +\t\tpersist_xml=0x7f827c020660 "<domain type=\\"kvm\\">\\n <name>default_vm-8altm</name>\\n <uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <metadata>\\n <nodeagent xmlns=\\"http://kubevirt.io/node-agent.io\\">\\n <vmid>13fb0e90-2930-"..., +\t\tdconnuri=0x7f827c00c2b0 "qemu+unix:///system?socket=/var/run/kubevirt/migrationproxy/13fb0e90-2930-4f0b-959a-cc40346e7d64-source.sock", uri=<optimized out>, graphicsuri=<optimized out>, listenAddress=<optimized out>, nmigrate_disks=<optimized out>, +\t\tmigrate_disks=<optimized out>, nbdPort=0, nbdURI=<optimized out>, migParams=<optimized out>, cookiein=<optimized out>, +\t\tcookieinlen=0, cookieout=<optimized out>, cookieoutlen=<optimized out>, flags=<optimized out>, dname=<optimized out>, +\t\tresource=<optimized out>, v3proto=<optimized out>) at ../src/qemu/qemu_migration.c:5307 +\t#11 0x00007f828051cce7 in qemuMigrationSrcPerform (driver=0x7f823c01ac40, conn=0x7f826c00e890, vm=0x7f823c045050, +\t\txmlin=0x7f827c01e630 "<domain type=\\"kvm\\">\\n <name>default_vm-8altm</name>\\n <uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <metadata>\\n <nodeagent xmlns=\\"http://kubevirt.io/node-agent.io\\">\\n <vmid>13fb0e90-2930-"..., +\t\tpersist_xml=0x7f827c020660 "<domain type=\\"kvm\\">\\n <name>default_vm-8altm</name>\\n 
<uuid>4a40fa64-fd9b-5078-8574-3ce5d0041d31</uuid>\\n <metadata>\\n <nodeagent xmlns=\\"http://kubevirt.io/node-agent.io\\">\\n <vmid>13fb0e90-2930-"..., +\t\tdconnuri=0x7f827c00c2b0 "qemu+unix:///system?socket=/var/run/kubevirt/migrationproxy/13fb0e90-2930-4f0b-959a-cc40346e7d64-source.sock", uri=0x556a1f856b20 "tcp://10.253.160.196:27939", graphicsuri=0x0, listenAddress=0x0, nmigrate_disks=2, +\t\tmigrate_disks=0x7f827c006660, nbdPort=0, nbdURI=0x0, migParams=0x7f827c00d890, cookiein=0x0, cookieinlen=0, +\t\tcookieout=0x7f828363f8a8, cookieoutlen=0x7f828363f89c, flags=587, dname=0x0, resource=1024, v3proto=true) +\t\tat ../src/qemu/qemu_migration.c:5513 +\t#12 0x00007f82804e34d0 in qemuDomainMigratePerform3Params (dom=0x7f8268002ee0, +\t\tdconnuri=0x7f827c00c2b0 "qemu+unix:///system?socket=/var/run/kubevirt/migrationproxy/13fb0e90-2930-4f0b-959a-cc40346e7d64-source.sock", params=0x7f827c01e380, nparams=7, cookiein=0x0, cookieinlen=0, cookieout=0x7f828363f8a8, +\t\tcookieoutlen=0x7f828363f89c, flags=587) at ../src/qemu/qemu_driver.c:11796 +\t#13 0x00007f82853256d6 in virDomainMigratePerform3Params (domain=domain@entry=0x7f8268002ee0, +\t\tdconnuri=0x7f827c00c2b0 "qemu+unix:///system?socket=/var/run/kubevirt/migrationproxy/13fb0e90-2930-4f0b-959a-cc40346e7d64-source.sock", params=<optimized out>, nparams=7, cookiein=0x0, cookieinlen=0, cookieout=0x7f828363f8a8, +\t\tcookieoutlen=0x7f828363f89c, flags=587) at ../src/libvirt-domain.c:5165 +\t#14 0x0000556a1f200f17 in remoteDispatchDomainMigratePerform3Params (server=<optimized out>, msg=0x556a1f86ba40, +\t\tret=0x7f827c0197f0, args=0x7f827c019520, rerr=0x7f828363f9a0, client=<optimized out>) +\t\tat ../src/remote/remote_daemon_dispatch.c:5710 +\t#15 remoteDispatchDomainMigratePerform3ParamsHelper (server=<optimized out>, client=<optimized out>, msg=0x556a1f86ba40, +\t\trerr=0x7f828363f9a0, args=0x7f827c019520, ret=0x7f827c0197f0) at src/remote/remote_daemon_dispatch_stubs.h:8761 +\t#16 0x00007f828522c676 in 
virNetServerProgramDispatchCall (msg=0x556a1f86ba40, client=0x556a1f85b510, server=0x556a1f84c080, +\t\tprog=0x556a1f850410) at ../src/rpc/virnetserverprogram.c:428 +\t#17 virNetServerProgramDispatch (prog=0x556a1f850410, server=0x556a1f84c080, client=0x556a1f85b510, msg=0x556a1f86ba40) +\t\tat ../src/rpc/virnetserverprogram.c:302 +\t--Type <RET> for more, q to quit, c to continue without paging-- +\t#18 0x00007f82852331d8 in virNetServerProcessMsg (msg=<optimized out>, prog=<optimized out>, client=<optimized out>, +\t\tsrv=0x556a1f84c080) at ../src/rpc/virnetserver.c:140 +\t#19 virNetServerHandleJob (jobOpaque=0x556a1f861f90, opaque=0x556a1f84c080) at ../src/rpc/virnetserver.c:160 +\t#20 0x00007f8285170653 in virThreadPoolWorker (opaque=<optimized out>) at ../src/util/virthreadpool.c:164 +\t#21 0x00007f828516fc09 in virThreadHelper (data=<optimized out>) at ../src/util/virthread.c:256 +\t#22 0x00007f8284b10802 in start_thread () from /lib64/libc.so.6 +\t#23 0x00007f8284ab0450 in clone3 () from /lib64/libc.so.6 + + +\t(gdb) p job +\t$1 = (qemuBlockJobData *) 0x7f827c03ca90 +\t(gdb) p *job +\t$2 = {parent = {parent_instance = {g_type_instance = {g_class = 0x7f827c00dc90}, ref_count = 1, qdata = 0x0}}, +\t name = 0x7f827c038cd0 "drive-ua-vol-vm-8altm", disk = 0x7f823c0475c0, chain = 0x556a1f8548f0, mirrorChain = 0x0, +\t jobflags = 0, jobflagsmissing = false, data = {pull = {base = 0x0}, commit = {topparent = 0x0, top = 0x0, base = 0x0, +\t\t deleteCommittedImages = false}, create = {storage = false, src = 0x0}, copy = {shallownew = false}, backup = { +\t\t store = 0x0, bitmap = 0x0}}, type = 2, state = 5, errmsg = 0x0, synchronous = true, newstate = 6, brokentype = 0, +\t invalidData = false, reconnected = false} +\t(gdb) p *job->disk +\t$3 = {src = 0x7f823c047, privateData = 0xffe8eec3390edb93, device = VIR_DOMAIN_DISK_DEVICE_DISK, +\t bus = VIR_DOMAIN_DISK_BUS_VIRTIO, dst = 0x7f823c047300 "\\327_'ą\\177", tray_status = VIR_DOMAIN_DISK_TRAY_CLOSED, +\t removable = 
VIR_TRISTATE_SWITCH_ABSENT, rotation_rate = 0, mirror = 0x0, mirrorState = 0, mirrorJob = 0, geometry = { +\t\tcylinders = 0, heads = 0, sectors = 0, trans = VIR_DOMAIN_DISK_TRANS_DEFAULT}, blockio = {logical_block_size = 0, +\t\tphysical_block_size = 0}, blkdeviotune = {total_bytes_sec = 0, read_bytes_sec = 0, write_bytes_sec = 0, +\t\ttotal_iops_sec = 0, read_iops_sec = 0, write_iops_sec = 0, total_bytes_sec_max = 0, read_bytes_sec_max = 0, +\t\twrite_bytes_sec_max = 0, total_iops_sec_max = 0, read_iops_sec_max = 0, write_iops_sec_max = 0, size_iops_sec = 0, +\t\tgroup_name = 0x0, total_bytes_sec_max_length = 0, read_bytes_sec_max_length = 0, write_bytes_sec_max_length = 0, +\t\ttotal_iops_sec_max_length = 0, read_iops_sec_max_length = 0, write_iops_sec_max_length = 0}, +\t driverName = 0x7f823c047270 "\\267\\262'ą\\177", serial = 0x0, wwn = 0x0, vendor = 0x0, product = 0x0, +\t cachemode = VIR_DOMAIN_DISK_CACHE_DISABLE, error_policy = VIR_DOMAIN_DISK_ERROR_POLICY_RETRY, +\t rerror_policy = VIR_DOMAIN_DISK_ERROR_POLICY_DEFAULT, retry_interval = 1000, retry_timeout = 0, +\t iomode = VIR_DOMAIN_DISK_IO_NATIVE, ioeventfd = VIR_TRISTATE_SWITCH_ABSENT, event_idx = VIR_TRISTATE_SWITCH_ABSENT, +\t copy_on_read = VIR_TRISTATE_SWITCH_ABSENT, snapshot = VIR_DOMAIN_SNAPSHOT_LOCATION_DEFAULT, +\t startupPolicy = VIR_DOMAIN_STARTUP_POLICY_DEFAULT, transient = false, transientShareBacking = VIR_TRISTATE_BOOL_ABSENT, +\t info = {alias = 0x0, type = 0, addr = {pci = {domain = 0, bus = 0, slot = 0, function = 0, +\t\t\tmulti = VIR_TRISTATE_SWITCH_ABSENT, extFlags = 0, zpci = {uid = {value = 0, isSet = false}, fid = {value = 0, +\t\t\t\tisSet = false}}}, drive = {controller = 0, bus = 0, target = 0, unit = 0, diskbus = 0}, vioserial = { +\t\t\tcontroller = 0, bus = 0, port = 0}, ccid = {controller = 0, slot = 0}, usb = {bus = 0, port = {0, 0, 0, 0}}, +\t\t spaprvio = {reg = 0, has_reg = false}, ccw = {cssid = 0, ssid = 0, devno = 0, assigned = false}, isa = {iobase = 0, +\t\t\tirq 
= 0}, dimm = {slot = 0, base = 0}}, mastertype = 0, master = {usb = {startport = 0}}, +\t\tromenabled = VIR_TRISTATE_BOOL_ABSENT, rombar = VIR_TRISTATE_SWITCH_ABSENT, romfile = 0x0, bootIndex = 1, +\t\teffectiveBootIndex = 1, acpiIndex = 0, pciConnectFlags = 9, pciAddrExtFlags = 0, loadparm = 0x0, isolationGroup = 0, +\t\tisolationGroupLocked = false}, rawio = VIR_TRISTATE_BOOL_ABSENT, sgio = VIR_DOMAIN_DEVICE_SGIO_DEFAULT, +\t discard = VIR_DOMAIN_DISK_DISCARD_DEFAULT, iothread = 1, detect_zeroes = VIR_DOMAIN_DISK_DETECT_ZEROES_DEFAULT, +\t domain_name = 0x0, queues = 0, queue_size = 0, model = VIR_DOMAIN_DISK_MODEL_DEFAULT, virtio = 0x7f823c047170, +\t\t diskElementAuth = false, diskElementEnc = false}""" |