Diffstat (limited to 'scripts')
 scripts/analyse-locks-simpletrace.py (new, executable) | 99
 scripts/analyze-migration.py                           |  4
 scripts/checkpatch.pl                                  |  6
 scripts/device-crash-test                              |  8
 scripts/dump-guest-memory.py                           |  2
 scripts/qemu-gdb.py                                    |  4
 scripts/qemugdb/tcg.py (new)                           | 46
 scripts/qemugdb/timers.py (new)                        | 54
 8 files changed, 213 insertions(+), 10 deletions(-)
diff --git a/scripts/analyse-locks-simpletrace.py b/scripts/analyse-locks-simpletrace.py
new file mode 100755
index 0000000000..101e84dea5
--- /dev/null
+++ b/scripts/analyse-locks-simpletrace.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Analyse lock events and compute statistics
+#
+# Author: Alex Bennée <alex.bennee@linaro.org>
+#
+
+import os
+import simpletrace
+import argparse
+import numpy as np
+
+class MutexAnalyser(simpletrace.Analyzer):
+    "A simpletrace Analyser for checking locks."
+
+    def __init__(self):
+        self.locks = 0
+        self.locked = 0
+        self.unlocks = 0
+        self.mutex_records = {}
+
+    def _get_mutex(self, mutex):
+        if mutex not in self.mutex_records:
+            self.mutex_records[mutex] = {"locks": 0,
+                                         "lock_time": 0,
+                                         "acquire_times": [],
+                                         "locked": 0,
+                                         "locked_time": 0,
+                                         "held_times": [],
+                                         "unlocked": 0}
+
+        return self.mutex_records[mutex]
+
+    def qemu_mutex_lock(self, timestamp, mutex, filename, line):
+        self.locks += 1
+        rec = self._get_mutex(mutex)
+        rec["locks"] += 1
+        rec["lock_time"] = timestamp[0]
+        rec["lock_loc"] = (filename, line)
+
+    def qemu_mutex_locked(self, timestamp, mutex, filename, line):
+        self.locked += 1
+        rec = self._get_mutex(mutex)
+        rec["locked"] += 1
+        rec["locked_time"] = timestamp[0]
+        acquire_time = rec["locked_time"] - rec["lock_time"]
+        rec["locked_loc"] = (filename, line)
+        rec["acquire_times"].append(acquire_time)
+
+    def qemu_mutex_unlock(self, timestamp, mutex, filename, line):
+        self.unlocks += 1
+        rec = self._get_mutex(mutex)
+        rec["unlocked"] += 1
+        held_time = timestamp[0] - rec["locked_time"]
+        rec["held_times"].append(held_time)
+        rec["unlock_loc"] = (filename, line)
+
+
+def get_args():
+    "Grab options"
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--output", "-o", type=str, help="Render plot to file")
+    parser.add_argument("events", type=str, help='trace file read from')
+    parser.add_argument("tracefile", type=str, help='trace file read from')
+    return parser.parse_args()
+
+if __name__ == '__main__':
+    args = get_args()
+
+    # Gather data from the trace
+    analyser = MutexAnalyser()
+    simpletrace.process(args.events, args.tracefile, analyser)
+
+    print ("Total locks: %d, locked: %d, unlocked: %d" %
+           (analyser.locks, analyser.locked, analyser.unlocks))
+
+    # Now dump the individual lock stats
+    for key, val in sorted(analyser.mutex_records.items(),
+                           key=lambda kv: kv[1]["locks"]):
+        print ("Lock: %#x locks: %d, locked: %d, unlocked: %d" %
+               (key, val["locks"], val["locked"], val["unlocked"]))
+
+        acquire_times = np.array(val["acquire_times"])
+        if len(acquire_times) > 0:
+            print ("  Acquire Time: min:%d median:%d avg:%.2f max:%d" %
+                   (acquire_times.min(), np.median(acquire_times),
+                    acquire_times.mean(), acquire_times.max()))
+
+        held_times = np.array(val["held_times"])
+        if len(held_times) > 0:
+            print ("  Held Time: min:%d median:%d avg:%.2f max:%d" %
+                   (held_times.min(), np.median(held_times),
+                    held_times.mean(), held_times.max()))
+
+        # Check for lock attempts still blocked at the end of the trace
+        if val["locks"] > val["locked"]:
+            print ("  LOCK HELD (%s:%s)" % (val["locked_loc"]))
+            print ("  BLOCKED   (%s:%s)" % (val["lock_loc"]))
diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py
index 14553876a2..88ff4adb30 100755
--- a/scripts/analyze-migration.py
+++ b/scripts/analyze-migration.py
@@ -234,6 +234,10 @@ class HTABSection(object):
 
         header = self.file.read32()
 
+        if (header == -1):
+            # "no HPT" encoding
+            return
+
         if (header > 0):
             # First section, just the hash shift
             return
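Note the new branch can only match if read32() yields a signed interpretation of the word, so that an all-ones header decodes as -1. A sketch of such a read, assuming the migration stream's usual big-endian encoding (read32_signed is an illustrative helper, not the script's actual method):

    import struct

    def read32_signed(f):
        # '>i' is big-endian signed 32-bit, so 0xffffffff decodes
        # as -1 and matches the "no HPT" comparison above
        return struct.unpack('>i', f.read(4))[0]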
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 3dc27d9656..1b4b812e28 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -265,6 +265,7 @@ our @typeList = (
 	qr{${Ident}_handler_fn},
 	qr{target_(?:u)?long},
 	qr{hwaddr},
+	qr{xml${Ident}},
 );
 
 # This can be modified by sub possible.  Since it can be empty, be careful
@@ -1622,6 +1623,11 @@ sub process {
 			}
 		}
 
+# 'do ... while (0/false)' only makes sense in macros, without trailing ';'
+		if ($line =~ /while\s*\((0|false)\);/) {
+			ERROR("suspicious ; after while (0)\n" . $herecurr);
+		}
+
 # Check relative indent for conditionals and blocks.
 		if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
 			my ($s, $c) = ($stat, $cond);
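The new rule is a single pattern match. Ported to Python for illustration, it flags a do/while body that carries its own trailing semicolon, which would make a macro invocation like "if (x) macro(); else ..." expand to two statements and break the else:

    import re

    # same pattern as the Perl check above
    suspicious = re.compile(r'while\s*\((0|false)\);')

    assert suspicious.search("} while (0);")      # flagged: trailing ';'
    assert suspicious.search("} while (false);")  # also flagged
    assert not suspicious.search("} while (0)")   # correct macro style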
diff --git a/scripts/device-crash-test b/scripts/device-crash-test
index 827d8ec2af..7417177ebb 100755
--- a/scripts/device-crash-test
+++ b/scripts/device-crash-test
@@ -207,11 +207,9 @@ ERROR_WHITELIST = [
     # Known crashes will generate error messages, but won't be fatal.
     # Those entries must be removed once we fix the crashes.
     {'exitcode':-6, 'log':r"Device 'serial0' is in use", 'loglevel':logging.ERROR},
-    {'exitcode':-6, 'log':r"spapr_rtas_register: Assertion .*rtas_table\[token\]\.name.* failed", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"qemu_net_client_setup: Assertion `!peer->peer' failed", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r'RAMBlock "[\w.-]+" already registered', 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"find_ram_offset: Assertion `size != 0' failed.", 'loglevel':logging.ERROR},
-    {'exitcode':-6, 'log':r"puv3_load_kernel: Assertion `kernel_filename != NULL' failed", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"add_cpreg_to_hashtable: code should not be reached", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"qemu_alloc_display: Assertion `surface->image != NULL' failed", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"Unexpected error in error_set_from_qdev_prop_error", 'loglevel':logging.ERROR},
@@ -219,16 +217,10 @@ ERROR_WHITELIST = [
     {'exitcode':-6, 'log':r"Object .* is not an instance of type generic-pc-machine", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"Object .* is not an instance of type e500-ccsr", 'loglevel':logging.ERROR},
     {'exitcode':-6, 'log':r"vmstate_register_with_alias_id: Assertion `!se->compat \|\| se->instance_id == 0' failed", 'loglevel':logging.ERROR},
-    {'exitcode':-11, 'device':'stm32f205-soc', 'loglevel':logging.ERROR, 'expected':True},
-    {'exitcode':-11, 'device':'xlnx,zynqmp', 'loglevel':logging.ERROR, 'expected':True},
-    {'exitcode':-11, 'device':'mips-cps', 'loglevel':logging.ERROR, 'expected':True},
     {'exitcode':-11, 'device':'gus', 'loglevel':logging.ERROR, 'expected':True},
-    {'exitcode':-11, 'device':'a9mpcore_priv', 'loglevel':logging.ERROR, 'expected':True},
-    {'exitcode':-11, 'device':'a15mpcore_priv', 'loglevel':logging.ERROR, 'expected':True},
     {'exitcode':-11, 'device':'isa-serial', 'loglevel':logging.ERROR, 'expected':True},
     {'exitcode':-11, 'device':'sb16', 'loglevel':logging.ERROR, 'expected':True},
     {'exitcode':-11, 'device':'cs4231a', 'loglevel':logging.ERROR, 'expected':True},
-    {'exitcode':-11, 'device':'arm-gicv3', 'loglevel':logging.ERROR, 'expected':True},
     {'exitcode':-11, 'machine':'isapc', 'device':'.*-iommu', 'loglevel':logging.ERROR, 'expected':True},
 
     # everything else (including SIGABRT and SIGSEGV) will be a fatal error:
diff --git a/scripts/dump-guest-memory.py b/scripts/dump-guest-memory.py
index 09bec92b50..03fbf69f8a 100644
--- a/scripts/dump-guest-memory.py
+++ b/scripts/dump-guest-memory.py
@@ -564,7 +564,7 @@ shape and this command should mostly work."""
 
         vmcoreinfo = self.phys_memory_read(addr, size)
         if vmcoreinfo:
-            self.elf.add_vmcoreinfo_note(vmcoreinfo.tobytes())
+            self.elf.add_vmcoreinfo_note(bytes(vmcoreinfo))
 
     def invoke(self, args, from_tty):
         """Handles command invocation from gdb."""
diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py
index b3f8e04f77..690827e6fc 100644
--- a/scripts/qemu-gdb.py
+++ b/scripts/qemu-gdb.py
@@ -26,7 +26,7 @@ import os, sys
 
 sys.path.append(os.path.dirname(__file__))
 
-from qemugdb import aio, mtree, coroutine
+from qemugdb import aio, mtree, coroutine, tcg, timers
 
 class QemuCommand(gdb.Command):
     '''Prefix for QEMU debug support commands'''
@@ -38,6 +38,8 @@ QemuCommand()
 coroutine.CoroutineCommand()
 mtree.MtreeCommand()
 aio.HandlersCommand()
+tcg.TCGLockStatusCommand()
+timers.TimersCommand()
 
 coroutine.CoroutineSPFunction()
 coroutine.CoroutinePCFunction()
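Instantiating a command class is what registers it with gdb, and a space in the command name attaches it under an existing prefix such as 'qemu'. A minimal sketch of the prefix/subcommand pattern, with illustrative names:

    import gdb

    class ExamplePrefix(gdb.Command):
        '''Prefix for example commands'''
        def __init__(self):
            # the final True marks this as a prefix command
            gdb.Command.__init__(self, 'example', gdb.COMMAND_DATA,
                                 gdb.COMPLETE_NONE, True)

    class ExampleSub(gdb.Command):
        '''An example subcommand'''
        def __init__(self):
            # the space attaches 'sub' under the 'example' prefix
            gdb.Command.__init__(self, 'example sub', gdb.COMMAND_DATA,
                                 gdb.COMPLETE_NONE)
        def invoke(self, arg, from_tty):
            gdb.write("hello from example sub\n")

    ExamplePrefix()
    ExampleSub()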
diff --git a/scripts/qemugdb/tcg.py b/scripts/qemugdb/tcg.py
new file mode 100644
index 0000000000..8c7f1d7454
--- /dev/null
+++ b/scripts/qemugdb/tcg.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GDB debugging support, TCG status
+#
+# Copyright 2016 Linaro Ltd
+#
+# Authors:
+#  Alex Bennée <alex.bennee@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.  See
+# the COPYING file in the top-level directory.
+#
+# Contributions after 2012-01-13 are licensed under the terms of the
+# GNU GPL, version 2 or (at your option) any later version.
+
+# 'qemu tcg-lock-status' -- display the TCG lock status across threads
+
+import gdb
+
+class TCGLockStatusCommand(gdb.Command):
+    '''Display TCG Execution Status'''
+    def __init__(self):
+        gdb.Command.__init__(self, 'qemu tcg-lock-status', gdb.COMMAND_DATA,
+                             gdb.COMPLETE_NONE)
+
+    def invoke(self, arg, from_tty):
+        gdb.write("Thread, BQL (iothread_mutex), Replay, Blocked?\n")
+        for thread in gdb.inferiors()[0].threads():
+            thread.switch()
+
+            iothread = gdb.parse_and_eval("iothread_locked")
+            replay = gdb.parse_and_eval("replay_locked")
+
+            frame = gdb.selected_frame()
+            if frame.name() == "__lll_lock_wait":
+                frame.older().select()
+                mutex = gdb.parse_and_eval("mutex")
+                owner = gdb.parse_and_eval("mutex->__data.__owner")
+                blocked = ("__lll_lock_wait waiting on %s from %d" %
+                           (mutex, owner))
+            else:
+                blocked = "not blocked"
+
+            gdb.write("%d/%d, %s, %s, %s\n" % (thread.num, thread.ptid[1],
+                                               iothread, replay, blocked))
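Both parse_and_eval() calls assume iothread_locked and replay_locked are present in the binary; if a build lacks one of them (or its debug info), gdb.parse_and_eval raises gdb.error and the whole command aborts mid-listing. A defensive variant, as a sketch (try_eval is a hypothetical helper, not part of this commit):

    import gdb

    def try_eval(expr):
        # degrade gracefully when a symbol is missing instead of
        # aborting the command with a Python traceback
        try:
            return gdb.parse_and_eval(expr)
        except gdb.error:
            return "<unavailable>"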
diff --git a/scripts/qemugdb/timers.py b/scripts/qemugdb/timers.py
new file mode 100644
index 0000000000..be71a001e3
--- /dev/null
+++ b/scripts/qemugdb/timers.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+# GDB debugging support
+#
+# Copyright 2017 Linaro Ltd
+#
+# Author: Alex Bennée <alex.bennee@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2.  See
+# the COPYING file in the top-level directory.
+
+# 'qemu timers' -- display the current timerlists
+
+import gdb
+
+class TimersCommand(gdb.Command):
+    '''Display the current QEMU timers'''
+
+    def __init__(self):
+        'Register the class as a gdb command'
+        gdb.Command.__init__(self, 'qemu timers', gdb.COMMAND_DATA,
+                             gdb.COMPLETE_NONE)
+
+    def dump_timers(self, timer):
+        "Follow a timer and recursively dump each one in the list."
+        # timer should be of type QemuTimer
+        gdb.write("    timer %s/%s (cb:%s,opq:%s)\n" % (
+            timer['expire_time'],
+            timer['scale'],
+            timer['cb'],
+            timer['opaque']))
+
+        if int(timer['next']) > 0:
+            self.dump_timers(timer['next'])
+
+
+    def process_timerlist(self, tlist, ttype):
+        gdb.write("Processing %s timers\n" % (ttype))
+        gdb.write("  clock %s is enabled:%s, last:%s\n" % (
+            tlist['clock']['type'],
+            tlist['clock']['enabled'],
+            tlist['clock']['last']))
+        if int(tlist['active_timers']) > 0:
+            self.dump_timers(tlist['active_timers'])
+
+
+    def invoke(self, arg, from_tty):
+        'Run the command'
+        main_timers = gdb.parse_and_eval("main_loop_tlg")
+
+        # This will break if QEMUClockType in timer.h is redefined
+        self.process_timerlist(main_timers['tl'][0], "Realtime")
+        self.process_timerlist(main_timers['tl'][1], "Virtual")
+        self.process_timerlist(main_timers['tl'][2], "Host")
+        self.process_timerlist(main_timers['tl'][3], "Virtual RT")
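The four hard-coded indices mirror the order of QEMUClockType at the time of writing, as the comment warns. A sketch that derives the list from the enum's debug info instead, assuming the QEMUClockType enum (with its QEMU_CLOCK_MAX terminator) is visible to gdb and that gdb's Python API exposes Field.enumval:

    # inside invoke(), replacing the four explicit calls:
    clock_type = gdb.lookup_type("QEMUClockType")
    for field in clock_type.fields():
        if field.name != "QEMU_CLOCK_MAX":
            self.process_timerlist(main_timers['tl'][field.enumval],
                                   field.name)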