test: Port atomicity test to Python

Previously, this was implemented using a horrible GDB script (because
there is no such thing as a non-horrible GDB script).  This GDB script
often broke with newer versions of GDB for mysterious reasons.  Port
the test script to GDB's Python API, which makes the code much cleaner
and, hopefully, more stable.
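
For context, GDB's Python API lets a script customise a breakpoint by
subclassing gdb.Breakpoint and overriding its stop() method; returning
False resumes the program automatically, which is the pattern the new
test/atomicity.py builds on. A minimal standalone sketch of that pattern
(the breakpoint location do_work is purely illustrative):

    # sketch.py -- run as: gdb -batch -x sketch.py ./some-binary
    import gdb

    class CountingBreakpoint(gdb.Breakpoint):
        def __init__(self, *args, **kwargs):
            super(CountingBreakpoint, self).__init__(*args, **kwargs)
            self.hits = 0

        def stop(self):
            # Called each time the breakpoint is hit; do per-hit work here.
            self.hits += 1
            # Returning False tells GDB not to stop; execution continues.
            return False

    CountingBreakpoint('do_work')   # 'do_work' is an illustrative symbol name
    gdb.execute('run')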

(cherry picked from commit cbbda62258)

Conflicts:
	test/T380-atomicity.sh
Author:       Austin Clements  2014-10-03 12:58:03 -04:00
Committed by: David Bremner
parent 01c8bf89a4
commit 776684c7b6
3 changed files with 72 additions and 55 deletions

test/T380-atomicity.sh

@@ -64,7 +64,7 @@ if test_require_external_prereq gdb; then
 # -tty /dev/null works around a conflict between the 'timeout' wrapper
 # and gdb's attempt to control the TTY.
 export MAIL_DIR
-gdb -tty /dev/null -batch -x $TEST_DIRECTORY/atomicity.gdb notmuch >/dev/null 2>/dev/null
+gdb -tty /dev/null -batch -x $TEST_DIRECTORY/atomicity.py notmuch 1>gdb.out 2>&1
 # Get the final, golden output
 notmuch search '*' > expected

test/atomicity.gdb (deleted, 54 lines)

@@ -1,54 +0,0 @@
# This gdb script runs notmuch new and simulates killing and
# restarting notmuch new after every Xapian commit. To simulate this
# more efficiently, this script runs notmuch new and, immediately
# after every Xapian commit, it *pauses* the running notmuch new,
# copies the entire database and maildir to a snapshot directory, and
# executes a full notmuch new on that snapshot, comparing the final
# results with the expected output. It can then resume the paused
# notmuch new, which is still running on the original maildir, and
# repeat this process.
set args new
# Make Xapian commit after every operation instead of batching
set environment XAPIAN_FLUSH_THRESHOLD = 1
# gdb can't keep track of a simple integer. This is me weeping.
shell echo 0 > outcount
shell touch inodes
# work around apparent issue with lazy library loading on some
# platforms
set breakpoint pending on
break rename
commands
# As an optimization, only consider snapshots after a Xapian commit.
# Xapian overwrites record.base? as the last step in the commit.
shell echo > gdbcmd
shell stat -c %i $MAIL_DIR/.notmuch/xapian/record.base* > inodes.new
shell if cmp inodes inodes.new; then echo cont > gdbcmd; fi
shell mv inodes.new inodes
source gdbcmd
# Save a backtrace in case the test does fail
set logging file backtrace
set logging on
backtrace
set logging off
shell mv backtrace backtrace.`cat outcount`
# Snapshot the database
shell rm -r $MAIL_DIR.snap/.notmuch
shell cp -r $MAIL_DIR/.notmuch $MAIL_DIR.snap/.notmuch
# Restore the mtime of $MAIL_DIR.snap, which we just changed
shell touch -r $MAIL_DIR $MAIL_DIR.snap
# Run notmuch new to completion on the snapshot
shell NOTMUCH_CONFIG=${NOTMUCH_CONFIG}.snap XAPIAN_FLUSH_THRESHOLD=1000 notmuch new > /dev/null
shell NOTMUCH_CONFIG=${NOTMUCH_CONFIG}.snap notmuch search '*' > search.`cat outcount` 2>&1
shell echo $(expr $(cat outcount) + 1) > outcount
cont
end
run

test/atomicity.py (new file, 71 lines)

@@ -0,0 +1,71 @@
# This gdb Python script runs notmuch new and simulates killing and
# restarting notmuch new after every Xapian commit. To simulate this
# more efficiently, this script runs notmuch new and, immediately
# after every Xapian commit, it *pauses* the running notmuch new,
# copies the entire database and maildir to a snapshot directory, and
# executes a full notmuch new on that snapshot, comparing the final
# results with the expected output. It can then resume the paused
# notmuch new, which is still running on the original maildir, and
# repeat this process.
import gdb
import os
import glob
import shutil
import subprocess
gdb.execute('set args new')
# Make Xapian commit after every operation instead of batching
gdb.execute('set environment XAPIAN_FLUSH_THRESHOLD = 1')
maildir = os.environ['MAIL_DIR']
# Trap calls to rename, which happens just before Xapian commits
class RenameBreakpoint(gdb.Breakpoint):
    def __init__(self, *args, **kwargs):
        super(RenameBreakpoint, self).__init__(*args, **kwargs)
        self.last_inodes = {}
        self.n = 0

    def stop(self):
        # As an optimization, only consider snapshots after a Xapian
        # has really committed. Xapian overwrites record.base? as the
        # last step in the commit, so keep an eye on their inumbers.
        inodes = {}
        for path in glob.glob('%s/.notmuch/xapian/record.base*' % maildir):
            inodes[path] = os.stat(path).st_ino
        if inodes == self.last_inodes:
            # Continue
            return False
        self.last_inodes = inodes

        # Save a backtrace in case the test does fail
        backtrace = gdb.execute('backtrace', to_string=True)
        open('backtrace.%d' % self.n, 'w').write(backtrace)

        # Snapshot the database
        shutil.rmtree('%s.snap/.notmuch' % maildir)
        shutil.copytree('%s/.notmuch' % maildir, '%s.snap/.notmuch' % maildir)
        # Restore the mtime of $MAIL_DIR.snap/
        shutil.copystat('%s/.notmuch' % maildir, '%s.snap/.notmuch' % maildir)

        # Run notmuch new to completion on the snapshot
        env = os.environ.copy()
        env.update(NOTMUCH_CONFIG=os.environ['NOTMUCH_CONFIG'] + '.snap',
                   XAPIAN_FLUSH_THRESHOLD='1000')
        subprocess.check_call(
            ['notmuch', 'new'], env=env, stdout=open('/dev/null', 'w'))
        subprocess.check_call(
            ['notmuch', 'search', '*'], env=env,
            stdout=open('search.%d' % self.n, 'w'))

        # Tell the shell how far we've gotten
        open('outcount', 'w').write(str(self.n + 1))

        # Continue
        self.n += 1
        return False
RenameBreakpoint('rename')
gdb.execute('run')
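
The comparison against the golden output happens back in the shell
harness (test/T380-atomicity.sh), not in this script: it reads outcount
and checks each search.N snapshot against the expected file. As a rough
Python-flavoured illustration only (the real check is shell code):

    # Illustration only: assumes the outcount, search.N and expected
    # files produced by the test run are in the current directory.
    import filecmp

    outcount = int(open('outcount').read())
    bad = [i for i in range(outcount)
           if not filecmp.cmp('search.%d' % i, 'expected', shallow=False)]
    print('%d snapshots checked, %d mismatches' % (outcount, len(bad)))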