[PATCH 01/17] testtools: Import new upstream snapshot.

Jelmer Vernooij jelmer at samba.org
Sat Nov 1 13:21:53 MDT 2014


Change-Id: If63092b5e702121fdc27793c5e796931b4b94e8b
Signed-off-by: Jelmer Vernooij <jelmer at samba.org>
---
 lib/testtools/.gitignore                           |  18 +
 lib/testtools/.gitreview                           |   4 +
 lib/testtools/.testr.conf                          |   2 +-
 lib/testtools/.travis.yml                          |  25 +
 lib/testtools/LICENSE                              |   1 +
 lib/testtools/MANIFEST.in                          |   4 +-
 lib/testtools/Makefile                             |   4 +-
 lib/testtools/NEWS                                 | 402 +++++++++-
 lib/testtools/README                               |  89 ---
 lib/testtools/README.rst                           |  92 +++
 lib/testtools/doc/api.rst                          |  26 +
 lib/testtools/doc/for-framework-folk.rst           | 235 +++++-
 lib/testtools/doc/for-test-authors.rst             | 149 +++-
 lib/testtools/doc/hacking.rst                      |  97 ++-
 lib/testtools/doc/index.rst                        |   2 +-
 lib/testtools/doc/overview.rst                     |  13 +-
 lib/testtools/scripts/_lp_release.py               |   6 +-
 lib/testtools/setup.py                             |  51 +-
 lib/testtools/testtools/__init__.py                | 120 ++-
 lib/testtools/testtools/assertions.py              |  22 +
 lib/testtools/testtools/compat.py                  | 100 ++-
 lib/testtools/testtools/content.py                 | 195 +++--
 lib/testtools/testtools/content_type.py            |   2 +-
 lib/testtools/testtools/deferredruntest.py         |  18 +-
 lib/testtools/testtools/distutilscmd.py            |   2 +-
 lib/testtools/testtools/helpers.py                 |  83 +-
 lib/testtools/testtools/matchers/__init__.py       |   6 +
 lib/testtools/testtools/matchers/_basic.py         |  13 +-
 lib/testtools/testtools/matchers/_dict.py          |   2 +-
 lib/testtools/testtools/matchers/_exception.py     |  24 +-
 lib/testtools/testtools/matchers/_higherorder.py   |  79 ++
 lib/testtools/testtools/matchers/_impl.py          |   4 +-
 lib/testtools/testtools/run.py                     | 228 +++++-
 lib/testtools/testtools/runtest.py                 |  38 +-
 lib/testtools/testtools/testcase.py                | 322 ++++++--
 lib/testtools/testtools/testresult/__init__.py     |  24 +
 lib/testtools/testtools/testresult/doubles.py      |  24 +
 lib/testtools/testtools/testresult/real.py         | 800 ++++++++++++++++++-
 lib/testtools/testtools/tests/__init__.py          |   5 +-
 lib/testtools/testtools/tests/helpers.py           |  13 +-
 .../testtools/tests/matchers/test_basic.py         |  24 +-
 .../testtools/tests/matchers/test_dict.py          |   7 +-
 .../testtools/tests/matchers/test_exception.py     |   7 +-
 .../testtools/tests/matchers/test_higherorder.py   |  27 +
 lib/testtools/testtools/tests/test_assert_that.py  | 152 ++++
 lib/testtools/testtools/tests/test_compat.py       | 183 ++++-
 lib/testtools/testtools/tests/test_content.py      |  80 +-
 lib/testtools/testtools/tests/test_content_type.py |   2 +-
 .../testtools/tests/test_deferredruntest.py        |  17 +-
 lib/testtools/testtools/tests/test_distutilscmd.py |   7 +-
 .../testtools/tests/test_fixturesupport.py         |  30 +-
 lib/testtools/testtools/tests/test_helpers.py      | 183 -----
 lib/testtools/testtools/tests/test_run.py          | 221 +++++-
 lib/testtools/testtools/tests/test_runtest.py      |  58 +-
 lib/testtools/testtools/tests/test_spinner.py      |  20 +-
 lib/testtools/testtools/tests/test_testcase.py     | 416 +++++++++-
 lib/testtools/testtools/tests/test_testresult.py   | 851 ++++++++++++++++++++-
 lib/testtools/testtools/tests/test_testsuite.py    | 155 +++-
 lib/testtools/testtools/tests/test_with_with.py    |  15 +
 lib/testtools/testtools/testsuite.py               | 173 ++++-
 60 files changed, 5157 insertions(+), 815 deletions(-)
 create mode 100644 lib/testtools/.gitignore
 create mode 100644 lib/testtools/.gitreview
 create mode 100644 lib/testtools/.travis.yml
 delete mode 100644 lib/testtools/README
 create mode 100644 lib/testtools/README.rst
 create mode 100644 lib/testtools/doc/api.rst
 create mode 100644 lib/testtools/testtools/assertions.py
 create mode 100644 lib/testtools/testtools/tests/test_assert_that.py

diff --git a/lib/testtools/.gitignore b/lib/testtools/.gitignore
new file mode 100644
index 0000000..acf9b74
--- /dev/null
+++ b/lib/testtools/.gitignore
@@ -0,0 +1,18 @@
+__pycache__
+./build
+MANIFEST
+dist
+tags
+TAGS
+apidocs
+_trial_temp
+doc/_build
+.testrepository
+.lp_creds
+./testtools.egg-info
+*.pyc
+*.swp
+*~
+testtools.egg-info
+/build/
+/.env/
diff --git a/lib/testtools/.gitreview b/lib/testtools/.gitreview
new file mode 100644
index 0000000..5d15856
--- /dev/null
+++ b/lib/testtools/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.testing-cabal.org
+port=29418
+project=testing-cabal/testtools.git
diff --git a/lib/testtools/.testr.conf b/lib/testtools/.testr.conf
index 8a65628..e695109 100644
--- a/lib/testtools/.testr.conf
+++ b/lib/testtools/.testr.conf
@@ -1,4 +1,4 @@
 [DEFAULT]
-test_command=${PYTHON:-python} -m subunit.run discover . $LISTOPT $IDOPTION
+test_command=${PYTHON:-python} -m subunit.run $LISTOPT $IDOPTION testtools.tests.test_suite
 test_id_option=--load-list $IDFILE
 test_list_option=--list
diff --git a/lib/testtools/.travis.yml b/lib/testtools/.travis.yml
new file mode 100644
index 0000000..5e0e85a
--- /dev/null
+++ b/lib/testtools/.travis.yml
@@ -0,0 +1,25 @@
+language: python
+
+python:
+  - "2.6"
+  - "2.7"
+  - "3.3"
+  - "pypy"
+
+# We have to pin Jinja2 < 2.7 for Python 3.2 because Jinja2 2.7 drops support:
+# http://jinja.pocoo.org/docs/changelog/#version-2-7
+#
+# See also:
+# http://stackoverflow.com/questions/18252804/syntax-error-in-jinja-2-library
+matrix:
+  include:
+    - python: "3.2"
+      env: JINJA_REQ="jinja2<2.7"
+
+install:
+  - pip install -q --use-mirrors fixtures extras python-mimeparse $JINJA_REQ sphinx
+  - python setup.py -q install
+
+script:
+  - python -m testtools.run testtools.tests.test_suite
+  - make clean-sphinx docs
diff --git a/lib/testtools/LICENSE b/lib/testtools/LICENSE
index d59dc7c..21010cc 100644
--- a/lib/testtools/LICENSE
+++ b/lib/testtools/LICENSE
@@ -17,6 +17,7 @@ The testtools authors are:
  * Gavin Panella
  * Martin Pool
  * Vincent Ladeuil
+ * Nikola Đipanov
 
 and are collectively referred to as "testtools developers".
 
diff --git a/lib/testtools/MANIFEST.in b/lib/testtools/MANIFEST.in
index 7da191a..4619349 100644
--- a/lib/testtools/MANIFEST.in
+++ b/lib/testtools/MANIFEST.in
@@ -2,8 +2,8 @@ include LICENSE
 include Makefile
 include MANIFEST.in
 include NEWS
-include README
-include .bzrignore
+include README.rst
+include .gitignore
 graft doc
 graft doc/_static
 graft doc/_templates
diff --git a/lib/testtools/Makefile b/lib/testtools/Makefile
index b3e40ec..ccaa776 100644
--- a/lib/testtools/Makefile
+++ b/lib/testtools/Makefile
@@ -1,4 +1,4 @@
-# See README for copyright and licensing details.
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
 
 PYTHON=python
 SOURCES=$(shell find testtools -name "*.py")
@@ -34,7 +34,7 @@ apidocs:
 	PYTHONWARNINGS='ignore::DeprecationWarning' \
 		pydoctor --make-html --add-package testtools \
 		--docformat=restructuredtext --project-name=testtools \
-		--project-url=https://launchpad.net/testtools
+		--project-url=https://github.com/testing-cabal/testtools
 
 doc/news.rst:
 	ln -s ../NEWS doc/news.rst
diff --git a/lib/testtools/NEWS b/lib/testtools/NEWS
index 6f3cb8c..ea4288f 100644
--- a/lib/testtools/NEWS
+++ b/lib/testtools/NEWS
@@ -3,9 +3,409 @@ testtools NEWS
 
 Changes and improvements to testtools_, grouped by release.
 
+
 NEXT
 ~~~~
 
+1.1.0
+~~~~~
+
+Improvements
+------------
+
+* Exceptions in a ``fixture.getDetails`` method will no longer mask errors
+  raised from the same fixture's ``setUp`` method.
+  (Robert Collins, #1368440)
+
+1.0.0
+~~~~~
+
+Long overdue, we've adopted a backwards compatibility statement and recognized
+that we have plenty of users depending on our behaviour - calling our version
+1.0.0 is a recognition of that.
+
+Improvements
+------------
+
+* Fix a long-standing bug where tearDown and cleanUps would not be called if the
+  test run was interrupted. This should fix leaking external resources from
+  interrupted tests.
+  (Robert Collins, #1364188)
+
+* Fix a long-standing bug where calling sys.exit(0) from within a test would
+  cause the test suite to exit with 0, without reporting a failure of that
+  test. We still allow the test suite to be exited (since catching higher order
+  exceptions requires exceptional circumstances) but we now call a last-resort
+  handler on the TestCase, resulting in an error being reported for the test.
+  (Robert Collins, #1364188)
+
+* Fix an issue where tests skipped with the ``skip*`` family of decorators would
+  still have their ``setUp`` and ``tearDown`` functions called.
+  (Thomi Richards, #https://github.com/testing-cabal/testtools/issues/86)
+
+* We have adopted a formal backwards compatibility statement (see hacking.rst)
+  (Robert Collins)
+
+0.9.39
+~~~~~~
+
+Brown paper bag release - 0.9.38 was broken for some users,
+_jython_aware_splitext was not defined entirely compatibly.
+(Robert Collins, #https://github.com/testing-cabal/testtools/issues/100)
+
+0.9.38
+~~~~~~
+
+Bug fixes for test importing.
+
+Improvements
+------------
+
+* Discovery import error detection wasn't implemented for python 2.6 (the
+  'discover' module). (Robert Collins)
+
+* Discovery now executes load_tests (if present) in __init__ in all packages.
+  (Robert Collins, http://bugs.python.org/issue16662)
+
+0.9.37
+~~~~~~
+
+Minor improvements to correctness.
+
+Changes
+-------
+
+* ``stdout`` is now correctly honoured on ``run.TestProgram`` - before the
+  runner objects would be created with no stdout parameter. If construction
+  fails, the previous parameter list is attempted, permitting compatibility
+  with Runner classes that don't accept stdout as a parameter.
+  (Robert Collins)
+
+* The ``ExtendedToStreamDecorator`` now handles content objects with one less
+  packet - the last packet of the source content is sent with EOF set rather
+  than an empty packet with EOF set being sent after the last packet of the
+  source content. (Robert Collins)
+
+0.9.36
+~~~~~~
+
+Welcome to our long overdue 0.9.36 release, which improves compatibility with
+Python 3.4, adds assert_that, a function for using matchers without TestCase
+objects, and finally will error if you try to use setUp or tearDown twice -
+since that invariably leads to bad things of one sort or another happening.
+
+Changes
+-------
+
+* Error if ``setUp`` or ``tearDown`` are called twice.
+  (Robert Collins, #882884)
+
+* Make testtools compatible with the ``unittest.expectedFailure`` decorator in
+  Python 3.4. (Thomi Richards)
+
+
+Improvements
+------------
+
+* Introduce the assert_that function, which allows matchers to be used
+  independent of testtools.TestCase. (Daniel Watkins, #1243834)
+
+
+0.9.35
+~~~~~~
+
+Changes
+-------
+
+* Removed a number of code paths where Python 2.4 and Python 2.5 were
+  explicitly handled. (Daniel Watkins)
+
+Improvements
+------------
+
+* Added the ``testtools.TestCase.expectThat`` method, which implements
+  delayed assertions. (Thomi Richards)
+
+* Docs are now built as part of the Travis-CI build, reducing the chance of
+  Read The Docs being broken accidentally. (Daniel Watkins, #1158773)
+
+0.9.34
+~~~~~~
+
+Improvements
+------------
+
+* Added ability for ``testtools.TestCase`` instances to force a test to
+  fail, even if no assertions failed. (Thomi Richards)
+
+* Added ``testtools.content.StacktraceContent``, a content object that
+  automatically creates a ``StackLinesContent`` object containing the current
+  stack trace. (Thomi Richards)
+
+* ``AnyMatch`` is now exported properly in ``testtools.matchers``.
+  (Robert Collins, Rob Kennedy, github #44)
+
+* In Python 3.3, if there are duplicate test ids, tests.sort() will
+  fail and raise TypeError. Detect the duplicate test ids firstly in
+  sorted_tests() to ensure that all test ids are unique.
+  (Kui Shi, #1243922)
+
+* ``json_content`` is now in the ``__all__`` attribute for
+  ``testtools.content``. (Robert Collins)
+
+* Network tests now bind to 127.0.0.1 to avoid (even temporary) network
+  visible ports. (Benedikt Morbach, github #46)
+
+* Test listing now explicitly indicates by printing 'Failed to import' and
+  exiting (2) when an import has failed rather than only signalling through the
+  test name. (Robert Collins, #1245672)
+
+* ``test_compat.TestDetectEncoding.test_bom`` now works on Python 3.3 - the
+  corner case with euc_jp is no longer permitted in Python 3.3 so we can
+  skip it. (Martin [gz], #1251962)
+
+0.9.33
+~~~~~~
+
+Improvements
+------------
+
+* Added ``addDetailUniqueName`` method to ``testtools.TestCase`` class.
+  (Thomi Richards)
+
+* Removed some unused code from ``testtools.content.TracebackContent``.
+  (Thomi Richards)
+
+* Added ``testtools.StackLinesContent``: a content object for displaying
+  pre-processed stack lines. (Thomi Richards)
+
+* ``StreamSummary`` was calculating testsRun incorrectly: ``exists`` status
+  tests were counted as run tests, but they are not.
+  (Robert Collins, #1203728)
+
+0.9.32
+~~~~~~
+
+Regular maintenance release.  Special thanks to new contributor, Xiao Hanyu!
+
+Changes
+-------
+
+ * ``testtools.compat._format_exc_info`` has been refactored into several
+   smaller functions. (Thomi Richards)
+
+Improvements
+------------
+
+* Stacktrace filtering no longer hides unittest frames that are surrounded by
+  user frames. We will reenable this when we figure out a better algorithm for
+  retaining meaning. (Robert Collins, #1188420)
+
+* The compatibility code for skipped tests with unittest2 was broken.
+  (Robert Collins, #1190951)
+
+* Various documentation improvements (Clint Byrum, Xiao Hanyu).
+
+0.9.31
+~~~~~~
+
+Improvements
+------------
+
+* ``ExpectedException`` now accepts a msg parameter for describing an error,
+  much the same as assertEquals etc. (Robert Collins)
+
+0.9.30
+~~~~~~
+
+A new sort of TestResult, the StreamResult has been added, as a prototype for
+a revised standard library test result API.  Expect this API to change.
+Although we will try to preserve compatibility for early adopters, it is
+experimental and we might need to break it if it turns out to be unsuitable.
+
+Improvements
+------------
+
+* ``assertRaises`` works properly for exception classes that have custom
+  metaclasses.
+
+* ``ConcurrentTestSuite`` was silently eating exceptions that propagate from
+  the test.run(result) method call. Ignoring them is fine in a normal test
+  runner, but when they happen in a different thread, the thread that called
+  suite.run() is not in the stack anymore, and the exceptions are lost. We now
+  create a synthetic test recording any such exception.
+  (Robert Collins, #1130429)
+
+* Fixed SyntaxError raised in ``_compat2x.py`` when installing via Python 3.
+  (Will Bond, #941958)
+
+* New class ``StreamResult`` which defines the API for the new result type.
+  (Robert Collins)
+
+* New support class ``ConcurrentStreamTestSuite`` for convenient construction
+  and utilisation of ``StreamToQueue`` objects. (Robert Collins)
+
+* New support class ``CopyStreamResult`` which forwards events onto multiple
+  ``StreamResult`` objects (each of which receives all the events).
+  (Robert Collins)
+
+* New support class ``StreamSummary`` which summarises a ``StreamResult``
+  stream compatibly with ``TestResult`` code. (Robert Collins)
+
+* New support class ``StreamTagger`` which adds or removes tags from
+  ``StreamResult`` events. (Robert Collins)
+
+* New support class ``StreamToDict`` which converts a ``StreamResult`` to a
+  series of dicts describing a test. Useful for writing trivial stream
+  analysers. (Robert Collins)
+
+* New support class ``TestControl`` which permits cancelling an in-progress
+  run. (Robert Collins)
+
+* New support class ``StreamFailFast`` which calls a ``TestControl`` instance
+  to abort the test run when a failure is detected. (Robert Collins)
+
+* New support class ``ExtendedToStreamDecorator`` which translates both regular
+  unittest TestResult API calls and the ExtendedTestResult API which testtools
+  has supported into the StreamResult API. ExtendedToStreamDecorator also
+  forwards calls made in the StreamResult API, permitting it to be used
+  anywhere a StreamResult is used. Key TestResult query methods like
+  wasSuccessful and shouldStop are synchronised with the StreamResult API
+  calls, but the detailed statistics like the list of errors are not - a
+  separate consumer will be created to support that.
+  (Robert Collins)
+
+* New support class ``StreamToExtendedDecorator`` which translates
+  ``StreamResult`` API calls into ``ExtendedTestResult`` (or any older
+  ``TestResult``) calls. This permits using un-migrated result objects with
+  new runners / tests. (Robert Collins)
+
+* New support class ``StreamToQueue`` for sending messages to one
+  ``StreamResult`` from multiple threads. (Robert Collins)
+
+* New support class ``TimestampingStreamResult`` which adds a timestamp to
+  events with no timestamp. (Robert Collins)
+
+* New ``TestCase`` decorator ``DecorateTestCaseResult`` that adapts the
+  ``TestResult`` or ``StreamResult`` a case will be run with, for ensuring that
+  a particular result object is used even if the runner running the test doesn't
+  know to use it. (Robert Collins)
+
+* New test support class ``testtools.testresult.doubles.StreamResult``, which
+  captures all the StreamResult events. (Robert Collins)
+
+* ``PlaceHolder`` can now hold tags, applying them before the test and
+  removing them after it. (Robert Collins)
+
+* ``PlaceHolder`` can now hold timestamps, and applies them before the test and
+  then before the outcome. (Robert Collins)
+
+* ``StreamResultRouter`` added. This is useful for demultiplexing - e.g. for
+  partitioning analysis of events or sending feedback encapsulated in
+  StreamResult events back to their source. (Robert Collins)
+
+* ``testtools.run.TestProgram`` now supports the ``TestRunner`` taking over
+  responsibility for formatting the output of ``--list-tests``.
+  (Robert Collins)
+
+* The error message for setUp and tearDown upcall errors was broken on Python
+  3.4. (Monty Taylor, Robert Collins, #1140688)
+
+* The repr of object() on pypy includes the object id, which was breaking a
+  test that accidentally depended on the CPython repr for object().
+  (Jonathan Lange)
+
+0.9.29
+~~~~~~
+
+A simple bug fix, and better error messages when you don't up-call.
+
+Changes
+-------
+
+* ``testtools.content_type.ContentType`` incorrectly used ',' rather than ';'
+  to separate parameters. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.compat.unicode_output_stream`` was wrapping a stream encoder
+  around ``io.StringIO`` and ``io.TextIOWrapper`` objects, which was incorrect.
+  (Robert Collins)
+
+* Report the name of the source file for setUp and tearDown upcall errors.
+  (Monty Taylor)
+
+0.9.28
+~~~~~~
+
+Testtools has moved VCS - https://github.com/testing-cabal/testtools/ is
+the new home. Bug tracking is still on Launchpad, and releases are on Pypi.
+
+We made this change to take advantage of the richer ecosystem of tools around
+Git, and to lower the barrier for new contributors.
+
+Improvements
+------------
+
+* New ``testtools.testcase.attr`` and ``testtools.testcase.WithAttributes``
+  helpers allow marking up test case methods with simple labels. This permits
+  filtering tests with more granularity than organising them into modules and
+  test classes. (Robert Collins)
+
+0.9.27
+~~~~~~
+
+Improvements
+------------
+
+* New matcher ``HasLength`` for matching the length of a collection.
+  (Robert Collins)
+
+* New matcher ``MatchesPredicateWithParams`` make it still easier to create
+  ad hoc matchers. (Robert Collins)
+
+* We have a simpler release process in future - see doc/hacking.rst.
+  (Robert Collins)
+
+0.9.26
+~~~~~~
+
+Brown paper bag fix: failed to document the need for setup to be able to use
+extras. Compounded by pip not supporting setup_requires.
+
+Changes
+-------
+
+* setup.py now can generate egg_info even if extras is not available.
+  Also lists extras in setup_requires for easy_install.
+  (Robert Collins, #1102464)
+
+0.9.25
+~~~~~~
+
+Changes
+-------
+
+* ``python -m testtools.run --load-list`` will now preserve any custom suites
+  (such as ``testtools.FixtureSuite`` or ``testresources.OptimisingTestSuite``)
+  rather than flattening them.
+  (Robert Collins, #827175)
+
+* Testtools now depends on extras, a small library split out from it to contain
+  generally useful non-testing facilities. Since extras has been around for a
+  couple of testtools releases now, we're making this into a hard dependency of
+  testtools. (Robert Collins)
+
+* Testtools now uses setuptools rather than distutils so that we can document
+  the extras dependency. (Robert Collins)
+
+Improvements
+------------
+
+* Testtools will no longer override a detail named 'traceback' that test code
+  has registered, when reporting caught exceptions from test code.
+  (Robert Collins, #812793)
+
 0.9.24
 ~~~~~~
 
@@ -206,7 +606,7 @@ Improvements
 * API documentation corrections. (Raphaël Badin)
 
 * ``ConcurrentTestSuite`` now takes an optional ``wrap_result`` parameter
-  that can be used to wrap the ``ThreadsafeForwardingResult``s created by
+  that can be used to wrap the ``ThreadsafeForwardingResults`` created by
   the suite.  (Jonathan Lange)
 
 * ``Tagger`` added.  It's a new ``TestResult`` that tags all tests sent to
diff --git a/lib/testtools/README b/lib/testtools/README
deleted file mode 100644
index dbc685b..0000000
--- a/lib/testtools/README
+++ /dev/null
@@ -1,89 +0,0 @@
-=========
-testtools
-=========
-
-testtools is a set of extensions to the Python standard library's unit testing
-framework.
-
-These extensions have been derived from years of experience with unit testing
-in Python and come from many different sources.
-
-
-Documentation
--------------
-
-If you would like to learn more about testtools, consult our documentation in
-the 'doc/' directory.  You might like to start at 'doc/overview.rst' or
-'doc/for-test-authors.rst'.
-
-
-Licensing
----------
-
-This project is distributed under the MIT license and copyright is owned by
-Jonathan M. Lange and the testtools authors. See LICENSE for details.
-
-Some code in 'testtools/run.py' is taken from Python's unittest module, and is
-copyright Steve Purcell and the Python Software Foundation, it is distributed
-under the same license as Python, see LICENSE for details.
-
-
-Required Dependencies
----------------------
-
- * Python 2.6+ or 3.0+
-
-If you would like to use testtools for earlier Python's, please use testtools
-0.9.15.
-
-
-Optional Dependencies
----------------------
-
-If you would like to use our undocumented, unsupported Twisted support, then
-you will need Twisted.
-
-If you want to use ``fixtures`` then you can either install fixtures (e.g. from
-https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
-or alternatively just make sure your fixture objects obey the same protocol.
-
-
-Bug reports and patches
------------------------
-
-Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
-Patches can also be submitted via Launchpad, or mailed to the author.  You can
-mail the author directly at jml at mumak.net.
-
-There's no mailing list for this project yet, however the testing-in-python
-mailing list may be a useful resource:
-
- * Address: testing-in-python at lists.idyll.org
- * Subscription link: http://lists.idyll.org/listinfo/testing-in-python
-
-
-History
--------
-
-testtools used to be called 'pyunit3k'.  The name was changed to avoid
-conflating the library with the Python 3.0 release (commonly referred to as
-'py3k').
-
-
-Thanks
-------
-
- * Canonical Ltd
- * Bazaar
- * Twisted Matrix Labs
- * Robert Collins
- * Andrew Bennetts
- * Benjamin Peterson
- * Jamu Kakar
- * James Westby
- * Martin [gz]
- * Michael Hudson-Doyle
- * Aaron Bentley
- * Christian Kampka
- * Gavin Panella
- * Martin Pool
diff --git a/lib/testtools/README.rst b/lib/testtools/README.rst
new file mode 100644
index 0000000..cddb594
--- /dev/null
+++ b/lib/testtools/README.rst
@@ -0,0 +1,92 @@
+=========
+testtools
+=========
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework.
+
+These extensions have been derived from years of experience with unit testing
+in Python and come from many different sources.
+
+
+Documentation
+-------------
+
+If you would like to learn more about testtools, consult our documentation in
+the 'doc/' directory.  You might like to start at 'doc/overview.rst' or
+'doc/for-test-authors.rst'.
+
+
+Licensing
+---------
+
+This project is distributed under the MIT license and copyright is owned by
+Jonathan M. Lange and the testtools authors. See LICENSE for details.
+
+Some code in 'testtools/run.py' is taken from Python's unittest module, and is
+copyright Steve Purcell and the Python Software Foundation, it is distributed
+under the same license as Python, see LICENSE for details.
+
+
+Required Dependencies
+---------------------
+
+ * Python 2.6+ or 3.0+
+
+If you would like to use testtools for earlier Python's, please use testtools
+0.9.15.
+
+ * extras (helpers that we intend to push into Python itself in the near
+   future).
+
+
+Optional Dependencies
+---------------------
+
+If you would like to use our undocumented, unsupported Twisted support, then
+you will need Twisted.
+
+If you want to use ``fixtures`` then you can either install fixtures (e.g. from
+https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
+or alternatively just make sure your fixture objects obey the same protocol.
+
+
+Bug reports and patches
+-----------------------
+
+Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
+Patches should be submitted as Github pull requests, or mailed to the authors.
+See ``doc/hacking.rst`` for more details.
+
+There's no mailing list for this project yet, however the testing-in-python
+mailing list may be a useful resource:
+
+ * Address: testing-in-python at lists.idyll.org
+ * Subscription link: http://lists.idyll.org/listinfo/testing-in-python
+
+
+History
+-------
+
+testtools used to be called 'pyunit3k'.  The name was changed to avoid
+conflating the library with the Python 3.0 release (commonly referred to as
+'py3k').
+
+
+Thanks
+------
+
+ * Canonical Ltd
+ * Bazaar
+ * Twisted Matrix Labs
+ * Robert Collins
+ * Andrew Bennetts
+ * Benjamin Peterson
+ * Jamu Kakar
+ * James Westby
+ * Martin [gz]
+ * Michael Hudson-Doyle
+ * Aaron Bentley
+ * Christian Kampka
+ * Gavin Panella
+ * Martin Pool
diff --git a/lib/testtools/doc/api.rst b/lib/testtools/doc/api.rst
new file mode 100644
index 0000000..425c818
--- /dev/null
+++ b/lib/testtools/doc/api.rst
@@ -0,0 +1,26 @@
+testtools API documentation
+===========================
+
+Generated reference documentation for all the public functionality of
+testtools.
+
+Please :doc:`send patches </hacking>` if you notice anything confusing or
+wrong, or that could be improved.
+
+
+.. toctree::
+   :maxdepth: 2
+
+
+testtools
+---------
+
+.. automodule:: testtools
+   :members:
+
+
+testtools.matchers
+------------------
+
+.. automodule:: testtools.matchers
+   :members:
diff --git a/lib/testtools/doc/for-framework-folk.rst b/lib/testtools/doc/for-framework-folk.rst
index ecc11f3..5c83ab1 100644
--- a/lib/testtools/doc/for-framework-folk.rst
+++ b/lib/testtools/doc/for-framework-folk.rst
@@ -14,13 +14,17 @@ unit-tested project, are trying to get one testing framework to play nicely
 with another or are hacking away at getting your test suite to run in parallel
 over a heterogeneous cluster of machines, this guide is for you.
 
-This manual is a summary.  You can get details by consulting the `testtools
-API docs`_.
+This manual is a summary. You can get details by consulting the
+:doc:`testtools API docs </api>`.
 
 
 Extensions to TestCase
 ======================
 
+In addition to the ``TestCase`` specific methods, we have extensions for
+``TestSuite`` that also apply to ``TestCase`` (because ``TestCase`` and
+``TestSuite`` follow the Composite pattern).
+
 Custom exception handling
 -------------------------
 
@@ -46,9 +50,9 @@ provide a custom ``RunTest`` to a ``TestCase``.  The ``RunTest`` object can
 change everything about how the test executes.
 
 To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
-takes a test and an optional list of exception handlers.  Instances returned
-by the factory must have a ``run()`` method that takes an optional ``TestResult``
-object.
+takes a test and an optional list of exception handlers and an optional
+last_resort handler.  Instances returned by the factory must have a ``run()``
+method that takes an optional ``TestResult`` object.
 
 The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
 method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla
@@ -78,6 +82,15 @@ Test renaming
 instance to one with a new name.  This is helpful for implementing test
 parameterization.
 
+.. _force_failure:
+
+Delayed Test Failure
+--------------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to True will
+cause ``testtools.RunTest`` to fail the test case after the test has finished.
+This is useful when you want to cause a test to fail, but don't want to
+prevent the remainder of the test code from being executed.
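+
+For example (``optional_check`` is illustrative)::
+
+    def test_something(self):
+        if not optional_check():
+            # Mark the test failed, but let the rest of it keep running.
+            self.force_failure = True
+        # ...further assertions still execute...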
 
 Test placeholders
 =================
@@ -104,9 +117,174 @@ e.g.::
   I record an event                                                   [OK]
 
 
+Test instance decorators
+========================
+
+DecorateTestCaseResult
+----------------------
+
+This object calls out to your code when ``run`` / ``__call__`` are called and
+allows the result object that will be used to run the test to be altered. This
+is very useful when working with a test runner that doesn't know your test case
+requirements. For instance, it can be used to inject a ``unittest2`` compatible
+adapter when someone attempts to run your test suite with a ``TestResult`` that
+does not support ``addSkip`` or other ``unittest2`` methods. Similarly it can
+aid the migration to ``StreamResult``.
+
+e.g.::
+
+ >>> suite = TestSuite()
+ >>> suite = DecorateTestCaseResult(suite, ExtendedToOriginalDecorator)
+
 Extensions to TestResult
 ========================
 
+StreamResult
+------------
+
+``StreamResult`` is a new API for dealing with test case progress that supports
+concurrent and distributed testing without the various issues that
+``TestResult`` has, such as buffering in multiplexers.
+
+The design has several key principles:
+
+* Nothing that requires up-front knowledge of all tests.
+
+* Deal with tests running in concurrent environments, potentially distributed
+  across multiple processes (or even machines). This implies allowing multiple
+  tests to be active at once, supplying time explicitly, being able to
+  differentiate between tests running in different contexts and removing any
+  assumption that tests are necessarily in the same process.
+
+* Make the API as simple as possible - each aspect should do one thing well.
+
+The ``TestResult`` API this is intended to replace has three different clients.
+
+* Each executing ``TestCase`` notifies the ``TestResult`` about activity.
+
+* The testrunner running tests uses the API to find out whether the test run
+  had errors, how many tests ran and so on.
+
+* Finally, each ``TestCase`` queries the ``TestResult`` to see whether the test
+  run should be aborted.
+
+With ``StreamResult`` we need to be able to provide a ``TestResult`` compatible
+adapter (``StreamToExtendedDecorator``) to allow incremental migration.
+However, we don't need to conflate things long term. So - we define three
+separate APIs, and merely mix them together to provide the
+``StreamToExtendedDecorator``. ``StreamResult`` is the first of these APIs -
+meeting the needs of ``TestCase`` clients. It handles events generated by
+running tests. See the API documentation for ``testtools.StreamResult`` for
+details.
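+
+A minimal sketch of emitting events by hand (the test id is illustrative;
+the base ``StreamResult`` class is a no-op sink, so this runs as-is)::
+
+    >>> from testtools import StreamResult
+    >>> result = StreamResult()
+    >>> result.startTestRun()
+    >>> result.status(test_id='test_foo', test_status='inprogress')
+    >>> result.status(test_id='test_foo', test_status='success')
+    >>> result.stopTestRun()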
+
+StreamSummary
+-------------
+
+Secondly we define the ``StreamSummary`` API which takes responsibility for
+collating errors, detecting incomplete tests and counting tests. This provides
+a compatible API with those aspects of ``TestResult``. Again, see the API
+documentation for ``testtools.StreamSummary``.
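+
+A minimal sketch of the summarising end of a stream (``wasSuccessful`` and
+``testsRun`` mirror their ``TestResult`` counterparts)::
+
+    >>> from testtools import StreamSummary
+    >>> summary = StreamSummary()
+    >>> summary.startTestRun()
+    >>> # Forward StreamResult events to summary here.
+    >>> summary.stopTestRun()
+    >>> summary.wasSuccessful()
+    True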
+
+TestControl
+-----------
+
+Lastly we define the ``TestControl`` API which is used to provide the
+``shouldStop`` and ``stop`` elements from ``TestResult``. Again, see the API
+documentation for ``testtools.TestControl``. ``TestControl`` can be paired with
+a ``StreamFailFast`` to trigger aborting a test run when a failure is observed.
+Aborting multiple workers in a distributed environment requires hooking
+whatever signalling mechanism the distributed environment has up to a
+``TestControl`` in each worker process.
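+
+A sketch of the pairing (events are routed through ``fail_fast`` while the
+runner polls ``control.shouldStop``)::
+
+    >>> from testtools import StreamFailFast, TestControl
+    >>> control = TestControl()
+    >>> fail_fast = StreamFailFast(control.stop)
+    >>> control.shouldStop
+    False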
+
+StreamTagger
+------------
+
+A ``StreamResult`` filter that adds or removes tags from events::
+
+    >>> from testtools import StreamResult, StreamTagger
+    >>> sink = StreamResult()
+    >>> result = StreamTagger([sink], set(['add']), set(['discard']))
+    >>> result.startTestRun()
+    >>> # Run tests against result here.
+    >>> result.stopTestRun()
+
+StreamToDict
+------------
+
+A simplified API for dealing with ``StreamResult`` streams. Each test is
+buffered until it completes and then reported as a trivial dict. This makes
+writing analysers very easy - you can ignore all the plumbing and just work
+with the result. e.g.::
+
+    >>> from testtools import StreamToDict
+    >>> def handle_test(test_dict):
+    ...     print(test_dict['id'])
+    >>> result = StreamToDict(handle_test)
+    >>> result.startTestRun()
+    >>> # Run tests against result here.
+    >>> # At stopTestRun() any incomplete buffered tests are announced.
+    >>> result.stopTestRun()
+
+ExtendedToStreamDecorator
+-------------------------
+
+This is a hybrid object that combines both the ``Extended`` and ``Stream``
+``TestResult`` APIs into one class, but only emits ``StreamResult`` events.
+This is useful when a ``StreamResult`` stream is desired, but you cannot
+be sure that the tests which will run have been updated to the ``StreamResult``
+API.
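+
+A sketch, wrapping a ``StreamSummary`` so that tests written against either
+API report into a single stream::
+
+    >>> from testtools import ExtendedToStreamDecorator, StreamSummary
+    >>> result = ExtendedToStreamDecorator(StreamSummary())
+    >>> result.startTestRun()
+    >>> # Run tests against result here.
+    >>> result.stopTestRun()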
+
+StreamToExtendedDecorator
+-------------------------
+
+This is a simple converter that emits the ``ExtendedTestResult`` API in
+response to events from the ``StreamResult`` API. Useful when outputting
+``StreamResult`` events from a ``TestCase`` but the supplied ``TestResult``
+does not support the ``status`` and ``file`` methods.
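+
+A sketch, feeding stream events into a plain ``unittest`` result object::
+
+    >>> import unittest
+    >>> from testtools import StreamToExtendedDecorator
+    >>> result = StreamToExtendedDecorator(unittest.TestResult())
+    >>> result.startTestRun()
+    >>> # Emit StreamResult events against result here.
+    >>> result.stopTestRun()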
+
+StreamToQueue
+-------------
+
+This is a ``StreamResult`` decorator for reporting tests from multiple threads
+at once. Each method submits an event to a supplied Queue object as a simple
+dict. See ``ConcurrentStreamTestSuite`` for a convenient way to use this.
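+
+A sketch (one ``StreamToQueue`` per worker thread, all sharing one queue;
+the route code 'worker-0' is illustrative)::
+
+    >>> from queue import Queue  # the 'Queue' module on Python 2
+    >>> from testtools import StreamToQueue
+    >>> queue = Queue()
+    >>> result = StreamToQueue(queue, 'worker-0')
+    >>> # Events reported to result now arrive on queue as dicts.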
+
+TimestampingStreamResult
+------------------------
+
+This is a ``StreamResult`` decorator for adding timestamps to events that lack
+them. This allows writing the simplest possible generators of events and
+passing the events via this decorator to get timestamped data. As long as
+no buffering/queueing or blocking happens before the timestamper sees the
+event, the timestamp will be as accurate as if the original event had it.
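+
+A sketch, stamping events on their way to a sink::
+
+    >>> from testtools import StreamResult, TimestampingStreamResult
+    >>> result = TimestampingStreamResult(StreamResult())
+    >>> result.startTestRun()
+    >>> result.status(test_id='test_foo', test_status='success')
+    >>> result.stopTestRun()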
+
+StreamResultRouter
+------------------
+
+This is a ``StreamResult`` which forwards events to an arbitrary set of target
+``StreamResult`` objects. Events that have no forwarding rule are passed on to
+a fallback ``StreamResult`` for processing. The mapping can be changed at
+runtime, allowing great flexibility and responsiveness to changes. Because
+the mapping can change dynamically, and the same recipient may appear in two
+different mappings, ``startTestRun`` and ``stopTestRun`` handling is
+fine-grained and up to the user.
+
+If no fallback has been supplied, an unroutable event will raise an exception.
+
+For instance::
+
+    >>> router = StreamResultRouter()
+    >>> sink = doubles.StreamResult()
+    >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+    ...     consume_route=True)
+    >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+Would remove the ``0/`` from the route_code and forward the event like so::
+
+    >>> sink.status(test_id='foo', route_code='1', test_status='uxsuccess')
+
+See ``pydoc testtools.StreamResultRouter`` for details.
+
 TestResult.addSkip
 ------------------
 
@@ -215,12 +393,29 @@ ConcurrentTestSuite uses the helper to get a number of separate runnable
 objects with a run(result), runs them all in threads using the
 ThreadsafeForwardingResult to coalesce their activity.
 
+ConcurrentStreamTestSuite
+-------------------------
+
+A variant of ConcurrentTestSuite that uses the new StreamResult API instead of
+the TestResult API. ConcurrentStreamTestSuite coordinates running some number
+of test/suites concurrently, with one StreamToQueue per test/suite.
+
+Each test/suite is given its own StreamToQueue instance, wrapped in an
+ExtendedToStreamDecorator and a TimestampingStreamResult, which forwards onto
+the StreamResult that ConcurrentStreamTestSuite.run was called with.
+
+ConcurrentStreamTestSuite is a thin shim and it is easy to implement your own
+specialised form if that is needed.
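+
+A sketch (``SomeTest`` is illustrative; ``make_tests`` must return an
+iterable of (case, route_code) pairs)::
+
+    >>> from testtools import ConcurrentStreamTestSuite, StreamResult
+    >>> def make_tests():
+    ...     return [(SomeTest('test_one'), '0'), (SomeTest('test_two'), '1')]
+    >>> suite = ConcurrentStreamTestSuite(make_tests)
+    >>> suite.run(StreamResult())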
+
 FixtureSuite
 ------------
 
 A test suite that sets up a fixture_ before running any tests, and then tears
 it down after all of the tests are run. The fixture is *not* made available to
-any of the tests.
+any of the tests due to there being no standard channel for suites to pass
+information to the tests they contain (and we don't have enough data on what
+such a channel would need to achieve to design a good one yet - or even decide
+if it is a good idea).
 
 sorted_tests
 ------------
@@ -229,10 +424,30 @@ Given the composite structure of TestSuite / TestCase, sorting tests is
 problematic - you can't tell what functionality is embedded into custom Suite
 implementations. In order to deliver consistent test orders when using test
 discovery (see http://bugs.python.org/issue16709), testtools flattens and
-sorts tests that have the standard TestSuite, defines a new method sort_tests,
-which can be used by non-standard TestSuites to know when they should sort
-their tests.
+sorts tests contained in standard TestSuites, and defines a new method
+sort_tests, which non-standard TestSuites can use to know when they should
+sort their tests. An example implementation can be seen at
+``FixtureSuite.sorted_tests``.
+
+If there are duplicate test ids in a suite, ValueError will be raised.
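+
+A sketch of flattening and sorting a discovered suite::
+
+    >>> from testtools.testsuite import sorted_tests
+    >>> # suite as produced by e.g. unittest.TestLoader().discover(...)
+    >>> suite = sorted_tests(suite)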
+
+filter_by_ids
+-------------
+
+Similarly to ``sorted_tests``, running a subset of tests is problematic - the
+standard run interface provides no way to limit what runs. Rather than
+confounding the two problems (selection and execution) we defined a method
+that filters the tests in a suite (or a case) by their unique test id.
+If you are writing custom wrapping suites, consider implementing filter_by_ids
+to support this (though most wrappers that subclass ``unittest.TestSuite`` will
+work just fine; see ``testtools.testsuite.filter_by_ids`` for details).
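+
+A sketch (the test id shown is illustrative)::
+
+    >>> from testtools.testsuite import filter_by_ids
+    >>> suite = filter_by_ids(suite, ['pkg.tests.TestFoo.test_bar'])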
+
+Extensions to TestRunner
+========================
+
+To facilitate custom listing of tests, ``testtools.run.TestProgram`` attempts
+to call ``list`` on the ``TestRunner``, falling back to a generic
+implementation if it is not present.
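+
+A sketch of a runner that takes over listing (assuming ``list`` is handed the
+suite to enumerate; ``iterate_tests`` is from ``testtools.testsuite``)::
+
+    from testtools.testsuite import iterate_tests
+
+    class ListingRunner(object):
+        def list(self, test):
+            for case in iterate_tests(test):
+                print(case.id())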
 
-.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
 .. _unittest: http://docs.python.org/library/unittest.html
 .. _fixture: http://pypi.python.org/pypi/fixtures
diff --git a/lib/testtools/doc/for-test-authors.rst b/lib/testtools/doc/for-test-authors.rst
index c9e6c6a..5deb7ce 100644
--- a/lib/testtools/doc/for-test-authors.rst
+++ b/lib/testtools/doc/for-test-authors.rst
@@ -11,7 +11,7 @@ automated testing already.
 If you are a test author of an unusually large or unusually unusual test
 suite, you might be interested in :doc:`for-framework-folk`.
 
-You might also be interested in the `testtools API docs`_.
+You might also be interested in the :doc:`testtools API docs </api>`.
 
 
 Introduction
@@ -163,7 +163,8 @@ The first argument to ``ExpectedException`` is the type of exception you
 expect to see raised.  The second argument is optional, and can be either a
 regular expression or a matcher. If it is a regular expression, the ``str()``
 of the raised exception must match the regular expression. If it is a matcher,
-then the raised exception object must match it.
+then the raised exception object must match it. The optional third argument
+``msg`` will cause the raised error to be annotated with that message.
 
 
 assertIn, assertNotIn
@@ -287,6 +288,60 @@ Which is roughly equivalent to::
       self.assertNotEqual(result, 50)
 
 
+``assert_that`` Function
+------------------------
+
+In addition to ``self.assertThat``, testtools also provides the ``assert_that``
+function in ``testtools.assertions``. This behaves like the method version does::
+
+    class TestSquare(TestCase):
+
+        def test_square(self):
+            result = square(7)
+            assert_that(result, Equals(49))
+
+        def test_square_silly(self):
+            result = square(7)
+            assert_that(result, Not(Equals(50)))
+
+
+Delayed Assertions
+~~~~~~~~~~~~~~~~~~
+
+A failure in the ``self.assertThat`` method will immediately fail the test: no
+more test code will be run after the assertion failure.
+
+The ``expectThat`` method behaves the same as ``assertThat`` with one
+exception: when failing the test it does so at the end of the test code rather
+than when the mismatch is detected. For example::
+
+  import subprocess
+
+  from testtools import TestCase
+  from testtools.matchers import Equals
+
+
+  class SomeProcessTests(TestCase):
+
+      def test_process_output(self):
+          process = subprocess.Popen(
+            ["my-app", "/some/path"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+          )
+
+          stdout, stderr = process.communicate()
+
+          self.expectThat(process.returncode, Equals(0))
+          self.expectThat(stdout, Equals("Expected Output"))
+          self.expectThat(stderr, Equals(""))
+
+In this example, should the ``expectThat`` call fail, the failure will be
+recorded in the test result, but the test will continue as normal. If all
+three assertions fail, the test result will have three failures recorded, and
+the failure details for each failed assertion will be attached to the test
+result.
+
 Stock matchers
 --------------
 
@@ -407,7 +462,7 @@ example::
       except RuntimeError:
           exc_info = sys.exc_info()
       self.assertThat(exc_info, MatchesException(RuntimeError))
-      self.assertThat(exc_info, MatchesException(RuntimeError('bar'))
+      self.assertThat(exc_info, MatchesException(RuntimeError('bar')))
 
 Most of the time, you will want to use `The raises helper`_ instead.
 
@@ -445,6 +500,18 @@ be able to do, if you think about it::
       self.assertThat('foo', MatchesRegex('fo+'))
 
 
+HasLength
+~~~~~~~~~
+
+Check the length of a collection.  The following assertion will fail::
+
+  self.assertThat([1, 2, 3], HasLength(2))
+
+But this one won't::
+
+  self.assertThat([1, 2, 3], HasLength(3))
+
+
 File- and path-related matchers
 -------------------------------
 
@@ -585,7 +652,7 @@ Used to add custom notes to a matcher.  For example::
   def test_annotate_example(self):
       result = 43
       self.assertThat(
-          result, Annotate("Not the answer to the Question!", Equals(42))
+          result, Annotate("Not the answer to the Question!", Equals(42)))
 
 Since the annotation is only ever displayed when there is a mismatch
 (e.g. when ``result`` does not equal 42), it's a good idea to phrase the note
@@ -613,7 +680,7 @@ matching. This can be used to aid in creating trivial matchers as functions, for
 example::
 
   def test_after_preprocessing_example(self):
-      def HasFileContent(content):
+      def PathHasFileContent(content):
           def _read(path):
               return open(path).read()
           return AfterPreprocessing(_read, Equals(content))
@@ -780,6 +847,35 @@ Which will produce the error message::
   MismatchError: 42 is not prime.
 
 
+MatchesPredicateWithParams
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you can't use a trivial predicate and instead need to pass in some
+parameters each time. In that case, MatchesPredicateWithParams is your go-to
+tool for creating ad hoc matchers. MatchesPredicateWithParams takes a predicate
+function and message and returns a factory to produce matchers from that. The
+predicate needs to return a boolean (or any truthy object), and accept the
+object to match, plus whatever was passed into the factory.
+
+For example, you might have a ``divisible`` function and want to make a
+matcher based on it::
+
+  def test_divisible_numbers(self):
+      IsDivisibleBy = MatchesPredicateWithParams(
+          divisible, '{0} is not divisible by {1}')
+      self.assertThat(7, IsDivisibleBy(1))
+      self.assertThat(7, IsDivisibleBy(7))
+      self.assertThat(7, IsDivisibleBy(2))
+      # This will fail.
+
+Which will produce the error message::
+
+  Traceback (most recent call last):
+    File "...", line ..., in test_divisible
+      self.assertThat(7, IsDivisibleBy(2))
+  MismatchError: 7 is not divisible by 2.
+
+
 Raises
 ~~~~~~
 
@@ -838,9 +934,9 @@ returns a non-None value.  For example::
 
   def test_is_divisible_by_example(self):
       # This succeeds, since IsDivisibleBy(5).match(10) returns None.
-      self.assertThat(10, IsDivisbleBy(5))
+      self.assertThat(10, IsDivisibleBy(5))
       # This fails, since IsDivisibleBy(7).match(10) returns a mismatch.
-      self.assertThat(10, IsDivisbleBy(7))
+      self.assertThat(10, IsDivisibleBy(7))
 
 The mismatch is responsible for what sort of error message the failing test
 generates.  Here's an example mismatch::
@@ -1201,6 +1297,13 @@ Here are some tips for converting your Trial tests into testtools tests.
   ``AsynchronousDeferredRunTest`` does not.  If you rely on this behavior, use
   ``AsynchronousDeferredRunTestForBrokenTwisted``.
 
+force_failure
+-------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to ``True``
+will cause the test to be marked as a failure, but won't stop the test code
+from running (see :ref:`force_failure`).
+
 
 Test helpers
 ============
@@ -1280,6 +1383,29 @@ details of certain variables don't actually matter.
 See pages 419-423 of `xUnit Test Patterns`_ by Gerard Meszaros for a detailed
 discussion of creation methods.
 
+Test attributes
+---------------
+
+Inspired by the ``nosetests`` ``attr`` plugin, testtools provides support for
+marking up test methods with attributes, which are then exposed in the test
+id and can be used when filtering tests by id. (e.g. via ``--load-list``)::
+
+  from testtools.testcase import attr, WithAttributes
+
+  class AnnotatedTests(WithAttributes, TestCase):
+
+      @attr('simple')
+      def test_one(self):
+          pass
+
+      @attr('more', 'than', 'one')
+      def test_two(self):
+          pass
+
+      @attr('or')
+      @attr('stacked')
+      def test_three(self):
+          pass
 
 General helpers
 ===============
@@ -1288,7 +1414,7 @@ Conditional imports
 -------------------
 
 Lots of the time we would like to conditionally import modules.  testtools
-needs to do this itself, and graciously extends the ability to its users.
+uses the small library extras to do this. This used to be part of testtools.
 
 Instead of::
 
@@ -1317,9 +1443,9 @@ You can do::
 Safe attribute testing
 ----------------------
 
-``hasattr`` is broken_ on many versions of Python.  testtools provides
-``safe_hasattr``, which can be used to safely test whether an object has a
-particular attribute.
+``hasattr`` is broken_ on many versions of Python. The helper ``safe_hasattr``
+can be used to safely test whether an object has a particular attribute. Like
+``try_import`` this used to be in testtools but is now in extras.
 
 
 Nullary callables
@@ -1354,7 +1480,6 @@ Here, ``repr(nullary)`` will be the same as ``repr(f)``.
 .. _doctest: http://docs.python.org/library/doctest.html
 .. _Deferred: http://twistedmatrix.com/documents/current/core/howto/defer.html
 .. _discover: http://pypi.python.org/pypi/discover
-.. _`testtools API docs`: http://mumak.net/testtools/apidocs/
 .. _Distutils: http://docs.python.org/library/distutils.html
 .. _`setup configuration`: http://docs.python.org/distutils/configfile.html
 .. _broken: http://chipaca.com/post/3210673069/hasattr-17-less-harmful
diff --git a/lib/testtools/doc/hacking.rst b/lib/testtools/doc/hacking.rst
index 663eeac..ccfc155 100644
--- a/lib/testtools/doc/hacking.rst
+++ b/lib/testtools/doc/hacking.rst
@@ -2,6 +2,13 @@
 Contributing to testtools
 =========================
 
+Bugs and patches
+----------------
+
+`File bugs <https://bugs.launchpad.net/testtools/+filebug>`_ on Launchpad, and
+`send patches <https://github.com/testing-cabal/testtools/>`_ on Github.
+
+
 Coding style
 ------------
 
@@ -49,6 +56,21 @@ is often useful to see all levels of the stack. To do this, add
 ``run_tests_with = FullStackRunTest`` to the top of a test's class definition.
 
 
+Discussion
+----------
+
+When submitting a patch, it will help the review process a lot if there's a
+clear explanation of what the change does and why you think the change is a
+good idea.  For crasher bugs, this is generally a no-brainer, but for UI bugs
+& API tweaks, the reason something is an improvement might not be obvious, so
+it's worth spelling out.
+
+If you are thinking of implementing a new feature, you might want to have that
+discussion on the mailing list (testtools-dev at lists.launchpad.net) before the
+patch goes up for review.  This is not at all mandatory, but getting feedback
+early can help avoid dead ends.
+
+
 Documentation
 -------------
 
@@ -63,7 +85,7 @@ Source layout
 -------------
 
 The top-level directory contains the ``testtools/`` package directory, and
-miscellaneous files like ``README`` and ``setup.py``.
+miscellaneous files like ``README.rst`` and ``setup.py``.
 
 The ``testtools/`` directory is the Python package itself.  It is separated
 into submodules for internal clarity, but all public APIs should be “promoted”
@@ -78,13 +100,13 @@ Tests belong in ``testtools/tests/``.
 Committing to trunk
 -------------------
 
-Testtools is maintained using bzr, with its trunk at lp:testtools. This gives
-every contributor the ability to commit their work to their own branches.
-However permission must be granted to allow contributors to commit to the trunk
-branch.
+Testtools is maintained using git, with its master repo at
+https://github.com/testing-cabal/testtools. This gives every contributor the
+ability to commit their work to their own branches. However permission must be
+granted to allow contributors to commit to the trunk branch.
 
-Commit access to trunk is obtained by joining the testtools-committers
-Launchpad team. Membership in this team is contingent on obeying the testtools
+Commit access to trunk is obtained by joining the `testing-cabal`_, either as an
+Owner or a Committer. Commit access is contingent on obeying the testtools
 contribution policy, see `Copyright Assignment`_ above.
 
 
@@ -92,22 +114,34 @@ Code Review
 -----------
 
 All code must be reviewed before landing on trunk. The process is to create a
-branch in launchpad, and submit it for merging to lp:testtools. It will then
-be reviewed before it can be merged to trunk. It will be reviewed by someone:
+branch on Github, and make a pull request into trunk. It will then be reviewed
+before it can be merged to trunk. It will be reviewed by someone:
 
 * not the author
-* a committer (member of the `~testtools-committers`_ team)
+* a committer
 
-As a special exception, while the testtools committers team is small and prone
-to blocking, a merge request from a committer that has not been reviewed after
-24 hours may be merged by that committer. When the team is larger this policy
-will be revisited.
+As a special exception, since there are few testtools committers and thus
+reviews are prone to blocking, a pull request from a committer that has not been
+reviewed after 24 hours may be merged by that committer. When the team is larger
+this policy will be revisited.
 
 Code reviewers should look for the quality of what is being submitted,
 including conformance with this HACKING file.
 
 Changes which all users should be made aware of should be documented in NEWS.
 
+We are now in full backwards compatibility mode - no more releases < 1.0.0, and 
+breaking compatibility will require consensus on the testtools-dev mailing list.
+Exactly what constitutes a backwards incompatible change is vague, but coarsely:
+
+* adding required arguments or required calls to something that used to work
+* removing keyword or positional arguments, removing methods, functions or modules
+* changing behaviour someone may have reasonably depended on
+
+Some things are not compatibility issues:
+
+* changes to _ prefixed methods, functions, modules, packages.
+
 
 NEWS management
 ---------------
@@ -119,35 +153,42 @@ branches, the bullet points are kept alphabetically sorted. The release NEXT is
 permanently present at the top of the list.
 
 
-Release tasks
--------------
+Releasing
+---------
+
+Prerequisites
++++++++++++++
+
+Membership in the testing-cabal org on github as committer.
+
+Membership in the pypi testtools project as maintainer.
+
+Membership in the https://launchpad.net/~testtools-committers team.
+
+Tasks
++++++
 
 #. Choose a version number, say X.Y.Z
-#. Branch from trunk to testtools-X.Y.Z
-#. In testtools-X.Y.Z, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
-#. Replace NEXT in NEWS with the version number X.Y.Z, adjusting the reST.
+#. In trunk, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
+#. Under NEXT in NEWS add a heading with the version number X.Y.Z.
 #. Possibly write a blurb into NEWS.
-#. Replace any additional references to NEXT with the version being
-   released. (There should be none other than the ones in these release tasks
-   which should not be replaced).
 #. Commit the changes.
-#. Tag the release, bzr tag testtools-X.Y.Z
+#. Tag the release, ``git tag -s testtools-X.Y.Z``
 #. Run 'make release', this:
    #. Creates a source distribution and uploads to PyPI
    #. Ensures all Fix Committed bugs are in the release milestone
    #. Makes a release on Launchpad and uploads the tarball
    #. Marks all the Fix Committed bugs as Fix Released
    #. Creates a new milestone
-#. Merge the release branch testtools-X.Y.Z into trunk. Before the commit,
-   add a NEXT heading to the top of NEWS and bump the version in __init__.py
+#. Change __version__ in __init__.py to the probable next version.
    e.g. to ``(X, Y, Z+1, 'dev', 0)``.
-#. Push trunk to Launchpad
+#. Commit 'Opening X.Y.Z+1 for development.'
 #. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
+#. Push trunk to Github, ``git push --tags origin master``
 
 .. _PEP 8: http://www.python.org/dev/peps/pep-0008/
 .. _unittest: http://docs.python.org/library/unittest.html
-.. _~testtools-committers: https://launchpad.net/~testtools-committers
 .. _MIT license: http://www.opensource.org/licenses/mit-license.php
 .. _Sphinx: http://sphinx.pocoo.org/
 .. _restructuredtext: http://docutils.sourceforge.net/rst.html
-
+.. _testing-cabal: https://github.com/organizations/testing-cabal/
diff --git a/lib/testtools/doc/index.rst b/lib/testtools/doc/index.rst
index bac47e4..a6c05a9 100644
--- a/lib/testtools/doc/index.rst
+++ b/lib/testtools/doc/index.rst
@@ -25,7 +25,7 @@ Contents:
    for-framework-folk
    hacking
    Changes to testtools <news>
-   API reference documentation <http://mumak.net/testtools/apidocs/>
+   API reference documentation <api>
 
 Indices and tables
 ==================
diff --git a/lib/testtools/doc/overview.rst b/lib/testtools/doc/overview.rst
index cb72893..a01dc3d 100644
--- a/lib/testtools/doc/overview.rst
+++ b/lib/testtools/doc/overview.rst
@@ -5,10 +5,7 @@ testtools: tasteful testing for Python
 testtools is a set of extensions to the Python standard library's unit testing
 framework. These extensions have been derived from many years of experience
 with unit testing in Python and come from many different sources. testtools
-supports Python versions all the way back to Python 2.4. The next release of
-testtools will change that to support versions that are maintained by the
-Python community instead, to allow the use of modern language features within
-testtools.
+supports Python versions all the way back to Python 2.6.
 
 What better way to start than with a contrived code snippet?::
 
@@ -29,7 +26,7 @@ What better way to start than with a contrived code snippet?::
       def attach_log_file(self):
           self.addDetail(
               'log-file',
-              Content(UTF8_TEXT
+              Content(UTF8_TEXT,
                       lambda: open(self.server.logfile, 'r').readlines()))
 
       def test_server_is_cool(self):
@@ -96,7 +93,9 @@ Cross-Python compatibility
 --------------------------
 
 testtools gives you the very latest in unit testing technology in a way that
-will work with Python 2.6, 2.7 and 3.1.
+will work with Python 2.6, 2.7, 3.1 and 3.2.
 
 If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
-0.9.15.
+0.9.15. Up to that release we supported Python 2.4 and 2.5, but the
+constraints involved in avoiding newer language features became onerous as
+we added more support for Python 3 and later.
diff --git a/lib/testtools/scripts/_lp_release.py b/lib/testtools/scripts/_lp_release.py
index 20afd01..ac27e47 100644
--- a/lib/testtools/scripts/_lp_release.py
+++ b/lib/testtools/scripts/_lp_release.py
@@ -108,7 +108,8 @@ def get_release_notes_and_changelog(news_path):
         for line in news:
             line = line.strip()
             if state is None:
-                if is_heading_marker(line, '~'):
+                if (is_heading_marker(line, '~') and
+                    not last_line.startswith('NEXT')):
                     milestone_name = last_line
                     state = 'release-notes'
                 else:
@@ -222,7 +223,8 @@ def release_project(launchpad, project_name, next_milestone_name):
 
 
 def main(args):
-    launchpad = Launchpad.login_with(APP_NAME, SERVICE_ROOT, CACHE_DIR)
+    launchpad = Launchpad.login_with(
+        APP_NAME, SERVICE_ROOT, CACHE_DIR, credentials_file='.lp_creds')
     return release_project(launchpad, PROJECT_NAME, NEXT_MILESTONE_NAME)
 
 
diff --git a/lib/testtools/setup.py b/lib/testtools/setup.py
index 7ecd6d2..dacbf91 100755
--- a/lib/testtools/setup.py
+++ b/lib/testtools/setup.py
@@ -1,22 +1,24 @@
 #!/usr/bin/env python
 """Distutils installer for testtools."""
 
-from distutils.core import setup
+from setuptools import setup
+from distutils.command.build_py import build_py
 import email
 import os
+import sys
 
 import testtools
+cmd_class = {}
+if getattr(testtools, 'TestCommand', None) is not None:
+    cmd_class['test'] = testtools.TestCommand
 
 
-def get_revno():
-    import bzrlib.errors
-    import bzrlib.workingtree
-    try:
-        t = bzrlib.workingtree.WorkingTree.open_containing(__file__)[0]
-    except (bzrlib.errors.NotBranchError, bzrlib.errors.NoWorkingTree):
-        return None
-    else:
-        return t.branch.revno()
+class testtools_build_py(build_py):
+    def build_module(self, module, module_file, package):
+        if sys.version_info >= (3,) and module == '_compat2x':
+            return
+        return build_py.build_module(self, module, module_file, package)
+cmd_class['build_py'] = testtools_build_py
 
 
 def get_version_from_pkg_info():
@@ -43,18 +45,10 @@ def get_version():
     pkg_info_version = get_version_from_pkg_info()
     if pkg_info_version:
         return pkg_info_version
-    revno = get_revno()
-    if revno is None:
-        # Apparently if we just say "snapshot" then distribute won't accept it
-        # as satisfying versioned dependencies. This is a problem for the
-        # daily build version.
-        return "snapshot-%s" % (version,)
-    if phase == 'alpha':
-        # No idea what the next version will be
-        return 'next-r%s' % revno
-    else:
-        # Preserve the version number but give it a revno prefix
-        return version + '-r%s' % revno
+    # Apparently if we just say "snapshot" then distribute won't accept it
+    # as satisfying versioned dependencies. This is a problem for the
+    # daily build version.
+    return "snapshot-%s" % (version,)
 
 
 def get_long_description():
@@ -66,7 +60,7 @@ def get_long_description():
 setup(name='testtools',
       author='Jonathan M. Lange',
       author_email='jml+testtools at mumak.net',
-      url='https://launchpad.net/testtools',
+      url='https://github.com/testing-cabal/testtools',
       description=('Extensions to the Python standard library unit testing '
                    'framework'),
       long_description=get_long_description(),
@@ -81,5 +75,12 @@ setup(name='testtools',
         'testtools.tests',
         'testtools.tests.matchers',
         ],
-      cmdclass={'test': testtools.TestCommand},
-      zip_safe=False)
+      cmdclass=cmd_class,
+      zip_safe=False,
+      install_requires=[
+        'extras',
+        # The maintainer of 'mimeparse' has not uploaded a Python 3 compatible
+        # release, but someone kindly uploaded a fixed version as 'python-mimeparse'.
+        'python-mimeparse',
+        ],
+      )
diff --git a/lib/testtools/testtools/__init__.py b/lib/testtools/testtools/__init__.py
index d722ce5..973083a 100644
--- a/lib/testtools/testtools/__init__.py
+++ b/lib/testtools/testtools/__init__.py
@@ -4,10 +4,14 @@
 
 __all__ = [
     'clone_test_with_new_id',
+    'CopyStreamResult',
     'ConcurrentTestSuite',
+    'ConcurrentStreamTestSuite',
+    'DecorateTestCaseResult',
     'ErrorHolder',
     'ExpectedException',
     'ExtendedToOriginalDecorator',
+    'ExtendedToStreamDecorator',
     'FixtureSuite',
     'iterate_tests',
     'MultipleExceptions',
@@ -25,54 +29,86 @@ __all__ = [
     'skip',
     'skipIf',
     'skipUnless',
+    'StreamFailFast',
+    'StreamResult',
+    'StreamResultRouter',
+    'StreamSummary',
+    'StreamTagger',
+    'StreamToDict',
+    'StreamToExtendedDecorator',
+    'StreamToQueue',
+    'TestControl',
     'ThreadsafeForwardingResult',
+    'TimestampingStreamResult',
     'try_import',
     'try_imports',
     ]
 
-from testtools.helpers import (
-    try_import,
-    try_imports,
-    )
-from testtools.matchers._impl import (
-    Matcher,
-    )
+# Compat - removal announced in 0.9.25.
+try:
+    from extras import (
+        try_import,
+        try_imports,
+        )
+except ImportError:
+    # Support reading __init__ for __version__ without extras, because pip does
+    # not support setup_requires.
+    pass
+else:
+
+    from testtools.matchers._impl import (
+        Matcher,
+        )
 # Shut up, pyflakes. We are importing for documentation, not for namespacing.
-Matcher
+    Matcher
 
-from testtools.runtest import (
-    MultipleExceptions,
-    RunTest,
-    )
-from testtools.testcase import (
-    ErrorHolder,
-    ExpectedException,
-    PlaceHolder,
-    TestCase,
-    clone_test_with_new_id,
-    run_test_with,
-    skip,
-    skipIf,
-    skipUnless,
-    )
-from testtools.testresult import (
-    ExtendedToOriginalDecorator,
-    MultiTestResult,
-    Tagger,
-    TestByTestResult,
-    TestResult,
-    TestResultDecorator,
-    TextTestResult,
-    ThreadsafeForwardingResult,
-    )
-from testtools.testsuite import (
-    ConcurrentTestSuite,
-    FixtureSuite,
-    iterate_tests,
-    )
-from testtools.distutilscmd import (
-    TestCommand,
-)
+    from testtools.runtest import (
+        MultipleExceptions,
+        RunTest,
+        )
+    from testtools.testcase import (
+        DecorateTestCaseResult,
+        ErrorHolder,
+        ExpectedException,
+        PlaceHolder,
+        TestCase,
+        clone_test_with_new_id,
+        run_test_with,
+        skip,
+        skipIf,
+        skipUnless,
+        )
+    from testtools.testresult import (
+        CopyStreamResult,
+        ExtendedToOriginalDecorator,
+        ExtendedToStreamDecorator,
+        MultiTestResult,
+        StreamFailFast,
+        StreamResult,
+        StreamResultRouter,
+        StreamSummary,
+        StreamTagger,
+        StreamToDict,
+        StreamToExtendedDecorator,
+        StreamToQueue,
+        Tagger,
+        TestByTestResult,
+        TestControl,
+        TestResult,
+        TestResultDecorator,
+        TextTestResult,
+        ThreadsafeForwardingResult,
+        TimestampingStreamResult,
+        )
+    from testtools.testsuite import (
+        ConcurrentTestSuite,
+        ConcurrentStreamTestSuite,
+        FixtureSuite,
+        iterate_tests,
+        )
+    from testtools.distutilscmd import (
+        TestCommand,
+        )
 
 # same format as sys.version_info: "A tuple containing the five components of
 # the version number: major, minor, micro, releaselevel, and serial. All
@@ -86,4 +122,4 @@ from testtools.distutilscmd import (
 # If the releaselevel is 'final', then the tarball will be major.minor.micro.
 # Otherwise it is major.minor.micro~$(revno).
 
-__version__ = (0, 9, 25, 'dev', 0)
+__version__ = (1, 1, 0, 'final', 0)
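
For illustration, a sketch of why the guarded import above matters,
assuming a checkout where ``extras`` is not yet installed: reading the
version still works, because the ImportError is swallowed and only the
public API re-exports are skipped::

    # Without 'extras' on sys.path this import still succeeds; only the
    # names guarded by the try/except in __init__.py are absent.
    import testtools
    print(testtools.__version__)   # (1, 1, 0, 'final', 0)
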
diff --git a/lib/testtools/testtools/assertions.py b/lib/testtools/testtools/assertions.py
new file mode 100644
index 0000000..87fa74b
--- /dev/null
+++ b/lib/testtools/testtools/assertions.py
@@ -0,0 +1,22 @@
+from testtools.matchers import (
+    Annotate,
+    MismatchError,
+    )
+
+
+def assert_that(matchee, matcher, message='', verbose=False):
+    """Assert that matchee is matched by matcher.
+
+    This should only be used when you need a function-based
+    matcher: assertThat on testtools.TestCase is preferred and has
+    more features.
+
+    :param matchee: An object to match with matcher.
+    :param matcher: An object meeting the testtools.Matcher protocol.
+    :raises MismatchError: When matcher does not match matchee.
+    """
+    matcher = Annotate.if_message(message, matcher)
+    mismatch = matcher.match(matchee)
+    if not mismatch:
+        return
+    raise MismatchError(matchee, matcher, mismatch, verbose)
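
A minimal usage sketch of the new helper, assuming the ``Equals``
matcher from ``testtools.matchers``::

    from testtools.assertions import assert_that
    from testtools.matchers import Equals

    assert_that(4, Equals(4))      # returns quietly on a match
    try:
        assert_that(4, Equals(5), message='simple arithmetic')
    except Exception as e:
        # MismatchError is an AssertionError subclass, so plain
        # unittest machinery will also report it as a failure.
        print(e)
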
diff --git a/lib/testtools/testtools/compat.py b/lib/testtools/testtools/compat.py
index 375eca2..226a22b 100644
--- a/lib/testtools/testtools/compat.py
+++ b/lib/testtools/testtools/compat.py
@@ -19,6 +19,7 @@ __all__ = [
     ]
 
 import codecs
+import io
 import linecache
 import locale
 import os
@@ -27,14 +28,14 @@ import sys
 import traceback
 import unicodedata
 
-from testtools.helpers import try_imports
+from extras import try_imports
 
 BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO'])
 StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
 
 try:
     from testtools import _compat2x as _compat
-except SyntaxError:
+except (SyntaxError, ImportError):
     from testtools import _compat3x as _compat
 
 reraise = _compat.reraise
@@ -87,34 +88,6 @@ else:
 _u.__doc__ = __u_doc
 
 
-if sys.version_info > (2, 5):
-    all = all
-    _error_repr = BaseException.__repr__
-    def isbaseexception(exception):
-        """Return whether exception inherits from BaseException only"""
-        return (isinstance(exception, BaseException)
-            and not isinstance(exception, Exception))
-else:
-    def all(iterable):
-        """If contents of iterable all evaluate as boolean True"""
-        for obj in iterable:
-            if not obj:
-                return False
-        return True
-    def _error_repr(exception):
-        """Format an exception instance as Python 2.5 and later do"""
-        return exception.__class__.__name__ + repr(exception.args)
-    def isbaseexception(exception):
-        """Return whether exception would inherit from BaseException only
-
-        This approximates the hierarchy in Python 2.5 and later, compare the
-        difference between the diagrams at the bottom of the pages:
-        <http://docs.python.org/release/2.4.4/lib/module-exceptions.html>
-        <http://docs.python.org/release/2.5.4/lib/module-exceptions.html>
-        """
-        return isinstance(exception, (KeyboardInterrupt, SystemExit))
-
-
 # GZ 2011-08-24: Using isinstance checks like this encourages bad interfaces,
 #                there should be better ways to write code needing this.
 if not issubclass(getattr(builtins, "bytes", str), str):
@@ -169,7 +142,7 @@ def text_repr(text, multiline=None):
     prefix = repr(text[:0])[:-2]
     if multiline:
         # To escape multiline strings, split and process each line in turn,
-        # making sure that quotes are not escaped. 
+        # making sure that quotes are not escaped.
         if is_py3k:
             offset = len(prefix) + 1
             lines = []
@@ -215,14 +188,15 @@ def unicode_output_stream(stream):
     The wrapper only allows unicode to be written, not non-ascii bytestrings,
     which is a good thing to ensure sanity and sanitation.
     """
-    if sys.platform == "cli":
-        # Best to never encode before writing in IronPython
+    if (sys.platform == "cli" or
+        isinstance(stream, (io.TextIOWrapper, io.StringIO))):
+        # Best to never encode before writing in IronPython, or if it is
+        # already a TextIO (which in the io library has no encoding
+        # attribute).
         return stream
     try:
         writer = codecs.getwriter(stream.encoding or "")
     except (AttributeError, LookupError):
-        # GZ 2010-06-16: Python 3 StringIO ends up here, but probably needs
-        #                different handling as it doesn't want bytestrings
         return codecs.getwriter("ascii")(stream, "replace")
     if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
         # The current stream has a unicode encoding so no error handler is needed
@@ -324,31 +298,33 @@ def _exception_to_text(evalue):
     return None
 
 
-# GZ 2010-05-23: This function is huge and horrible and I welcome suggestions
-#                on the best way to break it up
-_TB_HEADER = _u('Traceback (most recent call last):\n')
-def _format_exc_info(eclass, evalue, tb, limit=None):
-    """Format a stack trace and the exception information as unicode
+def _format_stack_list(stack_lines):
+    """Format 'stack_lines' and return a list of unicode strings.
 
-    Compatibility function for Python 2 which ensures each component of a
-    traceback is correctly decoded according to its origins.
-
-    Based on traceback.format_exception and related functions.
+    :param stack_lines: A list of filename, lineno, name, and line variables,
+        probably obtained by calling traceback.extract_tb or
+        traceback.extract_stack.
     """
     fs_enc = sys.getfilesystemencoding()
-    if tb:
-        list = [_TB_HEADER]
-        extracted_list = []
-        for filename, lineno, name, line in traceback.extract_tb(tb, limit):
+    extracted_list = []
+    for filename, lineno, name, line in stack_lines:
             extracted_list.append((
                 filename.decode(fs_enc, "replace"),
                 lineno,
                 name.decode("ascii", "replace"),
                 line and line.decode(
                     _get_source_encoding(filename), "replace")))
-        list.extend(traceback.format_list(extracted_list))
-    else:
-        list = []
+    return traceback.format_list(extracted_list)
+
+
+def _format_exception_only(eclass, evalue):
+    """Format the exception part of a traceback.
+
+    :param eclass: The type of the exception being formatted.
+    :param evalue: The exception instance.
+    :returns: A list of unicode strings.
+    """
+    list = []
     if evalue is None:
         # Is a (deprecated) string exception
         list.append((eclass + "\n").decode("ascii", "replace"))
@@ -377,6 +353,7 @@ def _format_exc_info(eclass, evalue, tb, limit=None):
                 else:
                     line = line.decode("ascii", "replace")
             if filename:
+                fs_enc = sys.getfilesystemencoding()
                 filename = filename.decode(fs_enc, "replace")
             evalue = eclass(msg, (filename, lineno, offset, line))
             list.extend(traceback.format_exception_only(eclass, evalue))
@@ -387,7 +364,24 @@ def _format_exc_info(eclass, evalue, tb, limit=None):
         list.append("%s: %s\n" % (sclass, svalue))
     elif svalue is None:
         # GZ 2010-05-24: Not a great fallback message, but keep for the moment
-        list.append("%s: <unprintable %s object>\n" % (sclass, sclass))
+        list.append(_u("%s: <unprintable %s object>\n" % (sclass, sclass)))
     else:
-        list.append("%s\n" % sclass)
+        list.append(_u("%s\n" % sclass))
     return list
+
+
+_TB_HEADER = _u('Traceback (most recent call last):\n')
+
+
+def _format_exc_info(eclass, evalue, tb, limit=None):
+    """Format a stack trace and the exception information as unicode
+
+    Compatibility function for Python 2 which ensures each component of a
+    traceback is correctly decoded according to its origins.
+
+    Based on traceback.format_exception and related functions.
+    """
+    return [_TB_HEADER] \
+        + _format_stack_list(traceback.extract_tb(tb, limit)) \
+        + _format_exception_only(eclass, evalue)
+
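
A small sketch of the new TextIO fast path in ``unicode_output_stream``
(Python 3 assumed): text streams are now returned unwrapped rather than
falling into the ascii-replace branch::

    import io
    from testtools.compat import unicode_output_stream

    out = io.StringIO()
    # io.StringIO is already unicode-only, so no codec wrapper is added.
    assert unicode_output_stream(out) is out
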
diff --git a/lib/testtools/testtools/content.py b/lib/testtools/testtools/content.py
index 8bd4a22..401004b 100644
--- a/lib/testtools/testtools/content.py
+++ b/lib/testtools/testtools/content.py
@@ -7,18 +7,29 @@ __all__ = [
     'Content',
     'content_from_file',
     'content_from_stream',
+    'json_content',
     'text_content',
     'TracebackContent',
     ]
 
 import codecs
+import inspect
 import json
 import os
 import sys
 import traceback
 
-from testtools import try_import
-from testtools.compat import _b, _format_exc_info, str_is_unicode, _u
+from extras import try_import
+
+from testtools.compat import (
+    _b,
+    _format_exception_only,
+    _format_stack_list,
+    _isbytes,
+    _TB_HEADER,
+    _u,
+    str_is_unicode,
+)
 from testtools.content_type import ContentType, JSON, UTF8_TEXT
 
 
@@ -102,28 +113,25 @@ class Content(object):
     def _iter_text(self):
         """Worker for iter_text - does the decoding."""
         encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
-        try:
-            # 2.5+
-            decoder = codecs.getincrementaldecoder(encoding)()
-            for bytes in self.iter_bytes():
-                yield decoder.decode(bytes)
-            final = decoder.decode(_b(''), True)
-            if final:
-                yield final
-        except AttributeError:
-            # < 2.5
-            bytes = ''.join(self.iter_bytes())
-            yield bytes.decode(encoding)
+        decoder = codecs.getincrementaldecoder(encoding)()
+        for bytes in self.iter_bytes():
+            yield decoder.decode(bytes)
+        final = decoder.decode(_b(''), True)
+        if final:
+            yield final
 
     def __repr__(self):
         return "<Content type=%r, value=%r>" % (
             self.content_type, _join_b(self.iter_bytes()))
 
 
-class TracebackContent(Content):
-    """Content object for tracebacks.
+class StackLinesContent(Content):
+    """Content object for stack lines.
+
+    This adapts a list of "preprocessed" stack lines into a content object.
+    The stack lines are most likely produced from ``traceback.extract_stack``
+    or ``traceback.extract_tb``.
 
-    This adapts an exc_info tuple to the Content interface.
     text/x-traceback;language=python is used for the mime type, in order to
     provide room for other languages to format their tracebacks differently.
     """
@@ -133,65 +141,113 @@ class TracebackContent(Content):
     # system-under-test is rarely unittest or testtools.
     HIDE_INTERNAL_STACK = True
 
-    def __init__(self, err, test):
-        """Create a TracebackContent for err."""
-        if err is None:
-            raise ValueError("err may not be None")
+    def __init__(self, stack_lines, prefix_content="", postfix_content=""):
+        """Create a StackLinesContent for ``stack_lines``.
+
+        :param stack_lines: A list of preprocessed stack lines, probably
+            obtained by calling ``traceback.extract_stack`` or
+            ``traceback.extract_tb``.
+        :param prefix_content: If specified, a unicode string to prepend to the
+            text content.
+        :param postfix_content: If specified, a unicode string to append to the
+            text content.
+        """
         content_type = ContentType('text', 'x-traceback',
             {"language": "python", "charset": "utf8"})
-        value = self._exc_info_to_unicode(err, test)
-        super(TracebackContent, self).__init__(
+        value = prefix_content + \
+            self._stack_lines_to_unicode(stack_lines) + \
+            postfix_content
+        super(StackLinesContent, self).__init__(
             content_type, lambda: [value.encode("utf8")])
 
-    def _exc_info_to_unicode(self, err, test):
-        """Converts a sys.exc_info()-style tuple of values into a string.
-
-        Copied from Python 2.7's unittest.TestResult._exc_info_to_string.
+    def _stack_lines_to_unicode(self, stack_lines):
+        """Converts a list of pre-processed stack lines into a unicode string.
         """
-        exctype, value, tb = err
-        # Skip test runner traceback levels
-        if self.HIDE_INTERNAL_STACK:
-            while tb and self._is_relevant_tb_level(tb):
-                tb = tb.tb_next
 
         # testtools customization. When str is unicode (e.g. IronPython,
         # Python 3), traceback.format_exception returns unicode. For Python 2,
         # it returns bytes. We need to guarantee unicode.
         if str_is_unicode:
-            format_exception = traceback.format_exception
+            format_stack_lines = traceback.format_list
         else:
-            format_exception = _format_exc_info
+            format_stack_lines = _format_stack_list
 
-        if (self.HIDE_INTERNAL_STACK and test.failureException
-            and isinstance(value, test.failureException)):
-            # Skip assert*() traceback levels
-            length = self._count_relevant_tb_levels(tb)
-            msgLines = format_exception(exctype, value, tb, length)
-        else:
-            msgLines = format_exception(exctype, value, tb)
-
-        if getattr(self, 'buffer', None):
-            output = sys.stdout.getvalue()
-            error = sys.stderr.getvalue()
-            if output:
-                if not output.endswith('\n'):
-                    output += '\n'
-                msgLines.append(STDOUT_LINE % output)
-            if error:
-                if not error.endswith('\n'):
-                    error += '\n'
-                msgLines.append(STDERR_LINE % error)
-        return ''.join(msgLines)
-
-    def _is_relevant_tb_level(self, tb):
-        return '__unittest' in tb.tb_frame.f_globals
-
-    def _count_relevant_tb_levels(self, tb):
-        length = 0
+        msg_lines = format_stack_lines(stack_lines)
+
+        return ''.join(msg_lines)
+
+
+def TracebackContent(err, test):
+    """Content object for tracebacks.
+
+    This adapts an exc_info tuple to the Content interface.
+    text/x-traceback;language=python is used for the mime type, in order to
+    provide room for other languages to format their tracebacks differently.
+    """
+    if err is None:
+        raise ValueError("err may not be None")
+
+    exctype, value, tb = err
+    # Skip test runner traceback levels
+    if StackLinesContent.HIDE_INTERNAL_STACK:
+        while tb and '__unittest' in tb.tb_frame.f_globals:
+            tb = tb.tb_next
+
+    # testtools customization. When str is unicode (e.g. IronPython,
+    # Python 3), traceback.format_exception_only returns unicode. For Python 2,
+    # it returns bytes. We need to guarantee unicode.
+    if str_is_unicode:
+        format_exception_only = traceback.format_exception_only
+    else:
+        format_exception_only = _format_exception_only
+
+    limit = None
+    # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
+    if (False
+        and StackLinesContent.HIDE_INTERNAL_STACK
+        and test.failureException
+        and isinstance(value, test.failureException)):
+        # Skip assert*() traceback levels
+        limit = 0
         while tb and not self._is_relevant_tb_level(tb):
-            length += 1
+            limit += 1
             tb = tb.tb_next
-        return length
+
+    prefix = _TB_HEADER
+    stack_lines = traceback.extract_tb(tb, limit)
+    postfix = ''.join(format_exception_only(exctype, value))
+
+    return StackLinesContent(stack_lines, prefix, postfix)
+
+
+def StacktraceContent(prefix_content="", postfix_content=""):
+    """Content object for stack traces.
+
+    This function will create and return a content object that contains a
+    stack trace.
+
+    The mime type is set to 'text/x-traceback;language=python', so other
+    languages can format their stack traces differently.
+
+    :param prefix_content: A unicode string to add before the stack lines.
+    :param postfix_content: A unicode string to add after the stack lines.
+    """
+    stack = inspect.stack()[1:]
+
+    if StackLinesContent.HIDE_INTERNAL_STACK:
+        limit = 1
+        while limit < len(stack) and '__unittest' not in stack[limit][0].f_globals:
+            limit += 1
+    else:
+        limit = -1
+
+    frames_only = [line[0] for line in stack[:limit]]
+    processed_stack = []
+    for frame in reversed(frames_only):
+        filename, line, function, context, _ = inspect.getframeinfo(frame)
+        context = ''.join(context)
+        processed_stack.append((filename, line, function, context))
+    return StackLinesContent(processed_stack, prefix_content, postfix_content)
 
 
 def json_content(json_data):
@@ -208,6 +264,8 @@ def text_content(text):
 
     This is useful for adding details which are short strings.
     """
+    if _isbytes(text):
+        raise TypeError('text_content must be given a string, not bytes.')
     return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
 
 
@@ -238,13 +296,12 @@ def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
     if content_type is None:
         content_type = UTF8_TEXT
     def reader():
-        # This should be try:finally:, but python2.4 makes that hard. When
-        # We drop older python support we can make this use a context manager
-        # for maximum simplicity.
-        stream = open(path, 'rb')
-        for chunk in _iter_chunks(stream, chunk_size, seek_offset, seek_whence):
-            yield chunk
-        stream.close()
+        with open(path, 'rb') as stream:
+            for chunk in _iter_chunks(stream,
+                                      chunk_size,
+                                      seek_offset,
+                                      seek_whence):
+                yield chunk
     return content_from_reader(reader, content_type, buffer_now)
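
To illustrate the simplified ``_iter_text`` above (incremental decoding
only, now that pre-2.5 support is gone), a sketch where a multi-byte
UTF-8 character is split across byte chunks::

    from testtools.content import Content
    from testtools.content_type import UTF8_TEXT

    # A UTF-8 snowman (U+2603) split across two chunks; the incremental
    # decoder reassembles it across the chunk boundary.
    content = Content(UTF8_TEXT, lambda: [b'\xe2\x98', b'\x83'])
    assert u''.join(content.iter_text()) == u'\u2603'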
 
 
diff --git a/lib/testtools/testtools/content_type.py b/lib/testtools/testtools/content_type.py
index c491408..bbf314b 100644
--- a/lib/testtools/testtools/content_type.py
+++ b/lib/testtools/testtools/content_type.py
@@ -29,7 +29,7 @@ class ContentType(object):
     def __repr__(self):
         if self.parameters:
             params = '; '
-            params += ', '.join(
+            params += '; '.join(
                 sorted('%s="%s"' % (k, v) for k, v in self.parameters.items()))
         else:
             params = ''
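
The one-character fix above matters because MIME parameters are
separated by semicolons, not commas. A hypothetical check, assuming the
surrounding ``__repr__`` renders ``type/subtype`` before the parameters::

    from testtools.content_type import ContentType

    ct = ContentType('text', 'plain', {'charset': 'utf8', 'language': 'en'})
    # Parameters now render '; '-separated, matching MIME syntax:
    print(repr(ct))   # text/plain; charset="utf8"; language="en"
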
diff --git a/lib/testtools/testtools/deferredruntest.py b/lib/testtools/testtools/deferredruntest.py
index cf33c06..d22c79f 100644
--- a/lib/testtools/testtools/deferredruntest.py
+++ b/lib/testtools/testtools/deferredruntest.py
@@ -89,14 +89,21 @@ class AsynchronousDeferredRunTest(_DeferredRunTest):
     This is highly experimental code.  Use at your own risk.
     """
 
-    def __init__(self, case, handlers=None, reactor=None, timeout=0.005,
-                 debug=False):
+    def __init__(self, case, handlers=None, last_resort=None, reactor=None,
+                 timeout=0.005, debug=False):
         """Construct an `AsynchronousDeferredRunTest`.
 
+        Please be sure to always pass arguments by keyword, not position:
+        the base class may add arguments in future, and to stay
+        compatible with it we have to insert new arguments before the
+        local parameters.
+
         :param case: The `TestCase` to run.
         :param handlers: A list of exception handlers (ExceptionType, handler)
             where 'handler' is a callable that takes a `TestCase`, a
             ``testtools.TestResult`` and the exception raised.
+        :param last_resort: Handler to call before re-raising uncatchable
+            exceptions (those for which there is no handler).
         :param reactor: The Twisted reactor to use.  If not given, we use the
             default reactor.
         :param timeout: The maximum time allowed for running a test.  The
@@ -105,7 +112,8 @@ class AsynchronousDeferredRunTest(_DeferredRunTest):
             to get information about unhandled Deferreds and left-over
             DelayedCalls.  Defaults to False.
         """
-        super(AsynchronousDeferredRunTest, self).__init__(case, handlers)
+        super(AsynchronousDeferredRunTest, self).__init__(
+            case, handlers, last_resort)
         if reactor is None:
             from twisted.internet import reactor
         self._reactor = reactor
@@ -119,8 +127,8 @@ class AsynchronousDeferredRunTest(_DeferredRunTest):
         # will be able to be assigned to a class variable *and* also be
         # invoked directly.
         class AsynchronousDeferredRunTestFactory:
-            def __call__(self, case, handlers=None):
-                return cls(case, handlers, reactor, timeout, debug)
+            def __call__(self, case, handlers=None, last_resort=None):
+                return cls(case, handlers, last_resort, reactor, timeout, debug)
         return AsynchronousDeferredRunTestFactory()
 
     @defer.deferredGenerator
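
Given the keyword-only advice in the docstring above, a sketch of how a
test class would opt in, assuming the factory classmethod is
``make_factory`` (as upstream names it) and that Twisted is installed::

    from testtools import TestCase
    from testtools.deferredruntest import AsynchronousDeferredRunTest

    class TwistedTests(TestCase):
        # Keyword arguments only, so future base-class parameters
        # cannot silently shift the meaning of these values.
        run_tests_with = AsynchronousDeferredRunTest.make_factory(
            timeout=1.0, debug=False)
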
diff --git a/lib/testtools/testtools/distutilscmd.py b/lib/testtools/testtools/distutilscmd.py
index 91e14ca..a4d79dc 100644
--- a/lib/testtools/testtools/distutilscmd.py
+++ b/lib/testtools/testtools/distutilscmd.py
@@ -26,7 +26,7 @@ class TestCommand(Command):
 
     def __init__(self, dist):
         Command.__init__(self, dist)
-        self.runner = TestToolsTestRunner(sys.stdout)
+        self.runner = TestToolsTestRunner(stdout=sys.stdout)
 
 
     def initialize_options(self):
diff --git a/lib/testtools/testtools/helpers.py b/lib/testtools/testtools/helpers.py
index 2595c1d..401d2cc 100644
--- a/lib/testtools/testtools/helpers.py
+++ b/lib/testtools/testtools/helpers.py
@@ -8,83 +8,12 @@ __all__ = [
 
 import sys
 
-
-def try_import(name, alternative=None, error_callback=None):
-    """Attempt to import ``name``.  If it fails, return ``alternative``.
-
-    When supporting multiple versions of Python or optional dependencies, it
-    is useful to be able to try to import a module.
-
-    :param name: The name of the object to import, e.g. ``os.path`` or
-        ``os.path.join``.
-    :param alternative: The value to return if no module can be imported.
-        Defaults to None.
-    :param error_callback: If non-None, a callable that is passed the ImportError
-        when the module cannot be loaded.
-    """
-    module_segments = name.split('.')
-    last_error = None
-    while module_segments:
-        module_name = '.'.join(module_segments)
-        try:
-            module = __import__(module_name)
-        except ImportError:
-            last_error = sys.exc_info()[1]
-            module_segments.pop()
-            continue
-        else:
-            break
-    else:
-        if last_error is not None and error_callback is not None:
-            error_callback(last_error)
-        return alternative
-    nonexistent = object()
-    for segment in name.split('.')[1:]:
-        module = getattr(module, segment, nonexistent)
-        if module is nonexistent:
-            if last_error is not None and error_callback is not None:
-                error_callback(last_error)
-            return alternative
-    return module
-
-
-_RAISE_EXCEPTION = object()
-def try_imports(module_names, alternative=_RAISE_EXCEPTION, error_callback=None):
-    """Attempt to import modules.
-
-    Tries to import the first module in ``module_names``.  If it can be
-    imported, we return it.  If not, we go on to the second module and try
-    that.  The process continues until we run out of modules to try.  If none
-    of the modules can be imported, either raise an exception or return the
-    provided ``alternative`` value.
-
-    :param module_names: A sequence of module names to try to import.
-    :param alternative: The value to return if no module can be imported.
-        If unspecified, we raise an ImportError.
-    :param error_callback: If None, called with the ImportError for *each*
-        module that fails to load.
-    :raises ImportError: If none of the modules can be imported and no
-        alternative value was specified.
-    """
-    module_names = list(module_names)
-    for module_name in module_names:
-        module = try_import(module_name, error_callback=error_callback)
-        if module:
-            return module
-    if alternative is _RAISE_EXCEPTION:
-        raise ImportError(
-            "Could not import any of: %s" % ', '.join(module_names))
-    return alternative
-
-
-def safe_hasattr(obj, attr, _marker=object()):
-    """Does 'obj' have an attribute 'attr'?
-
-    Use this rather than built-in hasattr, as the built-in swallows exceptions
-    in some versions of Python and behaves unpredictably with respect to
-    properties.
-    """
-    return getattr(obj, attr, _marker) is not _marker
+# Compat - removal announced in 0.9.25.
+from extras import (
+    safe_hasattr,
+    try_import,
+    try_imports,
+    )
 
 
 def map_values(function, dictionary):
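
The helpers now simply re-export from the ``extras`` distribution (added
to ``install_requires`` above), so existing imports keep working; new
code can depend on ``extras`` directly, e.g.::

    from extras import try_import

    # Returns the module if importable, otherwise the alternative.
    simplejson = try_import('simplejson', alternative=None)
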
diff --git a/lib/testtools/testtools/matchers/__init__.py b/lib/testtools/testtools/matchers/__init__.py
index ce949fd..771d814 100644
--- a/lib/testtools/testtools/matchers/__init__.py
+++ b/lib/testtools/testtools/matchers/__init__.py
@@ -16,6 +16,7 @@ __all__ = [
     'AfterPreprocessing',
     'AllMatch',
     'Annotate',
+    'AnyMatch',
     'Contains',
     'ContainsAll',
     'ContainedByDict',
@@ -28,6 +29,7 @@ __all__ = [
     'FileContains',
     'FileExists',
     'GreaterThan',
+    'HasLength',
     'HasPermissions',
     'Is',
     'IsInstance',
@@ -39,6 +41,7 @@ __all__ = [
     'MatchesException',
     'MatchesListwise',
     'MatchesPredicate',
+    'MatchesPredicateWithParams',
     'MatchesRegex',
     'MatchesSetwise',
     'MatchesStructure',
@@ -57,6 +60,7 @@ from ._basic import (
     EndsWith,
     Equals,
     GreaterThan,
+    HasLength,
     Is,
     IsInstance,
     LessThan,
@@ -98,9 +102,11 @@ from ._higherorder import (
     AfterPreprocessing,
     AllMatch,
     Annotate,
+    AnyMatch,
     MatchesAll,
     MatchesAny,
     MatchesPredicate,
+    MatchesPredicateWithParams,
     Not,
     )
 
diff --git a/lib/testtools/testtools/matchers/_basic.py b/lib/testtools/testtools/matchers/_basic.py
index 44a47c5..2d9f143 100644
--- a/lib/testtools/testtools/matchers/_basic.py
+++ b/lib/testtools/testtools/matchers/_basic.py
@@ -5,6 +5,7 @@ __all__ = [
     'EndsWith',
     'Equals',
     'GreaterThan',
+    'HasLength',
     'Is',
     'IsInstance',
     'LessThan',
@@ -24,7 +25,10 @@ from ..compat import (
     text_repr,
     )
 from ..helpers import list_subtract
-from ._higherorder import PostfixedMismatch
+from ._higherorder import (
+    MatchesPredicateWithParams,
+    PostfixedMismatch,
+    )
 from ._impl import (
     Matcher,
     Mismatch,
@@ -313,3 +317,10 @@ class MatchesRegex(object):
             pattern = pattern.encode("unicode_escape").decode("ascii")
             return Mismatch("%r does not match /%s/" % (
                     value, pattern.replace("\\\\", "\\")))
+
+
+def has_len(x, y):
+    return len(x) == y
+
+
+HasLength = MatchesPredicateWithParams(has_len, "len({0}) != {1}", "HasLength")
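
A quick usage sketch of the new matcher, reusing the ``assert_that``
helper added earlier in this patch::

    from testtools.assertions import assert_that
    from testtools.matchers import HasLength

    assert_that([1, 2, 3], HasLength(3))
    # A mismatch renders via the message template above:
    print(HasLength(3).match([1, 2]).describe())   # len([1, 2]) != 3
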
diff --git a/lib/testtools/testtools/matchers/_dict.py b/lib/testtools/testtools/matchers/_dict.py
index ff05199..b1ec915 100644
--- a/lib/testtools/testtools/matchers/_dict.py
+++ b/lib/testtools/testtools/matchers/_dict.py
@@ -241,7 +241,7 @@ class KeysEqual(Matcher):
         """
         super(KeysEqual, self).__init__()
         try:
-            self.expected = expected.keys()
+            self.expected = expected[0].keys()
         except AttributeError:
             self.expected = list(expected)
 
diff --git a/lib/testtools/testtools/matchers/_exception.py b/lib/testtools/testtools/matchers/_exception.py
index c120487..cd4c90b 100644
--- a/lib/testtools/testtools/matchers/_exception.py
+++ b/lib/testtools/testtools/matchers/_exception.py
@@ -10,8 +10,6 @@ import sys
 
 from testtools.compat import (
     classtypes,
-    _error_repr,
-    isbaseexception,
     istext,
     )
 from ._basic import MatchesRegex
@@ -22,6 +20,17 @@ from ._impl import (
     )
 
 
+_error_repr = BaseException.__repr__
+
+
+def _is_exception(exc):
+    return isinstance(exc, BaseException)
+
+
+def _is_user_exception(exc):
+    return isinstance(exc, Exception)
+
+
 class MatchesException(Matcher):
     """Match an exc_info tuple against an exception instance or type."""
 
@@ -44,7 +53,9 @@ class MatchesException(Matcher):
         if istext(value_re):
             value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
         self.value_re = value_re
-        self._is_instance = type(self.expected) not in classtypes() + (tuple,)
+        expected_type = type(self.expected)
+        self._is_instance = not any(issubclass(expected_type, class_type)
+                for class_type in classtypes() + (tuple,))
 
     def match(self, other):
         if type(other) != tuple:
@@ -101,9 +112,10 @@ class Raises(Matcher):
             else:
                 mismatch = None
             # The exception did not match, or no explicit matching logic was
-            # performed. If the exception is a non-user exception (that is, not
-            # a subclass of Exception on Python 2.5+) then propogate it.
-            if isbaseexception(exc_info[1]):
+            # performed. If the exception is a non-user exception then
+            # propagate it.
+            exception = exc_info[1]
+            if _is_exception(exception) and not _is_user_exception(exception):
                 del exc_info
                 raise
             return mismatch
diff --git a/lib/testtools/testtools/matchers/_higherorder.py b/lib/testtools/testtools/matchers/_higherorder.py
index 53c52b6..3570f57 100644
--- a/lib/testtools/testtools/matchers/_higherorder.py
+++ b/lib/testtools/testtools/matchers/_higherorder.py
@@ -4,6 +4,7 @@ __all__ = [
     'AfterPreprocessing',
     'AllMatch',
     'Annotate',
+    'AnyMatch',
     'MatchesAny',
     'MatchesAll',
     'Not',
@@ -287,3 +288,81 @@ class MatchesPredicate(Matcher):
     def match(self, x):
         if not self.predicate(x):
             return Mismatch(self.message % x)
+
+
+def MatchesPredicateWithParams(predicate, message, name=None):
+    """Match if a given parameterised function returns True.
+
+    It is reasonably common to want to make a very simple matcher based on a
+    function that you already have that returns True or False given some
+    arguments. This matcher makes it very easy to do so. e.g.::
+
+      HasLength = MatchesPredicateWithParams(
+          lambda x, y: len(x) == y, 'len({0}) is not {1}')
+      # This assertion will fail, as 'len([1, 2]) == 3' is False.
+      self.assertThat([1, 2], HasLength(3))
+
+    Note that unlike MatchesPredicate, MatchesPredicateWithParams returns
+    a factory from which you construct an actual matcher by calling it
+    with the parameters to use.
+
+    The predicate function should take the object to match as its first
+    parameter. Any additional parameters supplied when constructing a matcher
+    are supplied to the predicate as additional parameters when checking for a
+    match.
+
+    :param predicate: The predicate function.
+    :param message: A format string for describing mismatches.
+    :param name: Optional replacement name for the matcher.
+    """
+    def construct_matcher(*args, **kwargs):
+        return _MatchesPredicateWithParams(
+            predicate, message, name, *args, **kwargs)
+    return construct_matcher
+
+
+class _MatchesPredicateWithParams(Matcher):
+
+    def __init__(self, predicate, message, name, *args, **kwargs):
+        """Create a ``MatchesPredicateWithParams`` matcher.
+
+        :param predicate: A function that takes an object to match and
+            additional params as given in ``*args`` and ``**kwargs``. The
+            result of the function will be interpreted as a boolean to
+            determine a match.
+        :param message: A message to describe a mismatch.  It will be formatted
+            with .format() and be given a tuple containing whatever was passed
+            to ``match()`` + ``*args`` in ``*args``, and whatever was passed to
+            ``**kwargs`` as its ``**kwargs``.
+
+            For instance, to format a single parameter::
+
+                "{0} is not a {1}"
+
+            To format a keyword arg::
+
+                "{0} is not a {type_to_check}"
+        :param name: What name to use for the matcher class. Pass None to use
+            the default.
+        """
+        self.predicate = predicate
+        self.message = message
+        self.name = name
+        self.args = args
+        self.kwargs = kwargs
+
+    def __str__(self):
+        args = [str(arg) for arg in self.args]
+        kwargs = ["%s=%s" % item for item in self.kwargs.items()]
+        args = ", ".join(args + kwargs)
+        if self.name is None:
+            name = 'MatchesPredicateWithParams(%r, %r)' % (
+                self.predicate, self.message)
+        else:
+            name = self.name
+        return '%s(%s)' % (name, args)
+
+    def match(self, x):
+        if not self.predicate(x, *self.args, **self.kwargs):
+            return Mismatch(
+                self.message.format(*((x,) + self.args), **self.kwargs))
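
A sketch of building a custom matcher from the factory, exercising the
keyword-argument formatting path (the names here are illustrative)::

    from testtools.matchers import MatchesPredicateWithParams

    IsDivisibleBy = MatchesPredicateWithParams(
        lambda x, divisor: x % divisor == 0,
        '{0} is not divisible by {divisor}',
        'IsDivisibleBy')

    mismatch = IsDivisibleBy(divisor=3).match(10)
    print(mismatch.describe())   # 10 is not divisible by 3
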
diff --git a/lib/testtools/testtools/matchers/_impl.py b/lib/testtools/testtools/matchers/_impl.py
index 36e5ee0..19a93af 100644
--- a/lib/testtools/testtools/matchers/_impl.py
+++ b/lib/testtools/testtools/matchers/_impl.py
@@ -114,9 +114,7 @@ class MismatchError(AssertionError):
     # characters are in the matchee, matcher or mismatch.
 
     def __init__(self, matchee, matcher, mismatch, verbose=False):
-        # Have to use old-style upcalling for Python 2.4 and 2.5
-        # compatibility.
-        AssertionError.__init__(self)
+        super(MismatchError, self).__init__()
         self.matchee = matchee
         self.matcher = matcher
         self.mismatch = mismatch
diff --git a/lib/testtools/testtools/run.py b/lib/testtools/testtools/run.py
index c417bd0..8421f25 100755
--- a/lib/testtools/testtools/run.py
+++ b/lib/testtools/testtools/run.py
@@ -8,13 +8,16 @@ For instance, to run the testtools test suite.
  $ python -m testtools.run testtools.tests.test_suite
 """
 
-import os
+from functools import partial
+import os.path
 import unittest
 import sys
 
-from testtools import TextTestResult
+from extras import safe_hasattr
+
+from testtools import TextTestResult, testcase
 from testtools.compat import classtypes, istext, unicode_output_stream
-from testtools.testsuite import iterate_tests, sorted_tests
+from testtools.testsuite import filter_by_ids, iterate_tests, sorted_tests
 
 
 defaultTestLoader = unittest.defaultTestLoader
@@ -26,28 +29,76 @@ if getattr(defaultTestLoader, 'discover', None) is None:
         defaultTestLoader = discover.DiscoveringTestLoader()
         defaultTestLoaderCls = discover.DiscoveringTestLoader
         have_discover = True
+        discover_impl = discover
     except ImportError:
         have_discover = False
 else:
     have_discover = True
+    discover_impl = unittest.loader
+discover_fixed = False
+
+
+def list_test(test):
+    """Return the test ids that would be run if test() was run.
+
+    Tests that failed to import are represented as well, though we use
+    an ugly hack (see http://bugs.python.org/issue19746 for details) to
+    detect them. The distinction matters because if a user filters the
+    tests to run by the returned ids, a failed import can reduce the
+    visible tests, and it would otherwise be impossible to tell that a
+    selected test was one that failed to import.
+
+    :return: A tuple of test ids that would run and error strings
+        describing things that failed to import.
+    """
+    unittest_import_strs = set([
+        'unittest.loader.ModuleImportFailure.', 'discover.ModuleImportFailure.'
+        ])
+    test_ids = []
+    errors = []
+    for test in iterate_tests(test):
+        # Much ugly.
+        for prefix in unittest_import_strs:
+            if test.id().startswith(prefix):
+                errors.append(test.id()[len(prefix):])
+                break
+        else:
+            test_ids.append(test.id())
+    return test_ids, errors
 
 
 class TestToolsTestRunner(object):
     """ A thunk object to support unittest.TestProgram."""
 
-    def __init__(self, verbosity=None, failfast=None, buffer=None):
+    def __init__(self, verbosity=None, failfast=None, buffer=None,
+        stdout=None):
         """Create a TestToolsTestRunner.
 
         :param verbosity: Ignored.
         :param failfast: Stop running tests at the first failure.
         :param buffer: Ignored.
+        :param stdout: Stream to use for stdout.
         """
         self.failfast = failfast
+        if stdout is None:
+            stdout = sys.stdout
+        self.stdout = stdout
+
+    def list(self, test):
+        """List the tests that would be run if test() was run."""
+        test_ids, errors = list_test(test)
+        for test_id in test_ids:
+            self.stdout.write('%s\n' % test_id)
+        if errors:
+            self.stdout.write('Failed to import\n')
+            for test_id in errors:
+                self.stdout.write('%s\n' % test_id)
+            sys.exit(2)
 
     def run(self, test):
         "Run the given test case or test suite."
         result = TextTestResult(
-            unicode_output_stream(sys.stdout), failfast=self.failfast)
+            unicode_output_stream(self.stdout), failfast=self.failfast)
         result.startTestRun()
         try:
             return test.run(result)
@@ -77,6 +128,8 @@ class TestToolsTestRunner(object):
 #  - The limitation of using getopt is declared to the user.
 #  - http://bugs.python.org/issue16709 is worked around, by sorting tests when
 #    discover is used.
+#  - We monkey-patch the discover and unittest loaders to address
+#    http://bugs.python.org/issue16662 with the proposed upstream patch.
 
 FAILFAST     = "  -f, --failfast   Stop on first failure\n"
 CATCHBREAK   = "  -c, --catch      Catch control-C and display results\n"
@@ -143,6 +196,7 @@ class TestProgram(object):
             argv = sys.argv
         if stdout is None:
             stdout = sys.stdout
+        self.stdout = stdout
 
         self.exit = exit
         self.failfast = failfast
@@ -173,16 +227,16 @@ class TestProgram(object):
             finally:
                 source.close()
             test_ids = set(line.strip().decode('utf-8') for line in lines)
-            filtered = unittest.TestSuite()
-            for test in iterate_tests(self.test):
-                if test.id() in test_ids:
-                    filtered.addTest(test)
-            self.test = filtered
+            self.test = filter_by_ids(self.test, test_ids)
         if not self.listtests:
             self.runTests()
         else:
-            for test in iterate_tests(self.test):
-                stdout.write('%s\n' % test.id())
+            runner = self._get_runner()
+            if safe_hasattr(runner, 'list'):
+                runner.list(self.test)
+            else:
+                for test in iterate_tests(self.test):
+                    self.stdout.write('%s\n' % test.id())
 
     def usageExit(self, msg=None):
         if msg:
@@ -254,6 +308,7 @@ class TestProgram(object):
         if not have_discover:
             raise AssertionError("Unable to use discovery, must use python 2.7 "
                     "or greater, or install the discover package.")
+        _fix_discovery()
         self.progName = '%s discover' % self.progName
         import optparse
         parser = optparse.OptionParser()
@@ -325,26 +380,155 @@ class TestProgram(object):
         if (self.catchbreak
             and getattr(unittest, 'installHandler', None) is not None):
             unittest.installHandler()
+        testRunner = self._get_runner()
+        self.result = testRunner.run(self.test)
+        if self.exit:
+            sys.exit(not self.result.wasSuccessful())
+
+    def _get_runner(self):
         if self.testRunner is None:
             self.testRunner = TestToolsTestRunner
-        if isinstance(self.testRunner, classtypes()):
+        try:
+            testRunner = self.testRunner(verbosity=self.verbosity,
+                                         failfast=self.failfast,
+                                         buffer=self.buffer,
+                                         stdout=self.stdout)
+        except TypeError:
+            # didn't accept the verbosity, buffer, failfast or stdout arguments
+            # Try with the prior contract
             try:
                 testRunner = self.testRunner(verbosity=self.verbosity,
                                              failfast=self.failfast,
                                              buffer=self.buffer)
             except TypeError:
-                # didn't accept the verbosity, buffer or failfast arguments
-                testRunner = self.testRunner()
-        else:
-            # it is assumed to be a TestRunner instance
-            testRunner = self.testRunner
-        self.result = testRunner.run(self.test)
-        if self.exit:
-            sys.exit(not self.result.wasSuccessful())
+                # Now try calling it with defaults
+                try:
+                    testRunner = self.testRunner()
+                except TypeError:
+                    # it is assumed to be a TestRunner instance
+                    testRunner = self.testRunner
+        return testRunner
+
+
+def _fix_discovery():
+    # Monkey patch in the bugfix from http://bugs.python.org/issue16662
+    # - the code here is a straight copy from the Python core tree
+    # with the patch applied.
+    global discover_fixed
+    if discover_fixed:
+        return
+    # Do we have a fixed Python?
+    # (not committed upstream yet - so we can't uncomment this code,
+    # but when it gets committed, the next version to be released won't
+    # need monkey patching.)
+    # if sys.version_info[:2] > (3, 4):
+    #     discover_fixed = True
+    #     return
+    if not have_discover:
+        return
+    if safe_hasattr(discover_impl, '_jython_aware_splitext'):
+        _jython_aware_splitext = discover_impl._jython_aware_splitext
+    else:
+        def _jython_aware_splitext(path):
+            if path.lower().endswith('$py.class'):
+                return path[:-9]
+            return os.path.splitext(path)[0]
+    def loadTestsFromModule(self, module, use_load_tests=True, pattern=None):
+        """Return a suite of all tests cases contained in the given module"""
+        # use_load_tests is preserved for compatibility though it was never
+        # an official API.
+        # pattern is not an official API either; it is used in discovery to
+        # chain the requested pattern down.
+        tests = []
+        for name in dir(module):
+            obj = getattr(module, name)
+            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+                tests.append(self.loadTestsFromTestCase(obj))
+
+        load_tests = getattr(module, 'load_tests', None)
+        tests = self.suiteClass(tests)
+        if use_load_tests and load_tests is not None:
+            try:
+                return load_tests(self, tests, pattern)
+            except Exception as e:
+                return discover_impl._make_failed_load_tests(
+                    module.__name__, e, self.suiteClass)
+        return tests
+    def _find_tests(self, start_dir, pattern, namespace=False):
+        """Used by discovery. Yields test suites it loads."""
+        paths = sorted(os.listdir(start_dir))
+
+        for path in paths:
+            full_path = os.path.join(start_dir, path)
+            if os.path.isfile(full_path):
+                if not discover_impl.VALID_MODULE_NAME.match(path):
+                    # valid Python identifiers only
+                    continue
+                if not self._match_path(path, full_path, pattern):
+                    continue
+                # if the test file matches, load it
+                name = self._get_name_from_path(full_path)
+                try:
+                    module = self._get_module_from_name(name)
+                except testcase.TestSkipped as e:
+                    yield discover_impl._make_skipped_test(
+                        name, e, self.suiteClass)
+                except:
+                    yield discover_impl._make_failed_import_test(
+                        name, self.suiteClass)
+                else:
+                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
+                    realpath = _jython_aware_splitext(
+                        os.path.realpath(mod_file))
+                    fullpath_noext = _jython_aware_splitext(
+                        os.path.realpath(full_path))
+                    if realpath.lower() != fullpath_noext.lower():
+                        module_dir = os.path.dirname(realpath)
+                        mod_name = _jython_aware_splitext(
+                            os.path.basename(full_path))
+                        expected_dir = os.path.dirname(full_path)
+                        msg = ("%r module incorrectly imported from %r. Expected %r. "
+                               "Is this module globally installed?")
+                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
+                    yield self.loadTestsFromModule(module, pattern=pattern)
+            elif os.path.isdir(full_path):
+                if (not namespace and
+                    not os.path.isfile(os.path.join(full_path, '__init__.py'))):
+                    continue
+
+                load_tests = None
+                tests = None
+                name = self._get_name_from_path(full_path)
+                try:
+                    package = self._get_module_from_name(name)
+                except testcase.TestSkipped as e:
+                    yield discover_impl._make_skipped_test(
+                        name, e, self.suiteClass)
+                except:
+                    yield discover_impl._make_failed_import_test(
+                        name, self.suiteClass)
+                else:
+                    load_tests = getattr(package, 'load_tests', None)
+                    tests = self.loadTestsFromModule(package, pattern=pattern)
+                    if tests is not None:
+                        # tests loaded from package file
+                        yield tests
+
+                    if load_tests is not None:
+                        # loadTestsFromModule(package) has load_tests for us.
+                        continue
+                    # recurse into the package
+                    pkg_tests = self._find_tests(
+                        full_path, pattern, namespace=namespace)
+                    for test in pkg_tests:
+                        yield test
+    defaultTestLoaderCls.loadTestsFromModule = loadTestsFromModule
+    defaultTestLoaderCls._find_tests = _find_tests
+
 ################
 
 def main(argv, stdout):
-    program = TestProgram(argv=argv, testRunner=TestToolsTestRunner,
+    program = TestProgram(argv=argv, testRunner=partial(TestToolsTestRunner, stdout=stdout),
         stdout=stdout)
 
 if __name__ == '__main__':
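
The widened fallback chain in ``_get_runner`` means custom runners can
adopt the new ``stdout`` parameter without breaking older callers. A
minimal sketch of a runner meeting the newest contract (the class name
is hypothetical)::

    import sys
    import unittest

    class QuietRunner(object):
        """Accepts the newest construction contract probed by _get_runner."""

        def __init__(self, verbosity=None, failfast=None, buffer=None,
                     stdout=None):
            self.stdout = stdout if stdout is not None else sys.stdout

        def run(self, test):
            result = unittest.TestResult()
            test.run(result)
            self.stdout.write('%d tests run\n' % result.testsRun)
            return result
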
diff --git a/lib/testtools/testtools/runtest.py b/lib/testtools/testtools/runtest.py
index 507ad87..a29cdd6 100644
--- a/lib/testtools/testtools/runtest.py
+++ b/lib/testtools/testtools/runtest.py
@@ -47,17 +47,23 @@ class RunTest(object):
         reporting of error/failure/skip etc.
     """
 
-    def __init__(self, case, handlers=None):
+    def __init__(self, case, handlers=None, last_resort=None):
         """Create a RunTest to run a case.
 
         :param case: A testtools.TestCase test case object.
         :param handlers: Exception handlers for this RunTest. These are stored
             in self.handlers and can be modified later if needed.
+        :param last_resort: A handler of last resort: any exception which is
+            not handled by handlers will cause the last resort handler to be
+            called as last_resort(case, result, exc), and then the exception
+            will be re-raised - aborting the test run, as this is inside the
+            runner machinery rather than the confined context of the test.
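+
+        Example last-resort handler (a sketch; ``log_exception`` is a
+        hypothetical name)::
+
+            def log_exception(case, result, exc):
+                sys.stderr.write('%s raised %r\n' % (case.id(), exc))
+
+            runner = RunTest(case, handlers, last_resort=log_exception)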
         """
         self.case = case
         self.handlers = handlers or []
         self.exception_caught = object()
         self._exceptions = []
+        self.last_resort = last_resort or (lambda case, result, exc: None)
 
     def run(self, result=None):
         """Run self.case reporting activity to result.
@@ -106,12 +112,23 @@ class RunTest(object):
                     if isinstance(e, exc_class):
                         handler(self.case, self.result, e)
                         break
+                else:
+                    self.last_resort(self.case, self.result, e)
+                    raise e
         finally:
             result.stopTest(self.case)
         return result
 
     def _run_core(self):
         """Run the user supplied test code."""
+        test_method = self.case._get_test_method()
+        if getattr(test_method, '__unittest_skip__', False):
+            self.result.addSkip(
+                self.case,
+                reason=getattr(test_method, '__unittest_skip_why__', None)
+            )
+            return
+
         if self.exception_caught == self._run_user(self.case._run_setup,
             self.result):
             # Don't run the test method if we failed getting here.
@@ -135,6 +152,9 @@ class RunTest(object):
                         self._run_cleanups, self.result):
                         failed = True
                 finally:
+                    if getattr(self.case, 'force_failure', None):
+                        self._run_user(_raise_force_fail_error)
+                        failed = True
                     if not failed:
                         self.result.addSuccess(self.case,
                             details=self.case.getDetails())
@@ -167,8 +187,6 @@ class RunTest(object):
         """
         try:
             return fn(*args, **kwargs)
-        except KeyboardInterrupt:
-            raise
         except:
             return self._got_user_exception(sys.exc_info())
 
@@ -193,11 +211,15 @@ class RunTest(object):
             self.case.onException(exc_info, tb_label=tb_label)
         finally:
             del exc_info
-        for exc_class, handler in self.handlers:
-            if isinstance(e, exc_class):
-                self._exceptions.append(e)
-                return self.exception_caught
-        raise e
+        self._exceptions.append(e)
+        # Yes, this means we catch everything - we re-raise KeyboardInterrupt
+        # etc. later, after tearDown and the cleanups have run, since those
+        # may be cleaning up external processes.
+        return self.exception_caught
+
+
+def _raise_force_fail_error():
+    raise AssertionError("Forced Test Failure")
 
 
 # Signal that this is part of the testing framework, and that code from this
diff --git a/lib/testtools/testtools/testcase.py b/lib/testtools/testtools/testcase.py
index fc5f863..b646f82 100644
--- a/lib/testtools/testtools/testcase.py
+++ b/lib/testtools/testtools/testcase.py
@@ -4,6 +4,7 @@
 
 __metaclass__ = type
 __all__ = [
+    'attr',
     'clone_test_with_new_id',
     'ExpectedException',
     'gather_details',
@@ -15,14 +16,19 @@ __all__ = [
     ]
 
 import copy
+import functools
 import itertools
 import sys
 import types
 import unittest
 
+from extras import (
+    safe_hasattr,
+    try_import,
+    )
+
 from testtools import (
     content,
-    try_import,
     )
 from testtools.compat import (
     advance_iterator,
@@ -51,7 +57,7 @@ wraps = try_import('functools.wraps')
 
 class TestSkipped(Exception):
     """Raised within TestCase.run() when a test is skipped."""
-testSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
+TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
 TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
 
 
@@ -78,6 +84,20 @@ _ExpectedFailure = try_import(
     'unittest.case._ExpectedFailure', _ExpectedFailure)
 
 
+# Copied from unittest before the Python 3.4 release. Used to maintain
+# compatibility with the unittest expectedFailure feature. Users should not
+# use this directly.
+def _expectedFailure(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        try:
+            func(*args, **kwargs)
+        except Exception:
+            raise _ExpectedFailure(sys.exc_info())
+        raise _UnexpectedSuccess
+    return wrapper
+
+
 def run_test_with(test_runner, **kwargs):
     """Decorate a test as using a specific ``RunTest``.
 
@@ -106,9 +126,16 @@ def run_test_with(test_runner, **kwargs):
     def decorator(function):
         # Set an attribute on 'function' which will inform TestCase how to
         # make the runner.
-        function._run_test_with = (
-            lambda case, handlers=None:
-                test_runner(case, handlers=handlers, **kwargs))
+        def _run_test_with(case, handlers=None, last_resort=None):
+            try:
+                return test_runner(
+                    case, handlers=handlers, last_resort=last_resort,
+                    **kwargs)
+            except TypeError:
+                # Backwards compat: if we can't call the constructor
+                # with last_resort, try without that.
+                return test_runner(case, handlers=handlers, **kwargs)
+        function._run_test_with = _run_test_with
         return function
     return decorator
 
@@ -150,6 +177,8 @@ class TestCase(unittest.TestCase):
     :ivar exception_handlers: Exceptions to catch from setUp, runTest and
         tearDown. This list is able to be modified at any time and consists of
         (exception_class, handler(case, result, exception_value)) pairs.
+    :ivar force_failure: Force testtools.RunTest to fail the test after the
+        test has completed.
     :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
         Defaults to ``RunTest``.  The factory is expected to take a test case
         and an optional list of exception handlers.
@@ -185,7 +214,12 @@ class TestCase(unittest.TestCase):
             runTest = getattr(
                 test_method, '_run_test_with', self.run_tests_with)
         self.__RunTest = runTest
+        if getattr(test_method, '__unittest_expecting_failure__', False):
+            setattr(self, self._testMethodName, _expectedFailure(test_method))
+        # Used internally for onException processing - used to gather extra
+        # data from exceptions.
         self.__exception_handlers = []
+        # Passed to RunTest to map exceptions to result actions
         self.exception_handlers = [
             (self.skipException, self._report_skip),
             (self.failureException, self._report_failure),
@@ -193,9 +227,6 @@ class TestCase(unittest.TestCase):
             (_UnexpectedSuccess, self._report_unexpected_success),
             (Exception, self._report_error),
             ]
-        if sys.version_info < (2, 6):
-            # Catch old-style string exceptions with None as the instance
-            self.exception_handlers.append((type(None), self._report_error))
 
     def __eq__(self, other):
         eq = getattr(unittest.TestCase, '__eq__', None)
@@ -318,9 +349,9 @@ class TestCase(unittest.TestCase):
 
     failUnlessEqual = assertEquals = assertEqual
 
-    def assertIn(self, needle, haystack):
+    def assertIn(self, needle, haystack, message=''):
         """Assert that needle is in haystack."""
-        self.assertThat(haystack, Contains(needle))
+        self.assertThat(haystack, Contains(needle), message)
 
     def assertIsNone(self, observed, message=''):
         """Assert that 'observed' is equal to None.
@@ -355,10 +386,10 @@ class TestCase(unittest.TestCase):
         matcher = Not(Is(expected))
         self.assertThat(observed, matcher, message)
 
-    def assertNotIn(self, needle, haystack):
+    def assertNotIn(self, needle, haystack, message=''):
         """Assert that needle is not in haystack."""
         matcher = Not(Contains(needle))
-        self.assertThat(haystack, matcher)
+        self.assertThat(haystack, matcher, message)
 
     def assertIsInstance(self, obj, klass, msg=None):
         if isinstance(klass, tuple):
@@ -397,19 +428,62 @@ class TestCase(unittest.TestCase):
         :param matcher: An object meeting the testtools.Matcher protocol.
         :raises MismatchError: When matcher does not match thing.
         """
+        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
+        if mismatch_error is not None:
+            raise mismatch_error
+
+    def addDetailUniqueName(self, name, content_object):
+        """Add a detail to the test, but ensure it's name is unique.
+
+        This method checks whether ``name`` conflicts with a detail that has
+        already been added to the test. If it does, it will modify ``name`` to
+        avoid the conflict.
+
+        For more details see pydoc testtools.TestResult.
+
+        :param name: The name to give this detail.
+        :param content_object: The content object for this detail. See
+            testtools.content for more detail.
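+
+        Example (a sketch, using ``testtools.content.text_content``)::
+
+            # Two details added under 'log' are stored as 'log' and 'log-1'.
+            self.addDetailUniqueName('log', text_content('first'))
+            self.addDetailUniqueName('log', text_content('second'))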
+        """
+        existing_details = self.getDetails()
+        full_name = name
+        suffix = 1
+        while full_name in existing_details:
+            full_name = "%s-%d" % (name, suffix)
+            suffix += 1
+        self.addDetail(full_name, content_object)
+
+    def expectThat(self, matchee, matcher, message='', verbose=False):
+        """Check that matchee is matched by matcher, but delay the assertion failure.
+
+        This method behaves similarly to ``assertThat``, except that a failed
+        match does not exit the test immediately. The rest of the test code will
+        continue to run, and the test will be marked as failing after the test
+        has finished.
+
+        :param matchee: An object to match with matcher.
+        :param matcher: An object meeting the testtools.Matcher protocol.
+        :param message: If specified, show this message with any failed match.
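+
+        Example (a sketch; ``Equals`` is the standard testtools matcher)::
+
+            def test_lengths(self):
+                self.expectThat(len('abc'), Equals(3))
+                # The failed expectation below is recorded, but the test
+                # keeps running and is failed only once it finishes.
+                self.expectThat(len('abcd'), Equals(3))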
+        """
+        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
+
+        if mismatch_error is not None:
+            self.addDetailUniqueName(
+                "Failed expectation",
+                content.StacktraceContent(
+                    postfix_content="MismatchError: " + str(mismatch_error)
+                )
+            )
+            self.force_failure = True
+
+    def _matchHelper(self, matchee, matcher, message, verbose):
         matcher = Annotate.if_message(message, matcher)
         mismatch = matcher.match(matchee)
         if not mismatch:
             return
-        existing_details = self.getDetails()
-        for (name, content) in mismatch.get_details().items():
-            full_name = name
-            suffix = 1
-            while full_name in existing_details:
-                full_name = "%s-%d" % (name, suffix)
-                suffix += 1
-            self.addDetail(full_name, content)
-        raise MismatchError(matchee, matcher, mismatch, verbose)
+        for (name, value) in mismatch.get_details().items():
+            self.addDetailUniqueName(name, value)
+        return MismatchError(matchee, matcher, mismatch, verbose)
 
     def defaultTestResult(self):
         return TestResult()
@@ -505,9 +579,12 @@ class TestCase(unittest.TestCase):
     def _report_traceback(self, exc_info, tb_label='traceback'):
         id_gen = self._traceback_id_gens.setdefault(
             tb_label, itertools.count(0))
-        tb_id = advance_iterator(id_gen)
-        if tb_id:
-            tb_label = '%s-%d' % (tb_label, tb_id)
+        base_label = tb_label
+        while True:
+            tb_id = advance_iterator(id_gen)
+            if tb_id:
+                # Suffix the base label rather than compounding suffixes
+                # ('traceback-1-2') on repeated collisions.
+                tb_label = '%s-%d' % (base_label, tb_id)
+            if tb_label not in self.getDetails():
+                break
         self.addDetail(tb_label, content.TracebackContent(exc_info, self))
 
     @staticmethod
@@ -515,7 +592,14 @@ class TestCase(unittest.TestCase):
         result.addUnexpectedSuccess(self, details=self.getDetails())
 
     def run(self, result=None):
-        return self.__RunTest(self, self.exception_handlers).run(result)
+        try:
+            run_test = self.__RunTest(
+                self, self.exception_handlers, last_resort=self._report_error)
+        except TypeError:
+            # Backwards compat: if we can't call the constructor
+            # with last_resort, try without that.
+            run_test = self.__RunTest(self, self.exception_handlers)
+        return run_test.run(result)
 
     def _run_setup(self, result):
         """Run the setUp function for this test.
@@ -527,10 +611,12 @@ class TestCase(unittest.TestCase):
         ret = self.setUp()
         if not self.__setup_called:
             raise ValueError(
+                "In File: %s\n"
                 "TestCase.setUp was not called. Have you upcalled all the "
                 "way up the hierarchy from your setUp? e.g. Call "
                 "super(%s, self).setUp() from your setUp()."
-                % self.__class__.__name__)
+                % (sys.modules[self.__class__.__module__].__file__,
+                   self.__class__.__name__))
         return ret
 
     def _run_teardown(self, result):
@@ -543,19 +629,16 @@ class TestCase(unittest.TestCase):
         ret = self.tearDown()
         if not self.__teardown_called:
             raise ValueError(
+                "In File: %s\n"
                 "TestCase.tearDown was not called. Have you upcalled all the "
                 "way up the hierarchy from your tearDown? e.g. Call "
                 "super(%s, self).tearDown() from your tearDown()."
-                % self.__class__.__name__)
+                % (sys.modules[self.__class__.__module__].__file__,
+                   self.__class__.__name__))
         return ret
 
     def _get_test_method(self):
-        absent_attr = object()
-        # Python 2.5+
-        method_name = getattr(self, '_testMethodName', absent_attr)
-        if method_name is absent_attr:
-            # Python 2.4
-            method_name = getattr(self, '_TestCase__testMethodName')
+        method_name = getattr(self, '_testMethodName')
         return getattr(self, method_name)
 
     def _run_test_method(self, result):
@@ -578,8 +661,18 @@ class TestCase(unittest.TestCase):
         try:
             fixture.setUp()
         except:
-            gather_details(fixture.getDetails(), self.getDetails())
-            raise
+            exc_info = sys.exc_info()
+            try:
+                gather_details(fixture.getDetails(), self.getDetails())
+            except:
+                # Report the setUp exception, then raise the error during
+                # gather_details.
+                self._report_traceback(exc_info)
+                raise
+            else:
+                # Gather_details worked, so raise the exception setUp
+                # encountered.
+                reraise(*exc_info)
         else:
             self.addCleanup(fixture.cleanUp)
             self.addCleanup(
@@ -588,11 +681,24 @@ class TestCase(unittest.TestCase):
 
     def setUp(self):
         super(TestCase, self).setUp()
+        if self.__setup_called:
+            raise ValueError(
+                "In File: %s\n"
+                "TestCase.setUp was already called. Do not explicitly call "
+                "setUp from your tests. In your own setUp, use super to call "
+                "the base setUp."
+                % (sys.modules[self.__class__.__module__].__file__,))
         self.__setup_called = True
 
     def tearDown(self):
         super(TestCase, self).tearDown()
-        unittest.TestCase.tearDown(self)
+        if self.__teardown_called:
+            raise ValueError(
+                "In File: %s\n"
+                "TestCase.tearDown was already called. Do not explicitly call "
+                "tearDown from your tests. In your own tearDown, use super to "
+                "call the base tearDown."
+                % (sys.modules[self.__class__.__module__].__file__,))
         self.__teardown_called = True
 
 
@@ -606,7 +712,7 @@ class PlaceHolder(object):
     failureException = None
 
     def __init__(self, test_id, short_description=None, details=None,
-        outcome='addSuccess', error=None):
+        outcome='addSuccess', error=None, tags=None, timestamps=(None, None)):
         """Construct a `PlaceHolder`.
 
         :param test_id: The id of the placeholder test.
@@ -614,6 +720,9 @@ class PlaceHolder(object):
             test. If not provided, the id will be used instead.
         :param details: Outcome details as accepted by addSuccess etc.
         :param outcome: The outcome to call. Defaults to 'addSuccess'.
+        :param tags: Tags to report for the test.
+        :param timestamps: A two-tuple of timestamps for the test start and
+            finish. Each timestamp may be None to indicate it is not known.
         """
         self._test_id = test_id
         self._short_description = short_description
@@ -621,6 +730,9 @@ class PlaceHolder(object):
         self._outcome = outcome
         if error is not None:
             self._details['traceback'] = content.TracebackContent(error, self)
+        tags = tags or frozenset()
+        self._tags = frozenset(tags)
+        self._timestamps = timestamps
 
     def __call__(self, result=None):
         return self.run(result=result)
@@ -654,10 +766,16 @@ class PlaceHolder(object):
 
     def run(self, result=None):
         result = self._result(result)
+        if self._timestamps[0] is not None:
+            result.time(self._timestamps[0])
+        result.tags(self._tags, set())
         result.startTest(self)
+        if self._timestamps[1] is not None:
+            result.time(self._timestamps[1])
         outcome = getattr(result, self._outcome)
         outcome(self, details=self._details)
         result.stopTest(self)
+        result.tags(set(), self._tags)
 
     def shortDescription(self):
         if self._short_description is None:
@@ -680,9 +798,19 @@ def ErrorHolder(test_id, error, short_description=None, details=None):
         details=details, outcome='addError', error=error)
 
 
-# Python 2.4 did not know how to copy functions.
-if types.FunctionType not in copy._copy_dispatch:
-    copy._copy_dispatch[types.FunctionType] = copy._copy_immutable
+def _clone_test_id_callback(test, callback):
+    """Copy a `TestCase`, and make it call callback for its id().
+
+    This is only expected to be used on tests that have been constructed but
+    not executed.
+
+    :param test: A TestCase instance.
+    :param callback: A callable that takes no parameters and returns a string.
+    :return: A copy.copy of the test with id=callback.
+    """
+    newTest = copy.copy(test)
+    newTest.id = callback
+    return newTest
 
 
 def clone_test_with_new_id(test, new_id):
@@ -691,9 +819,45 @@ def clone_test_with_new_id(test, new_id):
     This is only expected to be used on tests that have been constructed but
     not executed.
     """
-    newTest = copy.copy(test)
-    newTest.id = lambda: new_id
-    return newTest
+    return _clone_test_id_callback(test, lambda: new_id)
+
+
+def attr(*args):
+    """Decorator for adding attributes to WithAttributes.
+
+    :param args: The name of attributes to add.
+    :return: A callable that when applied to a WithAttributes will
+        alter its id to enumerate the added attributes.
+    """
+    def decorate(fn):
+        if not safe_hasattr(fn, '__testtools_attrs'):
+            fn.__testtools_attrs = set()
+        fn.__testtools_attrs.update(args)
+        return fn
+    return decorate
+
+
+class WithAttributes(object):
+    """A mix-in class for modifying test id by attributes.
+
+    e.g.
+    >>> class MyTest(WithAttributes, TestCase):
+    ...    @attr('foo')
+    ...    def test_bar(self):
+    ...        pass
+    >>> MyTest('test_bar').id()
+    testtools.testcase.MyTest/test_bar[foo]
+    """
+
+    def id(self):
+        orig = super(WithAttributes, self).id()
+        # Depends on testtools.TestCase._get_test_method; it would be nice
+        # to support plain unittest too.
+        fn = self._get_test_method()
+        attributes = getattr(fn, '__testtools_attrs', None)
+        if not attributes:
+            return orig
+        return orig + '[' + ','.join(sorted(attributes)) + ']'
 
 
 def skip(reason):
@@ -704,6 +868,12 @@ def skip(reason):
     @unittest.skip decorator.
     """
     def decorator(test_item):
+        # This attribute signals to RunTest._run_core that the entire test
+        # must be skipped - including setUp and tearDown. This makes us
+        # compatible with testtools.skip* functions, which set the same
+        # attributes.
+        test_item.__unittest_skip__ = True
+        test_item.__unittest_skip_why__ = reason
         if wraps is not None:
             @wraps(test_item)
             def skip_wrapper(*args, **kwargs):
@@ -716,7 +886,7 @@ def skip(reason):
 
 
 def skipIf(condition, reason):
-    """Skip a test if the condition is true."""
+    """A decorator to skip a test if the condition is true."""
     if condition:
         return skip(reason)
     def _id(obj):
@@ -725,7 +895,7 @@ def skipIf(condition, reason):
 
 
 def skipUnless(condition, reason):
-    """Skip a test unless the condition is true."""
+    """A decorator to skip a test unless the condition is true."""
     if not condition:
         return skip(reason)
     def _id(obj):
@@ -736,8 +906,6 @@ def skipUnless(condition, reason):
 class ExpectedException:
     """A context manager to handle expected exceptions.
 
-    In Python 2.5 or later::
-
+    For example::
+
       def test_foo(self):
           with ExpectedException(ValueError, 'fo.*'):
               raise ValueError('foo')
@@ -748,26 +916,33 @@ class ExpectedException:
     exception is raised, an AssertionError will be raised.
     """
 
-    def __init__(self, exc_type, value_re=None):
+    def __init__(self, exc_type, value_re=None, msg=None):
         """Construct an `ExpectedException`.
 
         :param exc_type: The type of exception to expect.
         :param value_re: A regular expression to match against the
             'str()' of the raised exception.
+        :param msg: An optional message explaining the failure.
         """
         self.exc_type = exc_type
         self.value_re = value_re
+        self.msg = msg
 
     def __enter__(self):
         pass
 
     def __exit__(self, exc_type, exc_value, traceback):
         if exc_type is None:
-            raise AssertionError('%s not raised.' % self.exc_type.__name__)
+            error_msg = '%s not raised.' % self.exc_type.__name__
+            if self.msg:
+                error_msg = error_msg + ' : ' + self.msg
+            raise AssertionError(error_msg)
         if exc_type != self.exc_type:
             return False
         if self.value_re:
             matcher = MatchesException(self.exc_type, self.value_re)
+            if self.msg:
+                matcher = Annotate(self.msg, matcher)
             mismatch = matcher.match((exc_type, exc_value, traceback))
             if mismatch:
                 raise AssertionError(mismatch.describe())
@@ -793,6 +968,55 @@ class Nullary(object):
         return repr(self._callable_object)
 
 
+class DecorateTestCaseResult(object):
+    """Decorate a TestCase and permit customisation of the result for runs."""
+
+    def __init__(self, case, callout, before_run=None, after_run=None):
+        """Construct a DecorateTestCaseResult.
+
+        :param case: The case to decorate.
+        :param callout: A callback to call when run/__call__/debug is called.
+            Must take a result parameter and return a result object to be used.
+            For instance: lambda result: result.
+        :param before_run: If set, call this with the decorated result before
+            calling into the decorated run/__call__ method.
+        :param after_run: If set, call this with the decorated result after
+            calling into the decorated run/__call__ method.
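+
+        Example (a sketch; ``Tagger`` is the tag-applying result decorator
+        from testtools.testresult)::
+
+            tagged = DecorateTestCaseResult(
+                case, lambda result: Tagger(result, set(['quick']), set()))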
+        """
+        self.decorated = case
+        self.callout = callout
+        self.before_run = before_run
+        self.after_run = after_run
+
+    def _run(self, result, run_method):
+        result = self.callout(result)
+        if self.before_run:
+            self.before_run(result)
+        try:
+            return run_method(result)
+        finally:
+            if self.after_run:
+                self.after_run(result)
+
+    def run(self, result=None):
+        self._run(result, self.decorated.run)
+
+    def __call__(self, result=None):
+        self._run(result, self.decorated)
+
+    def __getattr__(self, name):
+        return getattr(self.decorated, name)
+
+    def __delattr__(self, name):
+        delattr(self.decorated, name)
+
+    def __setattr__(self, name, value):
+        if name in ('decorated', 'callout', 'before_run', 'after_run'):
+            self.__dict__[name] = value
+            return
+        setattr(self.decorated, name, value)
+
+
 # Signal that this is part of the testing framework, and that code from this
 # should not normally appear in tracebacks.
 __unittest = True
diff --git a/lib/testtools/testtools/testresult/__init__.py b/lib/testtools/testtools/testresult/__init__.py
index d37a772..5bf8f9c 100644
--- a/lib/testtools/testtools/testresult/__init__.py
+++ b/lib/testtools/testtools/testresult/__init__.py
@@ -3,23 +3,47 @@
 """Test result objects."""
 
 __all__ = [
+    'CopyStreamResult',
     'ExtendedToOriginalDecorator',
+    'ExtendedToStreamDecorator',
     'MultiTestResult',
+    'StreamFailFast',
+    'StreamResult',
+    'StreamResultRouter',
+    'StreamSummary',
+    'StreamTagger',
+    'StreamToDict',
+    'StreamToExtendedDecorator',
+    'StreamToQueue',
     'Tagger',
     'TestByTestResult',
+    'TestControl',
     'TestResult',
     'TestResultDecorator',
     'TextTestResult',
     'ThreadsafeForwardingResult',
+    'TimestampingStreamResult',
     ]
 
 from testtools.testresult.real import (
+    CopyStreamResult,
     ExtendedToOriginalDecorator,
+    ExtendedToStreamDecorator,
     MultiTestResult,
+    StreamFailFast,
+    StreamResult,
+    StreamResultRouter,
+    StreamSummary,
+    StreamTagger,
+    StreamToDict,
+    StreamToExtendedDecorator,
+    StreamToQueue,
     Tagger,
     TestByTestResult,
+    TestControl,
     TestResult,
     TestResultDecorator,
     TextTestResult,
     ThreadsafeForwardingResult,
+    TimestampingStreamResult,
     )
diff --git a/lib/testtools/testtools/testresult/doubles.py b/lib/testtools/testtools/testresult/doubles.py
index 1865e93..d86f7fa 100644
--- a/lib/testtools/testtools/testresult/doubles.py
+++ b/lib/testtools/testtools/testresult/doubles.py
@@ -6,6 +6,7 @@ __all__ = [
     'Python26TestResult',
     'Python27TestResult',
     'ExtendedTestResult',
+    'StreamResult',
     ]
 
 
@@ -148,3 +149,26 @@ class ExtendedTestResult(Python27TestResult):
 
     def wasSuccessful(self):
         return self._was_successful
+
+
+class StreamResult(object):
+    """A StreamResult implementation for testing.
+
+    All events are logged to _events.
+    """
+
+    def __init__(self):
+        self._events = []
+
+    def startTestRun(self):
+        self._events.append(('startTestRun',))
+
+    def stopTestRun(self):
+        self._events.append(('stopTestRun',))
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        self._events.append(('status', test_id, test_status, test_tags,
+            runnable, file_name, file_bytes, eof, mime_type, route_code,
+            timestamp))
diff --git a/lib/testtools/testtools/testresult/real.py b/lib/testtools/testtools/testresult/real.py
index 0a69872..1453041 100644
--- a/lib/testtools/testtools/testresult/real.py
+++ b/lib/testtools/testtools/testresult/real.py
@@ -5,24 +5,43 @@
 __metaclass__ = type
 __all__ = [
     'ExtendedToOriginalDecorator',
+    'ExtendedToStreamDecorator',
     'MultiTestResult',
+    'StreamFailFast',
+    'StreamResult',
+    'StreamSummary',
+    'StreamTagger',
+    'StreamToDict',
+    'StreamToExtendedDecorator',
+    'StreamToQueue',
     'Tagger',
+    'TestControl',
     'TestResult',
     'TestResultDecorator',
     'ThreadsafeForwardingResult',
+    'TimestampingStreamResult',
     ]
 
 import datetime
+from operator import methodcaller
 import sys
 import unittest
 
-from testtools.compat import all, str_is_unicode, _u
+from extras import safe_hasattr, try_import, try_imports
+parse_mime_type = try_import('mimeparse.parse_mime_type')
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools.compat import str_is_unicode, _u, _b
 from testtools.content import (
+    Content,
     text_content,
     TracebackContent,
     )
-from testtools.helpers import safe_hasattr
+from testtools.content_type import ContentType
 from testtools.tags import TagContext
+# Circular import: testtools.testcase imports this module, so PlaceHolder is
+# loaded lazily by test_dict_to_case below.
+PlaceHolder = None
 
 # From http://docs.python.org/library/datetime.html
 _ZERO = datetime.timedelta(0)
@@ -243,6 +262,531 @@ class TestResult(unittest.TestResult):
         """
 
 
+class StreamResult(object):
+    """A test result for reporting the activity of a test run.
+
+    Typical use
+
+      >>> result = StreamResult()
+      >>> result.startTestRun()
+      >>> try:
+      ...     case.run(result)
+      ... finally:
+      ...     result.stopTestRun()
+
+    The case object will be either a TestCase or a TestSuite, and will
+    generally make a sequence of calls like::
+
+      >>> result.status(self.id(), 'inprogress')
+      >>> result.status(self.id(), 'success')
+
+    General concepts
+
+    StreamResult is built to process events that are emitted by tests during a
+    test run or test enumeration. The test run may be running concurrently, and
+    even be spread out across multiple machines.
+
+    All events are timestamped to prevent network buffering or scheduling
+    latency causing false timing reports. Timestamps are datetime objects in
+    the UTC timezone.
+
+    A route_code is a unicode string that identifies where a particular test
+    ran. This is optional in the API but very useful when multiplexing
+    multiple streams together, as it allows identification of interactions
+    between tests that were run on the same hardware or in the same test
+    process. Generally actual tests never need to bother with this - it is
+    added and processed by StreamResults that do multiplexing / run
+    analysis. route_codes are also used to route stdin back to pdb instances.
+
+    The StreamResult base class does no accounting or processing; rather, it
+    just provides an empty implementation of every method, suitable for use
+    as a base class regardless of intent.
+    """
+
+    def startTestRun(self):
+        """Start a test run.
+
+        This will prepare the test result to process results (which might imply
+        connecting to a database or remote machine).
+        """
+
+    def stopTestRun(self):
+        """Stop a test run.
+
+        This informs the result that no more test updates will be received. At
+        this point any test ids that have started and not completed can be
+        considered failed-or-hung.
+        """
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        """Inform the result about a test status.
+
+        :param test_id: The test whose status is being reported. None to
+            report status about the test run as a whole.
+        :param test_status: The status for the test. There are two sorts of
+            status - interim and final status events. As many interim events
+            can be generated as desired, but only one final event. After a
+            final status event any further file or status events from the
+            same test_id+route_code may be discarded or associated with a new
+            test by the StreamResult. (But no exception will be thrown).
+
+            Interim states:
+              * None - no particular status is being reported, or status being
+                reported is not associated with a test (e.g. when reporting on
+                stdout / stderr chatter).
+              * inprogress - the test is currently running. Emitted by tests when
+                they start running and at any intermediary point they might
+                choose to indicate their continual operation.
+
+            Final states:
+              * exists - the test exists. This is used when a test is not being
+                executed. Typically this is when querying what tests could be run
+                in a test run (which is useful for selecting tests to run).
+              * xfail - the test failed but that was expected. This is purely
+                informative - the test is not considered to be a failure.
+              * uxsuccess - the test passed but was expected to fail. The test
+                will be considered a failure.
+              * success - the test has finished without error.
+              * fail - the test failed (or errored). The test will be considered
+                a failure.
+              * skip - the test was selected to run but chose to be skipped. E.g.
+                a test dependency was missing. This is purely informative - the
+                test is not considered to be a failure.
+
+        :param test_tags: Optional set of tags to apply to the test. Tags
+            have no intrinsic meaning - that is up to the test author.
+        :param runnable: Allows status reports to mark that they are for
+            tests which are not able to be explicitly run. For instance,
+            subtests will report themselves as non-runnable.
+        :param file_name: The name for the file_bytes. Any unicode string may
+            be used. While there is no semantic value attached to the name
+            of any attachment, the names 'stdout', 'stderr' and 'traceback'
+            should be reserved for output sent to stdout, stderr and
+            exception tracebacks respectively. When file_name is supplied,
+            file_bytes must be a bytes instance.
+        :param file_bytes: A bytes object containing content for the named
+            file. This can be just one chunk of the file - further chunks
+            can be emitted in later file events. Must be None unless a
+            file_name is supplied.
+        :param eof: True if this chunk is the last chunk of the file, any
+            additional chunks with the same name should be treated as an error
+            and discarded. Ignored unless file_name has been supplied.
+        :param mime_type: An optional MIME type for the file. stdout and
+            stderr will generally be "text/plain; charset=utf8". If None,
+            defaults to application/octet-stream. Ignored unless file_name
+            has been supplied.
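+
+        Example of reporting a failing test with an attached log (a sketch;
+        ``now`` is a UTC datetime)::
+
+            result.status(test_id='t', test_status='inprogress',
+                timestamp=now)
+            result.status(test_id='t', file_name='log', file_bytes=b'boom',
+                eof=True, mime_type='text/plain; charset=utf8',
+                timestamp=now)
+            result.status(test_id='t', test_status='fail', timestamp=now)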
+        """
+
+
+def domap(*args, **kwargs):
+    # map() is lazy on Python 3; force evaluation so the calls actually run.
+    return list(map(*args, **kwargs))
+
+
+class CopyStreamResult(StreamResult):
+    """Copies all event it receives to multiple results.
+
+    This provides an easy facility for combining multiple StreamResults.
+
+    For TestResult the equivalent class was ``MultiTestResult``.
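+
+    Typical use (a sketch; ``sink_one`` and ``sink_two`` are any two
+    StreamResult objects)::
+
+      >>> combined = CopyStreamResult([sink_one, sink_two])
+      >>> combined.startTestRun()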
+    """
+
+    def __init__(self, targets):
+        super(CopyStreamResult, self).__init__()
+        self.targets = targets
+
+    def startTestRun(self):
+        super(CopyStreamResult, self).startTestRun()
+        domap(methodcaller('startTestRun'), self.targets)
+
+    def stopTestRun(self):
+        super(CopyStreamResult, self).stopTestRun()
+        domap(methodcaller('stopTestRun'), self.targets)
+
+    def status(self, *args, **kwargs):
+        super(CopyStreamResult, self).status(*args, **kwargs)
+        domap(methodcaller('status', *args, **kwargs), self.targets)
+
+
+class StreamFailFast(StreamResult):
+    """Call the supplied callback if an error is seen in a stream.
+
+    An example callback, stopping the run via a ``TestControl``::
+
+       control = TestControl()
+       fail_fast = StreamFailFast(control.stop)
+    """
+
+    def __init__(self, on_error):
+        self.on_error = on_error
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        if test_status in ('uxsuccess', 'fail'):
+            self.on_error()
+
+
+class StreamResultRouter(StreamResult):
+    """A StreamResult that routes events.
+
+    StreamResultRouter forwards received events to another StreamResult object,
+    selected by a dynamic forwarding policy. Events where no destination is
+    found are forwarded to the fallback StreamResult, or an error is raised.
+
+    Typical use is to construct a router with a fallback and then either
+    create up front mapping rules, or create them as-needed from the fallback
+    handler::
+
+      >>> router = StreamResultRouter()
+      >>> sink = doubles.StreamResult()
+      >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+      ...     consume_route=True)
+      >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+    StreamResultRouter has no buffering.
+
+    When adding routes (and for the fallback) whether to call startTestRun and
+    stopTestRun or to not call them is controllable by passing
+    'do_start_stop_run'. The default is to call them for the fallback only.
+    If a route is added after startTestRun has been called, and
+    do_start_stop_run is True then startTestRun is called immediately on the
+    new route sink.
+
+    There is no a priori defined lookup order for routes: if they are
+    ambiguous the behaviour is undefined. Only a single route is chosen for
+    any event.
+    """
+
+    _policies = {}
+
+    def __init__(self, fallback=None, do_start_stop_run=True):
+        """Construct a StreamResultRouter with optional fallback.
+
+        :param fallback: A StreamResult to forward events to when no route
+            exists for them.
+        :param do_start_stop_run: If False do not pass startTestRun and
+            stopTestRun onto the fallback.
+        """
+        self.fallback = fallback
+        self._route_code_prefixes = {}
+        self._test_ids = {}
+        # Records sinks that should have do_start_stop_run called on them.
+        self._sinks = []
+        if do_start_stop_run and fallback:
+            self._sinks.append(fallback)
+        self._in_run = False
+
+    def startTestRun(self):
+        super(StreamResultRouter, self).startTestRun()
+        for sink in self._sinks:
+            sink.startTestRun()
+        self._in_run = True
+
+    def stopTestRun(self):
+        super(StreamResultRouter, self).stopTestRun()
+        for sink in self._sinks:
+            sink.stopTestRun()
+        self._in_run = False
+
+    def status(self, **kwargs):
+        route_code = kwargs.get('route_code', None)
+        test_id = kwargs.get('test_id', None)
+        if route_code is not None:
+            prefix = route_code.split('/')[0]
+        else:
+            prefix = route_code
+        if prefix in self._route_code_prefixes:
+            target, consume_route = self._route_code_prefixes[prefix]
+            if route_code is not None and consume_route:
+                route_code = route_code[len(prefix) + 1:]
+                if not route_code:
+                    route_code = None
+                kwargs['route_code'] = route_code
+        elif test_id in self._test_ids:
+            target = self._test_ids[test_id]
+        else:
+            target = self.fallback
+        target.status(**kwargs)
+
+    def add_rule(self, sink, policy, do_start_stop_run=False, **policy_args):
+        """Add a rule to route events to sink when they match a given policy.
+
+        :param sink: A StreamResult to receive events.
+        :param policy: A routing policy. Valid policies are
+            'route_code_prefix' and 'test_id'.
+        :param do_start_stop_run: If True then startTestRun and stopTestRun
+            events will be passed onto this sink.
+
+        :raises: ValueError if the policy is unknown
+        :raises: TypeError if the policy is given arguments it cannot handle.
+
+        ``route_code_prefix`` routes events based on a prefix of the route
+        code in the event. It takes a ``route_prefix`` argument to match on
+        (e.g. '0') and a ``consume_route`` argument, which, if True, removes
+        the prefix from the ``route_code`` when forwarding events.
+
+        ``test_id`` routes events based on the test id.  It takes a single
+        argument, ``test_id``.  Use ``None`` to select non-test events.
+        """
+        policy_method = StreamResultRouter._policies.get(policy, None)
+        if not policy_method:
+            raise ValueError("bad policy %r" % (policy,))
+        policy_method(self, sink, **policy_args)
+        if do_start_stop_run:
+            self._sinks.append(sink)
+            # Per the class docstring, start the sink immediately if the run
+            # is already in progress.
+            if self._in_run:
+                sink.startTestRun()
+
+    def _map_route_code_prefix(self, sink, route_prefix, consume_route=False):
+        if '/' in route_prefix:
+            raise TypeError(
+                "%r is more than one route step long" % (route_prefix,))
+        self._route_code_prefixes[route_prefix] = (sink, consume_route)
+    _policies['route_code_prefix'] = _map_route_code_prefix
+
+    def _map_test_id(self, sink, test_id):
+        self._test_ids[test_id] = sink
+    _policies['test_id'] = _map_test_id
+
+
+class StreamTagger(CopyStreamResult):
+    """Adds or discards tags from StreamResult events."""
+
+    def __init__(self, targets, add=None, discard=None):
+        """Create a StreamTagger.
+
+        :param targets: A list of targets to forward events onto.
+        :param add: Either None or an iterable of tags to add to each event.
+        :param discard: Either None or an iterable of tags to discard from each
+            event.
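+
+        Example (a sketch; ``sink`` is any StreamResult)::
+
+            tagger = StreamTagger([sink], add=['worker-1'], discard=['local'])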
+        """
+        super(StreamTagger, self).__init__(targets)
+        self.add = frozenset(add or ())
+        self.discard = frozenset(discard or ())
+
+    def status(self, *args, **kwargs):
+        # Copy the caller's tags (if any) so their set is not mutated.
+        test_tags = set(kwargs.get('test_tags') or ())
+        test_tags.update(self.add)
+        test_tags.difference_update(self.discard)
+        kwargs['test_tags'] = test_tags or None
+        super(StreamTagger, self).status(*args, **kwargs)
+
+
+class StreamToDict(StreamResult):
+    """A specialised StreamResult that emits a callback as tests complete.
+
+    Top level file attachments are simply discarded. Hung tests are detected
+    when stopTestRun is called and reported at that point.
+
+    The callback is passed a dict with the following keys:
+
+      * id: the test id.
+      * tags: The tags for the test. A set of unicode strings.
+      * details: A dict of file attachments - ``testtools.content.Content``
+        objects.
+      * status: One of the StreamResult status codes (including inprogress) or
+        'unknown' (used if only file events were received for a test).
+      * timestamps: A pair of timestamps - the first one received with this
+        test id, and the one in the event that triggered the notification.
+        Hung tests have a None for the second end event. Timestamps are not
+        compared - their ordering is purely order received in the stream.
+
+    Only the most recent tags observed in the stream are reported.
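+
+    Typical use (a sketch)::
+
+      >>> completed = []
+      >>> result = StreamToDict(completed.append)
+      >>> # After a run, ``completed`` holds one dict per finished test.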
+    """
+
+    def __init__(self, on_test):
+        """Create a StreamToDict calling on_test on test completions.
+
+        :param on_test: A callback that accepts one parameter - a dict
+            describing a test.
+        """
+        super(StreamToDict, self).__init__()
+        self.on_test = on_test
+        if parse_mime_type is None:
+            raise ImportError("mimeparse module missing.")
+
+    def startTestRun(self):
+        super(StreamToDict, self).startTestRun()
+        self._inprogress = {}
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        super(StreamToDict, self).status(test_id, test_status,
+            test_tags=test_tags, runnable=runnable, file_name=file_name,
+            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+            route_code=route_code, timestamp=timestamp)
+        key = self._ensure_key(test_id, route_code, timestamp)
+        # update fields
+        if not key:
+            return
+        if test_status is not None:
+            self._inprogress[key]['status'] = test_status
+        self._inprogress[key]['timestamps'][1] = timestamp
+        case = self._inprogress[key]
+        if file_name is not None:
+            if file_name not in case['details']:
+                if mime_type is None:
+                    mime_type = 'application/octet-stream'
+                primary, sub, parameters = parse_mime_type(mime_type)
+                if 'charset' in parameters:
+                    if ',' in parameters['charset']:
+                        # testtools was emitting a bad encoding, workaround it,
+                        # Though this does lose data - probably want to drop
+                        # this in a few releases.
+                        parameters['charset'] = parameters['charset'][
+                            :parameters['charset'].find(',')]
+                content_type = ContentType(primary, sub, parameters)
+                content_bytes = []
+                case['details'][file_name] = Content(
+                    content_type, lambda: content_bytes)
+            # Content.iter_bytes() returns the backing list created above,
+            # so new chunks can be appended to it directly.
+            case['details'][file_name].iter_bytes().append(file_bytes)
+        if test_tags is not None:
+            self._inprogress[key]['tags'] = test_tags
+        # notify completed tests.
+        if test_status not in (None, 'inprogress'):
+            self.on_test(self._inprogress.pop(key))
+
+    def stopTestRun(self):
+        super(StreamToDict, self).stopTestRun()
+        while self._inprogress:
+            case = self._inprogress.popitem()[1]
+            case['timestamps'][1] = None
+            self.on_test(case)
+
+    def _ensure_key(self, test_id, route_code, timestamp):
+        if test_id is None:
+            return
+        key = (test_id, route_code)
+        if key not in self._inprogress:
+            self._inprogress[key] = {
+                'id': test_id,
+                'tags': set(),
+                'details': {},
+                'status': 'unknown',
+                'timestamps': [timestamp, None]}
+        return key
+
+
+# Map status codes to the TestResult outcome used to report them.
+# 'inprogress' and 'unknown' map to addFailure because a test that never
+# completed is reported as a failure.
+_status_map = {
+    'inprogress': 'addFailure',
+    'unknown': 'addFailure',
+    'success': 'addSuccess',
+    'skip': 'addSkip',
+    'fail': 'addFailure',
+    'xfail': 'addExpectedFailure',
+    'uxsuccess': 'addUnexpectedSuccess',
+    }
+
+
+def test_dict_to_case(test_dict):
+    """Convert a test dict into a TestCase object.
+
+    :param test_dict: A test dict as generated by StreamToDict.
+    :return: A PlaceHolder test object.
+    """
+    # Circular import.
+    global PlaceHolder
+    if PlaceHolder is None:
+        from testtools.testcase import PlaceHolder
+    outcome = _status_map[test_dict['status']]
+    return PlaceHolder(test_dict['id'], outcome=outcome,
+        details=test_dict['details'], tags=test_dict['tags'],
+        timestamps=test_dict['timestamps'])
+
+
+class StreamSummary(StreamToDict):
+    """A specialised StreamResult that summarises a stream.
+
+    The summary uses the same representation as the original
+    unittest.TestResult contract, allowing it to be consumed by any test
+    runner.
+    """
+
+    def __init__(self):
+        super(StreamSummary, self).__init__(self._gather_test)
+        self._handle_status = {
+            'success': self._success,
+            'skip': self._skip,
+            'exists': self._exists,
+            'fail': self._fail,
+            'xfail': self._xfail,
+            'uxsuccess': self._uxsuccess,
+            'unknown': self._incomplete,
+            'inprogress': self._incomplete,
+            }
+
+    def startTestRun(self):
+        super(StreamSummary, self).startTestRun()
+        self.failures = []
+        self.errors = []
+        self.testsRun = 0
+        self.skipped = []
+        self.expectedFailures = []
+        self.unexpectedSuccesses = []
+
+    def wasSuccessful(self):
+        """Return False if any failure has occured.
+
+        Note that incomplete tests can only be detected when stopTestRun is
+        called, so that should be called before checking wasSuccessful.
+        """
+        return (not self.failures and not self.errors)
+
+    def _gather_test(self, test_dict):
+        if test_dict['status'] == 'exists':
+            return
+        self.testsRun += 1
+        case = test_dict_to_case(test_dict)
+        self._handle_status[test_dict['status']](case)
+
+    def _incomplete(self, case):
+        self.errors.append((case, "Test did not complete"))
+
+    def _success(self, case):
+        pass
+
+    def _skip(self, case):
+        if 'reason' not in case._details:
+            reason = "Unknown"
+        else:
+            reason = case._details['reason'].as_text()
+        self.skipped.append((case, reason))
+
+    def _exists(self, case):
+        pass
+
+    def _fail(self, case):
+        message = _details_to_str(case._details, special="traceback")
+        self.errors.append((case, message))
+
+    def _xfail(self, case):
+        message = _details_to_str(case._details, special="traceback")
+        self.expectedFailures.append((case, message))
+
+    def _uxsuccess(self, case):
+        case._outcome = 'addUnexpectedSuccess'
+        self.unexpectedSuccesses.append(case)
+
+
+class TestControl(object):
+    """Controls a running test run, allowing it to be interrupted.
+
+    :ivar shouldStop: If True, tests should not run and should instead
+        return immediately. Similarly a TestSuite should check this between
+        each test and if set stop dispatching any new tests and return.
+    """
+
+    def __init__(self):
+        super(TestControl, self).__init__()
+        self.shouldStop = False
+
+    def stop(self):
+        """Indicate that tests should stop running."""
+        self.shouldStop = True
+
+
 class MultiTestResult(TestResult):
     """A test result that dispatches to many test results."""
 
@@ -737,6 +1281,241 @@ class ExtendedToOriginalDecorator(object):
         return self.decorated.wasSuccessful()
 
 
+class ExtendedToStreamDecorator(CopyStreamResult, StreamSummary, TestControl):
+    """Permit using old TestResult API code with new StreamResult objects.
+
+    This decorates a StreamResult and converts old (Python 2.6 / 2.7 /
+    Extended) TestResult API calls into StreamResult calls.
+
+    It also supports regular StreamResult calls, making it safe to wrap around
+    any StreamResult.
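+
+    A sketch (``case`` is any test case)::
+
+      >>> result = ExtendedToStreamDecorator(StreamResult())
+      >>> result.startTestRun()
+      >>> case.run(result)
+      >>> result.stopTestRun()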
+    """
+
+    def __init__(self, decorated):
+        super(ExtendedToStreamDecorator, self).__init__([decorated])
+        # Deal with mismatched base class constructors.
+        TestControl.__init__(self)
+        self._started = False
+
+    def _get_failfast(self):
+        # A second target is only ever the StreamFailFast added below.
+        return len(self.targets) == 2
+    def _set_failfast(self, value):
+        if value:
+            if len(self.targets) == 2:
+                return
+            self.targets.append(StreamFailFast(self.stop))
+        else:
+            del self.targets[1:]
+    failfast = property(_get_failfast, _set_failfast)
+
+    def startTest(self, test):
+        if not self._started:
+            self.startTestRun()
+        self.status(test_id=test.id(), test_status='inprogress',
+            timestamp=self._now())
+        self._tags = TagContext(self._tags)
+
+    def stopTest(self, test):
+        self._tags = self._tags.parent
+
+    def addError(self, test, err=None, details=None):
+        self._check_args(err, details)
+        self._convert(test, err, details, 'fail')
+    addFailure = addError
+
+    def _convert(self, test, err, details, status, reason=None):
+        if not self._started:
+            self.startTestRun()
+        test_id = test.id()
+        now = self._now()
+        if err is not None:
+            if details is None:
+                details = {}
+            details['traceback'] = TracebackContent(err, test)
+        if details is not None:
+            for name, content in details.items():
+                mime_type = repr(content.content_type)
+                file_bytes = None
+                for next_bytes in content.iter_bytes():
+                    if file_bytes is not None:
+                        self.status(file_name=name, file_bytes=file_bytes,
+                            mime_type=mime_type, test_id=test_id, timestamp=now)
+                    file_bytes = next_bytes
+                self.status(file_name=name, file_bytes=file_bytes, eof=True,
+                    mime_type=mime_type, test_id=test_id, timestamp=now)
+        if reason is not None:
+            self.status(file_name='reason', file_bytes=reason.encode('utf8'),
+                eof=True, mime_type="text/plain; charset=utf8",
+                test_id=test_id, timestamp=now)
+        self.status(test_id=test_id, test_status=status,
+            test_tags=self.current_tags, timestamp=now)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._check_args(err, details)
+        self._convert(test, err, details, 'xfail')
+
+    def addSkip(self, test, reason=None, details=None):
+        self._convert(test, None, details, 'skip', reason)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self._convert(test, None, details, 'uxsuccess')
+
+    def addSuccess(self, test, details=None):
+        self._convert(test, None, details, 'success')
+
+    def _check_args(self, err, details):
+        param_count = 0
+        if err is not None:
+            param_count += 1
+        if details is not None:
+            param_count += 1
+        if param_count != 1:
+            raise ValueError("Must pass only one of err '%s' and details '%s"
+                % (err, details))
+
+    def startTestRun(self):
+        super(ExtendedToStreamDecorator, self).startTestRun()
+        self._tags = TagContext()
+        self.shouldStop = False
+        self.__now = None
+        self._started = True
+
+    @property
+    def current_tags(self):
+        """The currently set tags."""
+        return self._tags.get_current_tags()
+
+    def tags(self, new_tags, gone_tags):
+        """Add and remove tags from the test.
+
+        :param new_tags: A set of tags to be added to the stream.
+        :param gone_tags: A set of tags to be removed from the stream.
+        """
+        self._tags.change_tags(new_tags, gone_tags)
+
+    def _now(self):
+        """Return the current 'test time'.
+
+        If the time() method has not been called, this is equivalent to
+        datetime.now(), otherwise it is the last datetime supplied to the
+        time() method.
+        """
+        if self.__now is None:
+            return datetime.datetime.now(utc)
+        else:
+            return self.__now
+
+    def time(self, a_datetime):
+        self.__now = a_datetime
+
+    def wasSuccessful(self):
+        if not self._started:
+            self.startTestRun()
+        return super(ExtendedToStreamDecorator, self).wasSuccessful()
+
+
+class StreamToExtendedDecorator(StreamResult):
+    """Convert StreamResult API calls into ExtendedTestResult calls.
+
+    This will buffer all calls for all concurrently active tests, and
+    then flush each test as they complete.
+
+    Incomplete tests will be flushed as errors when the test run stops.
+
+    Non-test file attachments are accumulated into a test called
+    'testtools.extradata' and flushed at the end of the run.
+    """
+
+    def __init__(self, decorated):
+        # ExtendedToOriginalDecorator takes care of thunking details back to
+        # exceptions/reasons etc.
+        self.decorated = ExtendedToOriginalDecorator(decorated)
+        # StreamToDict buffers and gives us individual tests.
+        self.hook = StreamToDict(self._handle_tests)
+
+    def status(self, test_id=None, test_status=None, *args, **kwargs):
+        if test_status == 'exists':
+            return
+        self.hook.status(
+            test_id=test_id, test_status=test_status, *args, **kwargs)
+
+    def startTestRun(self):
+        self.decorated.startTestRun()
+        self.hook.startTestRun()
+
+    def stopTestRun(self):
+        self.hook.stopTestRun()
+        self.decorated.stopTestRun()
+
+    def _handle_tests(self, test_dict):
+        case = test_dict_to_case(test_dict)
+        case.run(self.decorated)
+
+
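Going the other way, a sketch using only names defined in this module
(timestamps are optional, shown here for completeness)::

    import datetime
    import unittest

    from testtools.testresult.real import StreamToExtendedDecorator, utc

    classic = unittest.TestResult()
    result = StreamToExtendedDecorator(classic)
    result.startTestRun()
    now = datetime.datetime.now(utc)
    result.status(test_id='sample', test_status='inprogress', timestamp=now)
    result.status(test_id='sample', test_status='success', timestamp=now)
    result.stopTestRun()
    print(classic.testsRun)  # 1 - the buffered test was replayed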
+class StreamToQueue(StreamResult):
+    """A StreamResult which enqueues events as a dict to a queue.Queue.
+
+    Events have their route code updated to include the route code
+    StreamToQueue was constructed with before they are submitted. If the event
+    route code is None, it is replaced with the StreamToQueue route code,
+    otherwise it is prefixed with the supplied code and a '/' separator.
+
+    startTestRun and stopTestRun are forwarded to the queue. Implementors that
+    dequeue events back into StreamResult calls should take care not to call
+    startTestRun / stopTestRun on other StreamResult objects multiple times
+    (e.g. by filtering startTestRun and stopTestRun).
+
+    ``StreamToQueue`` is typically used by
+    ``ConcurrentStreamTestSuite``, which creates one ``StreamToQueue``
+    per thread, forwards status events to the StreamResult that
+    ``ConcurrentStreamTestSuite.run()`` was called with, and uses the
+    stopTestRun event to trigger calling join() on each thread.
+
+    Unlike ThreadsafeForwardingResult which this supersedes, no buffering takes
+    place - any event supplied to a StreamToQueue will be inserted into the
+    queue immediately.
+
+    Events are forwarded as a dict with a key ``event`` which is one of
+    ``startTestRun``, ``stopTestRun`` or ``status``. When ``event`` is
+    ``status`` the dict also has keys matching the keyword arguments
+    of ``StreamResult.status``, otherwise it has one other key ``result``,
+    which is the ``StreamToQueue`` that emitted the event.
+    """
+
+    def __init__(self, queue, routing_code):
+        """Create a StreamToQueue forwarding to target.
+
+        :param queue: A ``queue.Queue`` to receive events.
+        :param routing_code: The routing code to apply to messages.
+        """
+        super(StreamToQueue, self).__init__()
+        self.queue = queue
+        self.routing_code = routing_code
+
+    def startTestRun(self):
+        self.queue.put(dict(event='startTestRun', result=self))
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        self.queue.put(dict(event='status', test_id=test_id,
+            test_status=test_status, test_tags=test_tags, runnable=runnable,
+            file_name=file_name, file_bytes=file_bytes, eof=eof,
+            mime_type=mime_type, route_code=self.route_code(route_code),
+            timestamp=timestamp))
+
+    def stopTestRun(self):
+        self.queue.put(dict(event='stopTestRun', result=self))
+
+    def route_code(self, route_code):
+        """Adjust route_code on the way through."""
+        if route_code is None:
+            return self.routing_code
+        return self.routing_code + _u("/") + route_code
+
+
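A sketch of the queue fan-in described above (the Queue import is spelled
for both Python 2 and 3)::

    try:
        from queue import Queue
    except ImportError:  # Python 2
        from Queue import Queue

    from testtools.testresult.real import StreamToQueue

    q = Queue()
    worker = StreamToQueue(q, '0')
    worker.startTestRun()
    worker.status(test_id='sample', test_status='success')
    worker.stopTestRun()
    # Three dicts arrive; the status event carries route_code '0'.
    while not q.empty():
        print(q.get()['event'])  # startTestRun, status, stopTestRun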
 class TestResultDecorator(object):
     """General pass-through decorator.
 
@@ -901,6 +1680,23 @@ class TestByTestResult(TestResult):
         self._details = details
 
 
+class TimestampingStreamResult(CopyStreamResult):
+    """A StreamResult decorator that assigns a timestamp when none is present.
+
+    This is convenient for ensuring events are timestamped.
+    """
+
+    def __init__(self, target):
+        super(TimestampingStreamResult, self).__init__([target])
+
+    def status(self, *args, **kwargs):
+        timestamp = kwargs.pop('timestamp', None)
+        if timestamp is None:
+            timestamp = datetime.datetime.now(utc)
+        super(TimestampingStreamResult, self).status(
+            *args, timestamp=timestamp, **kwargs)
+
+
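A minimal sketch (relying on CopyStreamResult forwarding keyword arguments
unchanged, as above): events lacking a timestamp are stamped in transit::

    from testtools import StreamResult
    from testtools.testresult.real import TimestampingStreamResult

    class PrintTimestamp(StreamResult):
        def status(self, **kwargs):
            print(kwargs['timestamp'])

    result = TimestampingStreamResult(PrintTimestamp())
    result.status(test_id='sample', test_status='success')  # stamped now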
 class _StringException(Exception):
     """An exception made from an arbitrary string."""
 
diff --git a/lib/testtools/testtools/tests/__init__.py b/lib/testtools/testtools/tests/__init__.py
index df9d44b..d40fcb3 100644
--- a/lib/testtools/testtools/tests/__init__.py
+++ b/lib/testtools/testtools/tests/__init__.py
@@ -1,6 +1,7 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
 """Tests for testtools itself."""
 
-# See README for copyright and licensing details.
 
 from unittest import TestSuite
 
@@ -8,6 +9,7 @@ from unittest import TestSuite
 def test_suite():
     from testtools.tests import (
         matchers,
+        test_assert_that,
         test_compat,
         test_content,
         test_content_type,
@@ -26,6 +28,7 @@ def test_suite():
         )
     modules = [
         matchers,
+        test_assert_that,
         test_compat,
         test_content,
         test_content_type,
diff --git a/lib/testtools/testtools/tests/helpers.py b/lib/testtools/testtools/tests/helpers.py
index ade2d96..f766da3 100644
--- a/lib/testtools/testtools/tests/helpers.py
+++ b/lib/testtools/testtools/tests/helpers.py
@@ -8,11 +8,10 @@ __all__ = [
 
 import sys
 
+from extras import safe_hasattr
+
 from testtools import TestResult
-from testtools.helpers import (
-    safe_hasattr,
-    )
-from testtools.content import TracebackContent
+from testtools.content import StackLinesContent
 from testtools import runtest
 
 
@@ -84,12 +83,12 @@ class LoggingResult(TestResult):
 
 
 def is_stack_hidden():
-    return TracebackContent.HIDE_INTERNAL_STACK
+    return StackLinesContent.HIDE_INTERNAL_STACK
 
 
 def hide_testtools_stack(should_hide=True):
-    result = TracebackContent.HIDE_INTERNAL_STACK
-    TracebackContent.HIDE_INTERNAL_STACK = should_hide
+    result = StackLinesContent.HIDE_INTERNAL_STACK
+    StackLinesContent.HIDE_INTERNAL_STACK = should_hide
     return result
 
 
diff --git a/lib/testtools/testtools/tests/matchers/test_basic.py b/lib/testtools/testtools/tests/matchers/test_basic.py
index 1109fa4..c53bc9e 100644
--- a/lib/testtools/testtools/tests/matchers/test_basic.py
+++ b/lib/testtools/testtools/tests/matchers/test_basic.py
@@ -19,6 +19,7 @@ from testtools.matchers._basic import (
     IsInstance,
     LessThan,
     GreaterThan,
+    HasLength,
     MatchesRegex,
     NotEquals,
     SameMembers,
@@ -35,8 +36,14 @@ class Test_BinaryMismatch(TestCase):
     _long_b = _b(_long_string)
     _long_u = _u(_long_string)
 
+    class CustomRepr(object):
+        def __init__(self, repr_string):
+            self._repr_string = repr_string
+        def __repr__(self):
+            return _u('<object ') + _u(self._repr_string) + _u('>')
+
     def test_short_objects(self):
-        o1, o2 = object(), object()
+        o1, o2 = self.CustomRepr('a'), self.CustomRepr('b')
         mismatch = _BinaryMismatch(o1, "!~", o2)
         self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
 
@@ -369,6 +376,21 @@ class TestMatchesRegex(TestCase, TestMatchersInterface):
         ]
 
 
+class TestHasLength(TestCase, TestMatchersInterface):
+
+    matches_matcher = HasLength(2)
+    matches_matches = [[1, 2]]
+    matches_mismatches = [[], [1], [3, 2, 1]]
+
+    str_examples = [
+        ("HasLength(2)", HasLength(2)),
+        ]
+
+    describe_examples = [
+        ("len([]) != 1", [], HasLength(1)),
+        ]
+
+
 def test_suite():
     from unittest import TestLoader
     return TestLoader().loadTestsFromName(__name__)
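For reference, the new matcher behaves as follows (a sketch, importing from
the same module the test uses)::

    from testtools.matchers._basic import HasLength

    print(HasLength(2).match([1, 2]))           # None - a match
    print(HasLength(2).match([1]).describe())   # len([1]) != 2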
diff --git a/lib/testtools/testtools/tests/matchers/test_dict.py b/lib/testtools/testtools/tests/matchers/test_dict.py
index c6e2c9c..00368dd 100644
--- a/lib/testtools/testtools/tests/matchers/test_dict.py
+++ b/lib/testtools/testtools/tests/matchers/test_dict.py
@@ -30,7 +30,7 @@ class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
         ]
 
 
-class TestKeysEqual(TestCase, TestMatchersInterface):
+class TestKeysEqualWithList(TestCase, TestMatchersInterface):
 
     matches_matcher = KeysEqual('foo', 'bar')
     matches_matches = [
@@ -60,6 +60,11 @@ class TestKeysEqual(TestCase, TestMatchersInterface):
                 % (matchee,)))
 
 
+class TestKeysEqualWithDict(TestKeysEqualWithList):
+
+    matches_matcher = KeysEqual({'foo': 3, 'bar': 4})
+
+
 class TestSubDictOf(TestCase, TestMatchersInterface):
 
     matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
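The dict form accepted above is equivalent to spelling the keys out; a quick
sketch::

    from testtools.matchers import KeysEqual

    observed = {'foo': 0, 'bar': 1}
    print(KeysEqual('foo', 'bar').match(observed))          # None - a match
    print(KeysEqual({'foo': 3, 'bar': 4}).match(observed))  # also a match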
diff --git a/lib/testtools/testtools/tests/matchers/test_exception.py b/lib/testtools/testtools/tests/matchers/test_exception.py
index ef7185f..a74043a 100644
--- a/lib/testtools/testtools/tests/matchers/test_exception.py
+++ b/lib/testtools/testtools/tests/matchers/test_exception.py
@@ -163,12 +163,7 @@ class TestRaisesBaseTypes(TestCase):
         # Exception, it is propagated.
         match_keyb = Raises(MatchesException(KeyboardInterrupt))
         def raise_keyb_from_match():
-            if sys.version_info > (2, 5):
-                matcher = Raises(MatchesException(Exception))
-            else:
-                # On Python 2.4 KeyboardInterrupt is a StandardError subclass
-                # but should propogate from less generic exception matchers
-                matcher = Raises(MatchesException(EnvironmentError))
+            matcher = Raises(MatchesException(Exception))
             matcher.match(self.raiser)
         self.assertThat(raise_keyb_from_match, match_keyb)
 
diff --git a/lib/testtools/testtools/tests/matchers/test_higherorder.py b/lib/testtools/testtools/tests/matchers/test_higherorder.py
index c5cc44e..fb86b7f 100644
--- a/lib/testtools/testtools/tests/matchers/test_higherorder.py
+++ b/lib/testtools/testtools/tests/matchers/test_higherorder.py
@@ -18,6 +18,7 @@ from testtools.matchers._higherorder import (
     MatchesAny,
     MatchesAll,
     MatchesPredicate,
+    MatchesPredicateWithParams,
     Not,
     )
 from testtools.tests.helpers import FullStackRunTest
@@ -222,6 +223,32 @@ class TestMatchesPredicate(TestCase, TestMatchersInterface):
         ]
 
 
+def between(x, low, high):
+    return low < x < high
+
+
+class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesPredicateWithParams(
+        between, "{0} is not between {1} and {2}")(1, 9)
+    matches_matches = [2, 4, 6, 8]
+    matches_mismatches = [0, 1, 9, 10]
+
+    str_examples = [
+        ("MatchesPredicateWithParams(%r, %r)(%s)" % (
+            between, "{0} is not between {1} and {2}", "1, 2"),
+         MatchesPredicateWithParams(
+            between, "{0} is not between {1} and {2}")(1, 2)),
+        ("Between(1, 2)", MatchesPredicateWithParams(
+            between, "{0} is not between {1} and {2}", "Between")(1, 2)),
+        ]
+
+    describe_examples = [
+        ('1 is not between 2 and 3', 1, MatchesPredicateWithParams(
+            between, "{0} is not between {1} and {2}")(2, 3)),
+        ]
+
+
 def test_suite():
     from unittest import TestLoader
     return TestLoader().loadTestsFromName(__name__)
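The factory pattern exercised above, as a standalone sketch (same import
path as the test)::

    from testtools.matchers._higherorder import MatchesPredicateWithParams

    def between(x, low, high):
        return low < x < high

    # Build the parameterised matcher factory once, then instantiate it
    # with concrete bounds per assertion.
    IsBetween = MatchesPredicateWithParams(
        between, "{0} is not between {1} and {2}")
    print(IsBetween(1, 9).match(5))             # None - a match
    print(IsBetween(2, 3).match(1).describe())  # 1 is not between 2 and 3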
diff --git a/lib/testtools/testtools/tests/test_assert_that.py b/lib/testtools/testtools/tests/test_assert_that.py
new file mode 100644
index 0000000..66b4166
--- /dev/null
+++ b/lib/testtools/testtools/tests/test_assert_that.py
@@ -0,0 +1,152 @@
+from doctest import ELLIPSIS
+
+from testtools import (
+    TestCase,
+    )
+from testtools.assertions import (
+    assert_that,
+    )
+from testtools.compat import (
+    _u,
+    )
+from testtools.content import (
+    TracebackContent,
+    )
+from testtools.matchers import (
+    Annotate,
+    DocTestMatches,
+    Equals,
+    )
+
+
+class AssertThatTests(object):
+    """A mixin containing shared tests for assertThat and assert_that."""
+
+    def assert_that_callable(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def assertFails(self, message, function, *args, **kwargs):
+        """Assert that function raises a failure with the given message."""
+        failure = self.assertRaises(
+            self.failureException, function, *args, **kwargs)
+        self.assert_that_callable(failure, DocTestMatches(message, ELLIPSIS))
+
+    def test_assertThat_matches_clean(self):
+        class Matcher(object):
+            def match(self, foo):
+                return None
+        self.assert_that_callable("foo", Matcher())
+
+    def test_assertThat_mismatch_raises_description(self):
+        calls = []
+        class Mismatch(object):
+            def __init__(self, thing):
+                self.thing = thing
+            def describe(self):
+                calls.append(('describe_diff', self.thing))
+                return "object is not a thing"
+            def get_details(self):
+                return {}
+        class Matcher(object):
+            def match(self, thing):
+                calls.append(('match', thing))
+                return Mismatch(thing)
+            def __str__(self):
+                calls.append(('__str__',))
+                return "a description"
+        class Test(type(self)):
+            def test(self):
+                self.assert_that_callable("foo", Matcher())
+        result = Test("test").run()
+        self.assertEqual([
+            ('match', "foo"),
+            ('describe_diff', "foo"),
+            ], calls)
+        self.assertFalse(result.wasSuccessful())
+
+    def test_assertThat_output(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = matcher.match(matchee).describe()
+        self.assertFails(expected, self.assert_that_callable, matchee, matcher)
+
+    def test_assertThat_message_is_annotated(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = Annotate('woo', matcher).match(matchee).describe()
+        self.assertFails(expected,
+                         self.assert_that_callable, matchee, matcher, 'woo')
+
+    def test_assertThat_verbose_output(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = (
+            'Match failed. Matchee: %r\n'
+            'Matcher: %s\n'
+            'Difference: %s\n' % (
+                matchee,
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        self.assertFails(
+            expected,
+            self.assert_that_callable, matchee, matcher, verbose=True)
+
+    def get_error_string(self, e):
+        """Get the string showing how 'e' would be formatted in test output.
+
+        This is a little bit hacky, since it's designed to give consistent
+        output regardless of Python version.
+
+        In testtools, TestResult._exc_info_to_unicode is the point of dispatch
+        between various different implementations of methods that format
+        exceptions, so that's what we have to call. However, that method cares
+        about stack traces and formats the exception class. We don't care
+        about either of these, so we take its output and parse it a little.
+        """
+        error = TracebackContent((e.__class__, e, None), self).as_text()
+        # We aren't at all interested in the traceback.
+        if error.startswith('Traceback (most recent call last):\n'):
+            lines = error.splitlines(True)[1:]
+            for i, line in enumerate(lines):
+                if not line.startswith(' '):
+                    break
+            error = ''.join(lines[i:])
+        # We aren't interested in how the exception type is formatted.
+        exc_class, error = error.split(': ', 1)
+        return error
+
+    def test_assertThat_verbose_unicode(self):
+        # When assertThat is given matchees or matchers that contain non-ASCII
+        # unicode strings, we can still provide a meaningful error.
+        matchee = _u('\xa7')
+        matcher = Equals(_u('a'))
+        expected = (
+            'Match failed. Matchee: %s\n'
+            'Matcher: %s\n'
+            'Difference: %s\n\n' % (
+                repr(matchee).replace("\\xa7", matchee),
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        e = self.assertRaises(
+            self.failureException, self.assert_that_callable, matchee, matcher,
+            verbose=True)
+        self.assertEqual(expected, self.get_error_string(e))
+
+
+class TestAssertThatFunction(AssertThatTests, TestCase):
+
+    def assert_that_callable(self, *args, **kwargs):
+        return assert_that(*args, **kwargs)
+
+
+class TestAssertThatMethod(AssertThatTests, TestCase):
+
+    def assert_that_callable(self, *args, **kwargs):
+        return self.assertThat(*args, **kwargs)
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
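The behaviour under test, in short: ``assert_that`` mirrors
``TestCase.assertThat`` without requiring a test case instance. A sketch::

    from testtools.assertions import assert_that
    from testtools.matchers import Equals

    assert_that('foo', Equals('foo'))  # passes silently
    assert_that('foo', Equals('bar'))  # raises an AssertionError
                                       # describing the mismatch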
diff --git a/lib/testtools/testtools/tests/test_compat.py b/lib/testtools/testtools/tests/test_compat.py
index b29dc34..84e57be 100644
--- a/lib/testtools/testtools/tests/test_compat.py
+++ b/lib/testtools/testtools/tests/test_compat.py
@@ -2,6 +2,7 @@
 
 """Tests for miscellaneous compatibility functions"""
 
+import io
 import linecache
 import os
 import sys
@@ -13,6 +14,9 @@ import testtools
 from testtools.compat import (
     _b,
     _detect_encoding,
+    _format_exc_info,
+    _format_exception_only,
+    _format_stack_list,
     _get_source_encoding,
     _u,
     reraise,
@@ -21,6 +25,9 @@ from testtools.compat import (
     unicode_output_stream,
     )
 from testtools.matchers import (
+    Equals,
+    Is,
+    IsInstance,
     MatchesException,
     Not,
     Raises,
@@ -106,7 +113,8 @@ class TestDetectEncoding(testtools.TestCase):
             '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
         self._check_encoding("utf-8", (
             "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
-            '"""Module docstring say \xe2\x98\x86"""\n'))
+            '"""Module docstring say \xe2\x98\x86"""\n'),
+            possibly_invalid=True)
 
     def test_multiple_coding_comments(self):
         """Test only the first of multiple coding declarations counts"""
@@ -256,12 +264,30 @@ class TestUnicodeOutputStream(testtools.TestCase):
             newio = True
         sout = StringIO()
         soutwrapper = unicode_output_stream(sout)
-        if newio:
-            self.expectFailure("Python 3 StringIO expects text not bytes",
-                self.assertThat, lambda: soutwrapper.write(self.uni),
-                Not(Raises(MatchesException(TypeError))))
         soutwrapper.write(self.uni)
-        self.assertEqual("pa???n", sout.getvalue())
+        if newio:
+            self.assertEqual(self.uni, sout.getvalue())
+        else:
+            self.assertEqual("pa???n", sout.getvalue())
+
+    def test_io_stringio(self):
+        # io.StringIO only accepts unicode, so it should be returned as-is.
+        s = io.StringIO()
+        self.assertEqual(s, unicode_output_stream(s))
+
+    def test_io_bytesio(self):
+        # io.BytesIO only accepts bytes, so it should be wrapped.
+        bytes_io = io.BytesIO()
+        self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io))))
+        # Will error if bytes_io was not wrapped properly.
+        unicode_output_stream(bytes_io).write(_u('foo'))
+
+    def test_io_textwrapper(self):
+        # TextIOWrapper already handles unicode, so it should be returned
+        # as-is.
+        text_io = io.TextIOWrapper(io.BytesIO())
+        self.assertThat(unicode_output_stream(text_io), Is(text_io))
+        # To be sure...
+        unicode_output_stream(text_io).write(_u('foo'))
 
 
 class TestTextRepr(testtools.TestCase):
@@ -427,6 +453,151 @@ class TestReraise(testtools.TestCase):
         self.assertRaises(CustomException, reraise, *_exc_info)
 
 
+class Python2CompatibilityTests(testtools.TestCase):
+
+    def setUp(self):
+        super(Python2CompatibilityTests, self).setUp()
+        if sys.version_info[0] >= 3:
+            self.skip("These tests are only applicable to Python 2.")
+
+
+class TestExceptionFormatting(Python2CompatibilityTests):
+    """Test the _format_exception_only function."""
+
+    def _assert_exception_format(self, eclass, evalue, expected):
+        actual = _format_exception_only(eclass, evalue)
+        self.assertThat(actual, Equals(expected))
+        self.assertThat(''.join(actual), IsInstance(unicode))
+
+    def test_supports_string_exception(self):
+        self._assert_exception_format(
+            "String_Exception",
+            None,
+            [_u("String_Exception\n")]
+        )
+
+    def test_supports_regular_exception(self):
+        self._assert_exception_format(
+            RuntimeError,
+            RuntimeError("Something went wrong"),
+            [_u("RuntimeError: Something went wrong\n")]
+        )
+
+    def test_supports_unprintable_exceptions(self):
+        """Verify support for exception classes that raise an exception when
+        __unicode__ or __str__ is called.
+        """
+        class UnprintableException(Exception):
+
+            def __str__(self):
+                raise Exception()
+
+            def __unicode__(self):
+                raise Exception()
+
+        self._assert_exception_format(
+            UnprintableException,
+            UnprintableException("Foo"),
+            [_u("UnprintableException: <unprintable UnprintableException object>\n")]
+        )
+
+    def test_supports_exceptions_with_no_string_value(self):
+        class NoStringException(Exception):
+
+            def __str__(self):
+                return ""
+
+            def __unicode__(self):
+                return _u("")
+
+        self._assert_exception_format(
+            NoStringException,
+            NoStringException("Foo"),
+            [_u("NoStringException\n")]
+        )
+
+    def test_supports_strange_syntax_error(self):
+        """Test support for syntax errors with unusual number of arguments"""
+        self._assert_exception_format(
+            SyntaxError,
+            SyntaxError("Message"),
+            [_u("SyntaxError: Message\n")]
+        )
+
+    def test_supports_syntax_error(self):
+        self._assert_exception_format(
+            SyntaxError,
+            SyntaxError(
+                "Some Syntax Message",
+                (
+                    "/path/to/file",
+                    12,
+                    2,
+                    "This is the line of code",
+                )
+            ),
+            [
+                _u('  File "/path/to/file", line 12\n'),
+                _u('    This is the line of code\n'),
+                _u('     ^\n'),
+                _u('SyntaxError: Some Syntax Message\n'),
+            ]
+        )
+
+
+class StackListFormattingTests(Python2CompatibilityTests):
+    """Test the _format_stack_list function."""
+
+    def _assert_stack_format(self, stack_lines, expected_output):
+        actual = _format_stack_list(stack_lines)
+        self.assertThat(actual, Equals([expected_output]))
+
+    def test_single_complete_stack_line(self):
+        stack_lines = [(
+            '/path/to/filename',
+            12,
+            'func_name',
+            'some_code()',
+        )]
+        expected = \
+            _u('  File "/path/to/filename", line 12, in func_name\n' \
+               '    some_code()\n')
+
+        self._assert_stack_format(stack_lines, expected)
+
+    def test_single_stack_line_no_code(self):
+        stack_lines = [(
+            '/path/to/filename',
+            12,
+            'func_name',
+            None
+        )]
+        expected = _u('  File "/path/to/filename", line 12, in func_name\n')
+        self._assert_stack_format(stack_lines, expected)
+
+
+class FormatExceptionInfoTests(Python2CompatibilityTests):
+
+    def test_individual_functions_called(self):
+        self.patch(
+            testtools.compat,
+            '_format_stack_list',
+            lambda stack_list: [_u("format stack list called\n")]
+        )
+        self.patch(
+            testtools.compat,
+            '_format_exception_only',
+            lambda etype, evalue: [_u("format exception only called\n")]
+        )
+        result = _format_exc_info(None, None, None)
+        expected = [
+            _u("Traceback (most recent call last):\n"),
+            _u("format stack list called\n"),
+            _u("format exception only called\n"),
+        ]
+        self.assertThat(expected, Equals(result))
+
+
 def test_suite():
     from unittest import TestLoader
     return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_content.py b/lib/testtools/testtools/tests/test_content.py
index bc72513..342ae23 100644
--- a/lib/testtools/testtools/tests/test_content.py
+++ b/lib/testtools/testtools/tests/test_content.py
@@ -5,12 +5,13 @@ import os
 import tempfile
 import unittest
 
-from testtools import TestCase
+from testtools import TestCase, skipUnless
 from testtools.compat import (
     _b,
     _u,
     BytesIO,
     StringIO,
+    str_is_unicode,
     )
 from testtools.content import (
     attach_file,
@@ -19,6 +20,8 @@ from testtools.content import (
     content_from_stream,
     JSON,
     json_content,
+    StackLinesContent,
+    StacktraceContent,
     TracebackContent,
     text_content,
     )
@@ -188,12 +191,60 @@ class TestContent(TestCase):
         expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
         self.assertEqual(expected, text_content(data))
 
+    @skipUnless(str_is_unicode, "Test only applies in python 3.")
+    def test_text_content_raises_TypeError_when_passed_bytes(self):
+        data = _b("Some Bytes")
+        self.assertRaises(TypeError, text_content, data)
+
     def test_json_content(self):
         data = {'foo': 'bar'}
         expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
         self.assertEqual(expected, json_content(data))
 
 
+class TestStackLinesContent(TestCase):
+
+    def _get_stack_line_and_expected_output(self):
+        stack_lines = [
+            ('/path/to/file', 42, 'some_function', 'print("Hello World")'),
+        ]
+        expected = '  File "/path/to/file", line 42, in some_function\n' \
+                   '    print("Hello World")\n'
+        return stack_lines, expected
+
+    def test_single_stack_line(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        actual = StackLinesContent(stack_lines).as_text()
+
+        self.assertEqual(expected, actual)
+
+    def test_prefix_content(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        prefix = self.getUniqueString() + '\n'
+        content = StackLinesContent(stack_lines, prefix_content=prefix)
+        actual = content.as_text()
+        expected = prefix + expected
+
+        self.assertEqual(expected, actual)
+
+    def test_postfix_content(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        postfix = '\n' + self.getUniqueString()
+        content = StackLinesContent(stack_lines, postfix_content=postfix)
+        actual = content.as_text()
+        expected = expected + postfix
+
+        self.assertEqual(expected, actual)
+
+    def test___init___sets_content_type(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        content = StackLinesContent(stack_lines)
+        expected_content_type = ContentType("text", "x-traceback",
+            {"language": "python", "charset": "utf8"})
+
+        self.assertEqual(expected_content_type, content.content_type)
+
+
 class TestTracebackContent(TestCase):
 
     def test___init___None_errors(self):
@@ -210,6 +261,33 @@ class TestTracebackContent(TestCase):
         self.assertEqual(expected, ''.join(list(content.iter_text())))
 
 
+class TestStacktraceContent(TestCase):
+
+    def test___init___sets_ivars(self):
+        content = StacktraceContent()
+        content_type = ContentType("text", "x-traceback",
+            {"language": "python", "charset": "utf8"})
+
+        self.assertEqual(content_type, content.content_type)
+
+    def test_prefix_is_used(self):
+        prefix = self.getUniqueString()
+        actual = StacktraceContent(prefix_content=prefix).as_text()
+
+        self.assertTrue(actual.startswith(prefix))
+
+    def test_postfix_is_used(self):
+        postfix = self.getUniqueString()
+        actual = StacktraceContent(postfix_content=postfix).as_text()
+
+        self.assertTrue(actual.endswith(postfix))
+
+    def test_top_frame_is_skipped_when_no_stack_is_specified(self):
+        actual = StacktraceContent().as_text()
+
+        self.assertTrue('testtools/content.py' not in actual)
+
+
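``StacktraceContent`` captures the current call stack as a
text/x-traceback detail; a small sketch using the keyword arguments
exercised above::

    from testtools.content import StacktraceContent

    content = StacktraceContent(prefix_content='Stack at this point:\n')
    print(content.as_text())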
 class TestAttachFile(TestCase):
 
     def make_file(self, data):
diff --git a/lib/testtools/testtools/tests/test_content_type.py b/lib/testtools/testtools/tests/test_content_type.py
index ecb8e3a..2d34f95 100644
--- a/lib/testtools/testtools/tests/test_content_type.py
+++ b/lib/testtools/testtools/tests/test_content_type.py
@@ -43,7 +43,7 @@ class TestContentType(TestCase):
         content_type = ContentType(
             'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
         self.assertThat(
-            repr(content_type), Equals('text/plain; baz="qux", foo="bar"'))
+            repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
 
 
 class TestBuiltinContentTypes(TestCase):
diff --git a/lib/testtools/testtools/tests/test_deferredruntest.py b/lib/testtools/testtools/tests/test_deferredruntest.py
index 3373c06..3310926 100644
--- a/lib/testtools/testtools/tests/test_deferredruntest.py
+++ b/lib/testtools/testtools/tests/test_deferredruntest.py
@@ -5,6 +5,8 @@
 import os
 import signal
 
+from extras import try_import
+
 from testtools import (
     skipIf,
     TestCase,
@@ -13,7 +15,6 @@ from testtools import (
 from testtools.content import (
     text_content,
     )
-from testtools.helpers import try_import
 from testtools.matchers import (
     Equals,
     KeysEqual,
@@ -52,6 +53,12 @@ class X(object):
             self.calls.append('tearDown')
             super(X.Base, self).tearDown()
 
+    class BaseExceptionRaised(Base):
+        expected_calls = ['setUp', 'tearDown', 'clean-up']
+        expected_results = [('addError', SystemExit)]
+        def test_something(self):
+            raise SystemExit(0)
+
     class ErrorInSetup(Base):
         expected_calls = ['setUp', 'clean-up']
         expected_results = [('addError', RuntimeError)]
@@ -102,7 +109,10 @@ class X(object):
         def test_runner(self):
             result = ExtendedTestResult()
             test = self.test_factory('test_something', runTest=self.runner)
-            test.run(result)
+            if self.test_factory is X.BaseExceptionRaised:
+                self.assertRaises(SystemExit, test.run, result)
+            else:
+                test.run(result)
             self.assertEqual(test.calls, self.test_factory.expected_calls)
             self.assertResultsMatch(test, result)
 
@@ -117,6 +127,7 @@ def make_integration_tests():
         ]
 
     tests = [
+        X.BaseExceptionRaised,
         X.ErrorInSetup,
         X.ErrorInTest,
         X.ErrorInTearDown,
@@ -545,7 +556,7 @@ class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
                 self.addCleanup(lambda: 3 / 0)
                 # Dirty the reactor.
                 from twisted.internet.protocol import ServerFactory
-                reactor.listenTCP(0, ServerFactory())
+                reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
                 # Unhandled error.
                 defer.maybeDeferred(lambda: 2 / 0)
                 # Actual error.
diff --git a/lib/testtools/testtools/tests/test_distutilscmd.py b/lib/testtools/testtools/tests/test_distutilscmd.py
index 59762df..fd0dd90 100644
--- a/lib/testtools/testtools/tests/test_distutilscmd.py
+++ b/lib/testtools/testtools/tests/test_distutilscmd.py
@@ -4,12 +4,13 @@
 
 from distutils.dist import Distribution
 
+from extras import try_import
+
 from testtools.compat import (
     _b,
     _u,
     BytesIO,
     )
-from testtools.helpers import try_import
 fixtures = try_import('fixtures')
 
 import testtools
@@ -60,8 +61,8 @@ class TestCommandTest(TestCase):
         dist.cmdclass = {'test': TestCommand}
         dist.command_options = {
             'test': {'test_module': ('command line', 'testtools.runexample')}}
-        cmd = dist.reinitialize_command('test')
         with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            cmd = dist.reinitialize_command('test')
             dist.run_command('test')
         self.assertThat(
             stdout.getDetails()['stdout'].as_text(),
@@ -82,8 +83,8 @@ OK
             'test': {
                 'test_suite': (
                     'command line', 'testtools.runexample.test_suite')}}
-        cmd = dist.reinitialize_command('test')
         with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            cmd = dist.reinitialize_command('test')
             dist.run_command('test')
         self.assertThat(
             stdout.getDetails()['stdout'].as_text(),
diff --git a/lib/testtools/testtools/tests/test_fixturesupport.py b/lib/testtools/testtools/tests/test_fixturesupport.py
index cff9eb4..e309045 100644
--- a/lib/testtools/testtools/tests/test_fixturesupport.py
+++ b/lib/testtools/testtools/tests/test_fixturesupport.py
@@ -2,13 +2,15 @@
 
 import unittest
 
+from extras import try_import
+
 from testtools import (
     TestCase,
     content,
     content_type,
     )
 from testtools.compat import _b, _u
-from testtools.helpers import try_import
+from testtools.matchers import Contains
 from testtools.testresult.doubles import (
     ExtendedTestResult,
     )
@@ -111,6 +113,32 @@ class TestFixtureSupport(TestCase):
         self.assertEqual(['content', 'traceback'], sorted(details))
         self.assertEqual('foobar', ''.join(details['content'].iter_text()))
 
+    def test_useFixture_original_exception_raised_if_gather_details_fails(self):
+        # In bug #1368440 it was reported that when a fixture fails in setUp
+        # and gather_details itself errors while collecting its details, the
+        # original setUp failure is not reported.
+        class BrokenFixture(fixtures.Fixture):
+            def getDetails(self):
+                raise AttributeError("getDetails broke")
+            def setUp(self):
+                fixtures.Fixture.setUp(self)
+                raise Exception("setUp broke")
+        fixture = BrokenFixture()
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+        result = ExtendedTestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertEqual('addError', result._events[-2][0])
+        details = result._events[-2][2]
+        self.assertEqual(['traceback', 'traceback-1'], sorted(details))
+        self.assertThat(
+            ''.join(details['traceback'].iter_text()),
+            Contains('setUp broke'))
+        self.assertThat(
+            ''.join(details['traceback-1'].iter_text()),
+            Contains('getDetails broke'))
+
 
 def test_suite():
     from unittest import TestLoader
diff --git a/lib/testtools/testtools/tests/test_helpers.py b/lib/testtools/testtools/tests/test_helpers.py
index 98da534..848c2f0 100644
--- a/lib/testtools/testtools/tests/test_helpers.py
+++ b/lib/testtools/testtools/tests/test_helpers.py
@@ -1,196 +1,13 @@
 # Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
 
 from testtools import TestCase
-from testtools.helpers import (
-    try_import,
-    try_imports,
-    )
-from testtools.matchers import (
-    Equals,
-    Is,
-    Not,
-    )
 from testtools.tests.helpers import (
     FullStackRunTest,
     hide_testtools_stack,
     is_stack_hidden,
-    safe_hasattr,
     )
 
 
-def check_error_callback(test, function, arg, expected_error_count,
-    expect_result):
-    """General test template for error_callback argument.
-
-    :param test: Test case instance.
-    :param function: Either try_import or try_imports.
-    :param arg: Name or names to import.
-    :param expected_error_count: Expected number of calls to the callback.
-    :param expect_result: Boolean for whether a module should
-        ultimately be returned or not.
-    """
-    cb_calls = []
-    def cb(e):
-        test.assertIsInstance(e, ImportError)
-        cb_calls.append(e)
-    try:
-        result = function(arg, error_callback=cb)
-    except ImportError:
-        test.assertFalse(expect_result)
-    else:
-        if expect_result:
-            test.assertThat(result, Not(Is(None)))
-        else:
-            test.assertThat(result, Is(None))
-    test.assertEquals(len(cb_calls), expected_error_count)
-
-
-class TestSafeHasattr(TestCase):
-
-    def test_attribute_not_there(self):
-        class Foo(object):
-            pass
-        self.assertEqual(False, safe_hasattr(Foo(), 'anything'))
-
-    def test_attribute_there(self):
-        class Foo(object):
-            pass
-        foo = Foo()
-        foo.attribute = None
-        self.assertEqual(True, safe_hasattr(foo, 'attribute'))
-
-    def test_property_there(self):
-        class Foo(object):
-            @property
-            def attribute(self):
-                return None
-        foo = Foo()
-        self.assertEqual(True, safe_hasattr(foo, 'attribute'))
-
-    def test_property_raises(self):
-        class Foo(object):
-            @property
-            def attribute(self):
-                1/0
-        foo = Foo()
-        self.assertRaises(ZeroDivisionError, safe_hasattr, foo, 'attribute')
-
-
-class TestTryImport(TestCase):
-
-    def test_doesnt_exist(self):
-        # try_import('thing', foo) returns foo if 'thing' doesn't exist.
-        marker = object()
-        result = try_import('doesntexist', marker)
-        self.assertThat(result, Is(marker))
-
-    def test_None_is_default_alternative(self):
-        # try_import('thing') returns None if 'thing' doesn't exist.
-        result = try_import('doesntexist')
-        self.assertThat(result, Is(None))
-
-    def test_existing_module(self):
-        # try_import('thing', foo) imports 'thing' and returns it if it's a
-        # module that exists.
-        result = try_import('os', object())
-        import os
-        self.assertThat(result, Is(os))
-
-    def test_existing_submodule(self):
-        # try_import('thing.another', foo) imports 'thing' and returns it if
-        # it's a module that exists.
-        result = try_import('os.path', object())
-        import os
-        self.assertThat(result, Is(os.path))
-
-    def test_nonexistent_submodule(self):
-        # try_import('thing.another', foo) imports 'thing' and returns foo if
-        # 'another' doesn't exist.
-        marker = object()
-        result = try_import('os.doesntexist', marker)
-        self.assertThat(result, Is(marker))
-
-    def test_object_from_module(self):
-        # try_import('thing.object') imports 'thing' and returns
-        # 'thing.object' if 'thing' is a module and 'object' is not.
-        result = try_import('os.path.join')
-        import os
-        self.assertThat(result, Is(os.path.join))
-
-    def test_error_callback(self):
-        # the error callback is called on failures.
-        check_error_callback(self, try_import, 'doesntexist', 1, False)
-
-    def test_error_callback_missing_module_member(self):
-        # the error callback is called on failures to find an object
-        # inside an existing module.
-        check_error_callback(self, try_import, 'os.nonexistent', 1, False)
-
-    def test_error_callback_not_on_success(self):
-        # the error callback is not called on success.
-        check_error_callback(self, try_import, 'os.path', 0, True)
-
-
-class TestTryImports(TestCase):
-
-    def test_doesnt_exist(self):
-        # try_imports('thing', foo) returns foo if 'thing' doesn't exist.
-        marker = object()
-        result = try_imports(['doesntexist'], marker)
-        self.assertThat(result, Is(marker))
-
-    def test_fallback(self):
-        result = try_imports(['doesntexist', 'os'])
-        import os
-        self.assertThat(result, Is(os))
-
-    def test_None_is_default_alternative(self):
-        # try_imports('thing') returns None if 'thing' doesn't exist.
-        e = self.assertRaises(
-            ImportError, try_imports, ['doesntexist', 'noreally'])
-        self.assertThat(
-            str(e),
-            Equals("Could not import any of: doesntexist, noreally"))
-
-    def test_existing_module(self):
-        # try_imports('thing', foo) imports 'thing' and returns it if it's a
-        # module that exists.
-        result = try_imports(['os'], object())
-        import os
-        self.assertThat(result, Is(os))
-
-    def test_existing_submodule(self):
-        # try_imports('thing.another', foo) imports 'thing' and returns it if
-        # it's a module that exists.
-        result = try_imports(['os.path'], object())
-        import os
-        self.assertThat(result, Is(os.path))
-
-    def test_nonexistent_submodule(self):
-        # try_imports('thing.another', foo) imports 'thing' and returns foo if
-        # 'another' doesn't exist.
-        marker = object()
-        result = try_imports(['os.doesntexist'], marker)
-        self.assertThat(result, Is(marker))
-
-    def test_fallback_submodule(self):
-        result = try_imports(['os.doesntexist', 'os.path'])
-        import os
-        self.assertThat(result, Is(os.path))
-
-    def test_error_callback(self):
-        # One error for every class that doesn't exist.
-        check_error_callback(self, try_imports,
-            ['os.doesntexist', 'os.notthiseither'],
-            2, False)
-        check_error_callback(self, try_imports,
-            ['os.doesntexist', 'os.notthiseither', 'os'],
-            2, True)
-        check_error_callback(self, try_imports,
-            ['os.path'],
-            0, True)
-
-
 class TestStackHiding(TestCase):
 
     run_tests_with = FullStackRunTest
diff --git a/lib/testtools/testtools/tests/test_run.py b/lib/testtools/testtools/tests/test_run.py
index 5971a4b..ac4b9dd 100644
--- a/lib/testtools/testtools/tests/test_run.py
+++ b/lib/testtools/testtools/tests/test_run.py
@@ -3,26 +3,37 @@
 """Tests for the test runner logic."""
 
 from unittest import TestSuite
+import sys
+from textwrap import dedent
 
+from extras import try_import
+fixtures = try_import('fixtures')
+testresources = try_import('testresources')
+
+import testtools
+from testtools import TestCase, run, skipUnless
 from testtools.compat import (
     _b,
+    _u,
     StringIO,
     )
-from testtools.helpers import try_import
-fixtures = try_import('fixtures')
-
-import testtools
-from testtools import TestCase, run
-from testtools.matchers import Contains
+from testtools.matchers import (
+    Contains,
+    MatchesRegex,
+    )
 
 
 if fixtures:
     class SampleTestFixture(fixtures.Fixture):
         """Creates testtools.runexample temporarily."""
 
-        def __init__(self):
-            self.package = fixtures.PythonPackage(
-            'runexample', [('__init__.py', _b("""
+        def __init__(self, broken=False):
+            """Create a SampleTestFixture.
+
+            :param broken: If True, the sample file will not be importable.
+            """
+            if not broken:
+                init_contents = _b("""\
 from testtools import TestCase
 
 class TestFoo(TestCase):
@@ -33,13 +44,90 @@ class TestFoo(TestCase):
 def test_suite():
     from unittest import TestLoader
     return TestLoader().loadTestsFromName(__name__)
-"""))])
+""")
+            else:
+                init_contents = b"class not in\n"
+            self.package = fixtures.PythonPackage(
+            'runexample', [('__init__.py', init_contents)])
 
         def setUp(self):
             super(SampleTestFixture, self).setUp()
             self.useFixture(self.package)
             testtools.__path__.append(self.package.base)
             self.addCleanup(testtools.__path__.remove, self.package.base)
+            self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
+
+
+if fixtures and testresources:
+    class SampleResourcedFixture(fixtures.Fixture):
+        """Creates a test suite that uses testresources."""
+
+        def __init__(self):
+            super(SampleResourcedFixture, self).__init__()
+            self.package = fixtures.PythonPackage(
+            'resourceexample', [('__init__.py', _b("""
+from fixtures import Fixture
+from testresources import (
+    FixtureResource,
+    OptimisingTestSuite,
+    ResourcedTestCase,
+    )
+from testtools import TestCase
+
+class Printer(Fixture):
+
+    def setUp(self):
+        super(Printer, self).setUp()
+        print('Setting up Printer')
+
+    def reset(self):
+        pass
+
+class TestFoo(TestCase, ResourcedTestCase):
+    # When run, this will print 'Setting up Printer' just once, unless the
+    # OptimisingTestSuite is not honoured, in which case it prints once per
+    # test case.
+    resources=[('res', FixtureResource(Printer()))]
+    def test_bar(self):
+        pass
+    def test_foo(self):
+        pass
+    def test_quux(self):
+        pass
+def test_suite():
+    from unittest import TestLoader
+    return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
+"""))])
+
+        def setUp(self):
+            super(SampleResourcedFixture, self).setUp()
+            self.useFixture(self.package)
+            self.addCleanup(testtools.__path__.remove, self.package.base)
+            testtools.__path__.append(self.package.base)
+
+
+if fixtures and run.have_discover:
+    class SampleLoadTestsPackage(fixtures.Fixture):
+        """Creates a test suite package using load_tests."""
+
+        def __init__(self):
+            super(SampleLoadTestsPackage, self).__init__()
+            self.package = fixtures.PythonPackage(
+            'discoverexample', [('__init__.py', _b("""
+from testtools import TestCase, clone_test_with_new_id
+
+class TestExample(TestCase):
+    def test_foo(self):
+        pass
+
+def load_tests(loader, tests, pattern):
+    tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
+    return tests
+"""))])
+
+        def setUp(self):
+            super(SampleLoadTestsPackage, self).setUp()
+            self.useFixture(self.package)
+            self.addCleanup(sys.path.remove, self.package.base)
 
 
 class TestRun(TestCase):
@@ -49,14 +137,49 @@ class TestRun(TestCase):
         if fixtures is None:
             self.skipTest("Need fixtures")
 
+    def test_run_custom_list(self):
+        self.useFixture(SampleTestFixture())
+        tests = []
+        class CaptureList(run.TestToolsTestRunner):
+            def list(self, test):
+                tests.append(set([case.id() for case
+                    in testtools.testsuite.iterate_tests(test)]))
+        out = StringIO()
+        try:
+            program = run.TestProgram(
+                argv=['prog', '-l', 'testtools.runexample.test_suite'],
+                stdout=out, testRunner=CaptureList)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
+        self.assertEqual([set(['testtools.runexample.TestFoo.test_bar',
+            'testtools.runexample.TestFoo.test_quux'])], tests)
+
     def test_run_list(self):
         self.useFixture(SampleTestFixture())
         out = StringIO()
-        run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+        try:
+            run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
         self.assertEqual("""testtools.runexample.TestFoo.test_bar
 testtools.runexample.TestFoo.test_quux
 """, out.getvalue())
 
+    def test_run_list_failed_import(self):
+        if not run.have_discover:
+            self.skipTest("Need discover")
+        broken = self.useFixture(SampleTestFixture(broken=True))
+        out = StringIO()
+        exc = self.assertRaises(
+            SystemExit,
+            run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
+        self.assertEqual(2, exc.args[0])
+        self.assertEqual("""Failed to import
+runexample
+""", out.getvalue())
+
     def test_run_orders_tests(self):
         self.useFixture(SampleTestFixture())
         out = StringIO()
@@ -73,8 +196,12 @@ testtools.runexample.missingtest
 """))
         finally:
             f.close()
-        run.main(['prog', '-l', '--load-list', tempname,
-            'testtools.runexample.test_suite'], out)
+        try:
+            run.main(['prog', '-l', '--load-list', tempname,
+                'testtools.runexample.test_suite'], out)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
         self.assertEqual("""testtools.runexample.TestFoo.test_bar
 """, out.getvalue())
 
@@ -94,11 +221,42 @@ testtools.runexample.missingtest
 """))
         finally:
             f.close()
-        run.main(['prog', '-l', '--load-list', tempname,
-            'testtools.runexample.test_suite'], out)
+        try:
+            run.main(['prog', '-l', '--load-list', tempname,
+                'testtools.runexample.test_suite'], out)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
         self.assertEqual("""testtools.runexample.TestFoo.test_bar
 """, out.getvalue())
 
+    def test_load_list_preserves_custom_suites(self):
+        if testresources is None:
+            self.skipTest("Need testresources")
+        self.useFixture(SampleResourcedFixture())
+        # We load two of the three tests, leaving one out. Both share a
+        # resource, so we should see just one resource setup occur.
+        tempdir = self.useFixture(fixtures.TempDir())
+        tempname = tempdir.path + '/tests.list'
+        f = open(tempname, 'wb')
+        try:
+            f.write(_b("""
+testtools.resourceexample.TestFoo.test_bar
+testtools.resourceexample.TestFoo.test_foo
+"""))
+        finally:
+            f.close()
+        stdout = self.useFixture(fixtures.StringStream('stdout'))
+        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            try:
+                run.main(['prog', '--load-list', tempname,
+                    'testtools.resourceexample.test_suite'], stdout.stream)
+            except SystemExit:
+                # Evil resides in TestProgram.
+                pass
+        out = stdout.getDetails()['stdout'].as_text()
+        self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
+
     def test_run_failfast(self):
         stdout = self.useFixture(fixtures.StringStream('stdout'))
 
@@ -107,12 +265,43 @@ testtools.runexample.missingtest
                 self.fail('a')
             def test_b(self):
                 self.fail('b')
-        runner = run.TestToolsTestRunner(failfast=True)
         with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            runner = run.TestToolsTestRunner(failfast=True)
             runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
         self.assertThat(
             stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
 
+    def test_stdout_honoured(self):
+        self.useFixture(SampleTestFixture())
+        tests = []
+        out = StringIO()
+        exc = self.assertRaises(SystemExit, run.main,
+            argv=['prog', 'testtools.runexample.test_suite'],
+            stdout=out)
+        self.assertEqual((0,), exc.args)
+        self.assertThat(
+            out.getvalue(),
+            MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+    @skipUnless(run.have_discover, "discovery not present")
+    @skipUnless(fixtures, "fixtures not present")
+    def test_issue_16662(self):
+        # unittest's discover implementation didn't handle load_tests on
+        # packages. A fix is pending upstream, but we want to offer it
+        # to all testtools users regardless of Python version.
+        # See http://bugs.python.org/issue16662
+        pkg = self.useFixture(SampleLoadTestsPackage())
+        out = StringIO()
+        self.assertEqual(None, run.main(
+            ['prog', 'discover', '-l', pkg.package.base], out))
+        self.assertEqual(dedent("""\
+            discoverexample.TestExample.test_foo
+            fred
+            """), out.getvalue())
 
 
 def test_suite():
diff --git a/lib/testtools/testtools/tests/test_runtest.py b/lib/testtools/testtools/tests/test_runtest.py
index afbb8ba..3ae8b13 100644
--- a/lib/testtools/testtools/tests/test_runtest.py
+++ b/lib/testtools/testtools/tests/test_runtest.py
@@ -34,6 +34,12 @@ class TestRunTest(TestCase):
         run = RunTest("bar", handlers)
         self.assertEqual(handlers, run.handlers)
 
+    def test__init____handlers_last_resort(self):
+        handlers = [("quux", "baz")]
+        last_resort = "foo"
+        run = RunTest("bar", handlers, last_resort)
+        self.assertEqual(last_resort, run.last_resort)
+
     def test_run_with_result(self):
         # test.run passes result down to _run_test_method.
         log = []
@@ -61,15 +67,19 @@ class TestRunTest(TestCase):
         run.run()
         self.assertEqual(['foo'], log)
 
-    def test__run_user_does_not_catch_keyboard(self):
-        case = self.make_case()
-        def raises():
-            raise KeyboardInterrupt("yo")
-        run = RunTest(case, None)
+    def test__run_prepared_result_does_not_mask_keyboard(self):
+        class Case(TestCase):
+            def test(self):
+                raise KeyboardInterrupt("go")
+        case = Case('test')
+        run = RunTest(case)
         run.result = ExtendedTestResult()
-        self.assertThat(lambda: run._run_user(raises),
+        self.assertThat(lambda: run._run_prepared_result(run.result),
             Raises(MatchesException(KeyboardInterrupt)))
-        self.assertEqual([], run.result._events)
+        self.assertEqual(
+            [('startTest', case), ('stopTest', case)], run.result._events)
+        # tearDown is still run though!
+        self.assertEqual(True, getattr(case, '_TestCase__teardown_called'))
 
     def test__run_user_calls_onException(self):
         case = self.make_case()
@@ -103,21 +113,43 @@ class TestRunTest(TestCase):
         self.assertEqual([], run.result._events)
         self.assertEqual([], log)
 
-    def test__run_user_uncaught_Exception_raised(self):
-        case = self.make_case()
+    def test__run_prepared_result_uncaught_Exception_raised(self):
         e = KeyError('Yo')
-        def raises():
-            raise e
+        class Case(TestCase):
+            def test(self):
+                raise e
+        case = Case('test')
         log = []
         def log_exc(self, result, err):
             log.append((result, err))
         run = RunTest(case, [(ValueError, log_exc)])
         run.result = ExtendedTestResult()
-        self.assertThat(lambda: run._run_user(raises),
+        self.assertThat(lambda: run._run_prepared_result(run.result),
             Raises(MatchesException(KeyError)))
-        self.assertEqual([], run.result._events)
+        self.assertEqual(
+            [('startTest', case), ('stopTest', case)], run.result._events)
         self.assertEqual([], log)
 
+    def test__run_prepared_result_uncaught_Exception_triggers_error(self):
+        # https://bugs.launchpad.net/testtools/+bug/1364188
+        # When something isn't handled, the test that was
+        # executing has errored, one way or another.
+        e = SystemExit(0)
+        class Case(TestCase):
+            def test(self):
+                raise e
+        case = Case('test')
+        log = []
+        def log_exc(self, result, err):
+            log.append((result, err))
+        run = RunTest(case, [], log_exc)
+        run.result = ExtendedTestResult()
+        self.assertThat(lambda: run._run_prepared_result(run.result),
+            Raises(MatchesException(SystemExit)))
+        self.assertEqual(
+            [('startTest', case), ('stopTest', case)], run.result._events)
+        self.assertEqual([(run.result, e)], log)
+
     def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
         case = self.make_case()
         def broken_handler(exc_info):
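
For orientation, the last_resort changes above pin down the new RunTest
contract: when an exception matches none of the registered
(exc_class, handler) pairs, the handler of last resort is invoked before
the exception propagates. A minimal sketch mirroring the tests (Explodes
and record are illustrative names, and the tests only constrain the last
two handler arguments):

    from testtools import TestCase
    from testtools.runtest import RunTest
    from testtools.testresult.doubles import ExtendedTestResult

    class Explodes(TestCase):
        def test(self):
            raise SystemExit(0)

    seen = []
    def record(context, result, err):
        # Reached only because no ordinary handler matched SystemExit.
        seen.append((result, err))

    run = RunTest(Explodes('test'), [], record)
    run.result = ExtendedTestResult()
    try:
        run._run_prepared_result(run.result)
    except SystemExit:
        pass  # still propagates, but 'seen' now holds (result, exception)
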
diff --git a/lib/testtools/testtools/tests/test_spinner.py b/lib/testtools/testtools/tests/test_spinner.py
index 3d677bd..31110ca 100644
--- a/lib/testtools/testtools/tests/test_spinner.py
+++ b/lib/testtools/testtools/tests/test_spinner.py
@@ -5,11 +5,12 @@
 import os
 import signal
 
+from extras import try_import
+
 from testtools import (
     skipIf,
     TestCase,
     )
-from testtools.helpers import try_import
 from testtools.matchers import (
     Equals,
     Is,
@@ -231,7 +232,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
         from twisted.internet.protocol import ServerFactory
         reactor = self.make_reactor()
         spinner = self.make_spinner(reactor)
-        port = reactor.listenTCP(0, ServerFactory())
+        port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
         spinner.run(self.make_timeout(), lambda: None)
         results = spinner.get_junk()
         self.assertThat(results, Equals([port]))
@@ -244,14 +245,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
         timeout = self.make_timeout()
         spinner = self.make_spinner(reactor)
         spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
-        # Python before 2.5 has a race condition with thread handling where
-        # join() does not remove threads from enumerate before returning - the
-        # thread being joined does the removal. This was fixed in Python 2.5
-        # but we still support 2.4, so we have to workaround the issue.
-        # http://bugs.python.org/issue1703448.
-        self.assertThat(
-            [thread for thread in threading.enumerate() if thread.isAlive()],
-            Equals(current_threads))
+        self.assertThat(list(threading.enumerate()), Equals(current_threads))
 
     def test_leftover_junk_available(self):
         # If 'run' is given a function that leaves the reactor dirty in some
@@ -261,7 +255,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
         reactor = self.make_reactor()
         spinner = self.make_spinner(reactor)
         port = spinner.run(
-            self.make_timeout(), reactor.listenTCP, 0, ServerFactory())
+            self.make_timeout(), reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
         self.assertThat(spinner.get_junk(), Equals([port]))
 
     def test_will_not_run_with_previous_junk(self):
@@ -271,7 +265,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
         reactor = self.make_reactor()
         spinner = self.make_spinner(reactor)
         timeout = self.make_timeout()
-        spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
+        spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
         self.assertThat(lambda: spinner.run(timeout, lambda: None),
             Raises(MatchesException(_spinner.StaleJunkError)))
 
@@ -282,7 +276,7 @@ class TestRunInReactor(NeedsTwistedTestCase):
         reactor = self.make_reactor()
         spinner = self.make_spinner(reactor)
         timeout = self.make_timeout()
-        port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
+        port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
         junk = spinner.clear_junk()
         self.assertThat(junk, Equals([port]))
         self.assertThat(spinner.get_junk(), Equals([]))
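
A side note on the repeated interface='127.0.0.1' change above:
listenTCP(0, ...) previously bound the OS-assigned port on all
interfaces, which can trip firewall prompts and briefly expose a
listener to the network while the suite runs. Binding to loopback keeps
the socket local. In isolation:

    from twisted.internet import reactor
    from twisted.internet.protocol import ServerFactory

    # Port 0 asks the OS for any free port; binding to 127.0.0.1 keeps
    # the listener off external interfaces for the lifetime of the test.
    port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
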
diff --git a/lib/testtools/testtools/tests/test_testcase.py b/lib/testtools/testtools/tests/test_testcase.py
index eca781b..4f3e146 100644
--- a/lib/testtools/testtools/tests/test_testcase.py
+++ b/lib/testtools/testtools/tests/test_testcase.py
@@ -8,6 +8,7 @@ import sys
 import unittest
 
 from testtools import (
+    DecorateTestCaseResult,
     ErrorHolder,
     MultipleExceptions,
     PlaceHolder,
@@ -23,15 +24,23 @@ from testtools.compat import (
     _b,
     _u,
     )
-from testtools.content import TracebackContent
+from testtools.content import (
+    text_content,
+    TracebackContent,
+    )
 from testtools.matchers import (
     Annotate,
     DocTestMatches,
     Equals,
+    HasLength,
     MatchesException,
     Raises,
     )
-from testtools.testcase import Nullary
+from testtools.testcase import (
+    attr,
+    Nullary,
+    WithAttributes,
+    )
 from testtools.testresult.doubles import (
     Python26TestResult,
     Python27TestResult,
@@ -109,7 +118,8 @@ class TestPlaceHolder(TestCase):
         log = []
         test.run(LoggingResult(log))
         self.assertEqual(
-            [('startTest', test), ('addSuccess', test), ('stopTest', test)],
+            [('tags', set(), set()), ('startTest', test), ('addSuccess', test),
+             ('stopTest', test), ('tags', set(), set()),],
             log)
 
     def test_supplies_details(self):
@@ -118,9 +128,27 @@ class TestPlaceHolder(TestCase):
         result = ExtendedTestResult()
         test.run(result)
         self.assertEqual(
-            [('startTest', test),
+            [('tags', set(), set()),
+             ('startTest', test),
              ('addSuccess', test, details),
-             ('stopTest', test)],
+             ('stopTest', test),
+             ('tags', set(), set()),
+             ],
+            result._events)
+
+    def test_supplies_timestamps(self):
+        test = PlaceHolder('foo', details={}, timestamps=["A", "B"])
+        result = ExtendedTestResult()
+        test.run(result)
+        self.assertEqual(
+            [('time', "A"),
+             ('tags', set(), set()),
+             ('startTest', test),
+             ('time', "B"),
+             ('addSuccess', test),
+             ('stopTest', test),
+             ('tags', set(), set()),
+             ],
             result._events)
 
     def test_call_is_run(self):
@@ -141,6 +169,19 @@ class TestPlaceHolder(TestCase):
         # A PlaceHolder can be debugged.
         self.makePlaceHolder().debug()
 
+    def test_supports_tags(self):
+        result = ExtendedTestResult()
+        tags = set(['foo', 'bar'])
+        case = PlaceHolder("foo", tags=tags)
+        case.run(result)
+        self.assertEqual([
+            ('tags', tags, set()),
+            ('startTest', case),
+            ('addSuccess', case),
+            ('stopTest', case),
+            ('tags', set(), tags),
+            ], result._events)
+
 
 class TestErrorHolder(TestCase):
     # Note that these tests exist because ErrorHolder exists - it could be
@@ -194,9 +235,11 @@ class TestErrorHolder(TestCase):
         log = result._events
         test.run(result)
         self.assertEqual(
-            [('startTest', test),
+            [('tags', set(), set()),
+             ('startTest', test),
              ('addError', test, test._details),
-             ('stopTest', test)], log)
+             ('stopTest', test),
+             ('tags', set(), set())], log)
 
     def test_call_is_run(self):
         # A PlaceHolder can be called, in which case it behaves like run.
@@ -259,6 +302,19 @@ class TestAssertions(TestCase):
         # assertRaises asserts that a callable raises a particular exception.
         self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
 
+    def test_assertRaises_exception_w_metaclass(self):
+        # assertRaises works when called for exceptions with custom metaclasses
+        class MyExMeta(type):
+            def __init__(cls, name, bases, dct):
+                """ Do some dummy metaclass stuff """
+                dct.update({'answer': 42})
+                type.__init__(cls, name, bases, dct)
+
+        class MyEx(Exception):
+            __metaclass__ = MyExMeta
+
+        self.assertRaises(MyEx, self.raiseError, MyEx)
+
     def test_assertRaises_fails_when_no_error_raised(self):
         # assertRaises raises self.failureException when it's passed a
         # callable that raises no error.
@@ -344,6 +400,15 @@
             '%r not in %r' % ('qux', 'foo bar baz'),
             self.assertIn, 'qux', 'foo bar baz')
 
+    def test_assertIn_failure_with_message(self):
+        # assertIn(needle, haystack) fails the test when 'needle' is not in
+        # 'haystack'.
+        self.assertFails('3 not in [0, 1, 2]: foo bar', self.assertIn, 3,
+                         [0, 1, 2], 'foo bar')
+        self.assertFails(
+            '%r not in %r: foo bar' % ('qux', 'foo bar baz'),
+            self.assertIn, 'qux', 'foo bar baz', 'foo bar')
+
     def test_assertNotIn_success(self):
         # assertNotIn(needle, haystack) asserts that 'needle' is not in
         # 'haystack'.
@@ -359,6 +425,15 @@
             "'foo bar baz' matches Contains('foo')",
             self.assertNotIn, 'foo', 'foo bar baz')
 
+    def test_assertNotIn_failure_with_message(self):
+        # assertNotIn(needle, haystack) fails the test when 'needle' is in
+        # 'haystack'.
+        self.assertFails('[1, 2, 3] matches Contains(3): foo bar', self.assertNotIn,
+            3, [1, 2, 3], 'foo bar')
+        self.assertFails(
+            "'foo bar baz' matches Contains('foo'): foo bar",
+            self.assertNotIn, 'foo', 'foo bar baz', "foo bar")
+
     def test_assertIsInstance(self):
         # assertIsInstance asserts that an object is an instance of a class.
 
@@ -513,6 +591,48 @@ class TestAssertions(TestCase):
         self.assertFails(
             expected, self.assertThat, matchee, matcher, verbose=True)
 
+    def test_expectThat_matches_clean(self):
+        class Matcher(object):
+            def match(self, foo):
+                return None
+        self.expectThat("foo", Matcher())
+
+    def test_expectThat_mismatch_fails_test(self):
+        class Test(TestCase):
+            def test(self):
+                self.expectThat("foo", Equals("bar"))
+        result = Test("test").run()
+        self.assertFalse(result.wasSuccessful())
+
+    def test_expectThat_does_not_exit_test(self):
+        class Test(TestCase):
+            marker = False
+            def test(self):
+                self.expectThat("foo", Equals("bar"))
+                Test.marker = True
+        result = Test("test").run()
+        self.assertFalse(result.wasSuccessful())
+        self.assertTrue(Test.marker)
+
+    def test_expectThat_adds_detail(self):
+        class Test(TestCase):
+            def test(self):
+                self.expectThat("foo", Equals("bar"))
+        test = Test("test")
+        result = test.run()
+        details = test.getDetails()
+        self.assertTrue("Failed expectation" in details)
+
+    def test__force_failure_fails_test(self):
+        class Test(TestCase):
+            def test_foo(self):
+                self.force_failure = True
+                self.remaining_code_run = True
+        test = Test('test_foo')
+        result = test.run()
+        self.assertFalse(result.wasSuccessful())
+        self.assertTrue(test.remaining_code_run)
+
     def get_error_string(self, e):
         """Get the string showing how 'e' would be formatted in test output.
 
@@ -609,6 +729,18 @@ class TestAssertions(TestCase):
         self.assertFails(expected_error, self.assertIsNotNone, None)
 
 
+    def test_fail_preserves_traceback_detail(self):
+        class Test(TestCase):
+            def test(self):
+                self.addDetail('traceback', text_content('foo'))
+                self.fail('bar')
+        test = Test('test')
+        result = ExtendedTestResult()
+        test.run(result)
+        self.assertEqual(set(['traceback', 'traceback-1']),
+            set(result._events[1][2].keys()))
+
+
 class TestAddCleanup(TestCase):
     """Tests for TestCase.addCleanup."""
 
@@ -777,6 +909,18 @@ class TestAddCleanup(TestCase):
             set(self.logging_result._events[1][2].keys()))
 
 
+class TestRunTestUsage(TestCase):
+
+    def test_last_resort_in_place(self):
+        class TestBase(TestCase):
+            def test_base_exception(self):
+                raise SystemExit(0)
+        result = ExtendedTestResult()
+        test = TestBase("test_base_exception")
+        self.assertRaises(SystemExit, test.run, result)
+        self.assertFalse(result.wasSuccessful())
+
+
 class TestWithDetails(TestCase):
 
     run_test_with = FullStackRunTest
@@ -874,6 +1018,28 @@ class TestExpectedFailure(TestWithDetails):
         self.assertDetailsProvided(case, "addUnexpectedSuccess",
             ["foo", "reason"])
 
+    @skipIf(not hasattr(unittest, 'expectedFailure'), 'Need py27+')
+    def test_unittest_expectedFailure_decorator_works_with_failure(self):
+        class ReferenceTest(TestCase):
+            @unittest.expectedFailure
+            def test_fails_expectedly(self):
+                self.assertEquals(1, 0)
+
+        test = ReferenceTest('test_fails_expectedly')
+        result = test.run()
+        self.assertEqual(True, result.wasSuccessful())
+
+    @skipIf(not hasattr(unittest, 'expectedFailure'), 'Need py27+')
+    def test_unittest_expectedFailure_decorator_works_with_success(self):
+        class ReferenceTest(TestCase):
+            @unittest.expectedFailure
+            def test_passes_unexpectedly(self):
+                self.assertEquals(1, 1)
+
+        test = ReferenceTest('test_passes_unexpectedly')
+        result = test.run()
+        self.assertEqual(False, result.wasSuccessful())
+
 
 class TestUniqueFactories(TestCase):
     """Tests for getUniqueString and getUniqueInteger."""
@@ -1039,11 +1205,32 @@ class TestDetailsProvided(TestWithDetails):
         self.assertDetailsProvided(Case("test"), "addFailure",
             ["foo", "foo-1", "traceback"])
 
+    def test_addDetailUniqueName_works(self):
+        content = self.get_content()
+        class Case(TestCase):
+            def test(self):
+                self.addDetailUniqueName("foo", content)
+                self.addDetailUniqueName("foo", content)
+        self.assertDetailsProvided(Case("test"), "addSuccess",
+            ["foo", "foo-1"])
+
 
 class TestSetupTearDown(TestCase):
 
     run_test_with = FullStackRunTest
 
+    def test_setUpCalledTwice(self):
+        class CallsTooMuch(TestCase):
+            def test_method(self):
+                self.setUp()
+        result = unittest.TestResult()
+        CallsTooMuch('test_method').run(result)
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
     def test_setUpNotCalled(self):
         class DoesnotcallsetUp(TestCase):
             def setUp(self):
@@ -1052,7 +1239,23 @@ class TestSetupTearDown(TestCase):
                 pass
         result = unittest.TestResult()
         DoesnotcallsetUp('test_method').run(result)
-        self.assertEqual(1, len(result.errors))
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
+    def test_tearDownCalledTwice(self):
+        class CallsTooMuch(TestCase):
+            def test_method(self):
+                self.tearDown()
+        result = unittest.TestResult()
+        CallsTooMuch('test_method').run(result)
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
 
     def test_tearDownNotCalled(self):
         class DoesnotcalltearDown(TestCase):
@@ -1062,7 +1265,17 @@ class TestSetupTearDown(TestCase):
                 pass
         result = unittest.TestResult()
         DoesnotcalltearDown('test_method').run(result)
-        self.assertEqual(1, len(result.errors))
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
+
+require_py27_minimum = skipIf(
+    sys.version_info < (2, 7),
+    "Requires Python 2.7 or greater"
+)
 
 
 class TestSkipping(TestCase):
@@ -1168,6 +1381,71 @@ class TestSkipping(TestCase):
         test.run(result)
         self.assertEqual('addSuccess', result._events[1][0])
 
+    def check_skip_decorator_does_not_run_setup(self, decorator, reason):
+        class SkippingTest(TestCase):
+
+            setup_ran = False
+
+            def setUp(self):
+                super(SkippingTest, self).setUp()
+                self.setup_ran = True
+
+            # Use the decorator passed to us:
+            @decorator
+            def test_skipped(self):
+                self.fail()
+
+        test = SkippingTest('test_skipped')
+        result = test.run()
+        self.assertTrue(result.wasSuccessful())
+        self.assertTrue(reason in result.skip_reasons, result.skip_reasons)
+        self.assertFalse(test.setup_ran)
+
+    def test_testtools_skip_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            skip(reason),
+            reason
+        )
+
+    def test_testtools_skipIf_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            skipIf(True, reason),
+            reason
+        )
+
+    def test_testtools_skipUnless_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            skipUnless(False, reason),
+            reason
+        )
+
+    @require_py27_minimum
+    def test_unittest_skip_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            unittest.skip(reason),
+            reason
+        )
+
+    @require_py27_minimum
+    def test_unittest_skipIf_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            unittest.skipIf(True, reason),
+            reason
+        )
+
+    @require_py27_minimum
+    def test_unittest_skipUnless_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            unittest.skipUnless(False, reason),
+            reason
+        )
+
 
 class TestOnException(TestCase):
 
@@ -1330,6 +1608,126 @@ class TestNullary(TestCase):
         self.assertRaises(ZeroDivisionError, wrapped)
 
 
+class TestAttributes(TestCase):
+
+    def test_simple_attr(self):
+        # Adding an attr to a test changes its id().
+        class MyTest(WithAttributes, TestCase):
+            @attr('foo')
+            def test_bar(self):
+                pass
+        case = MyTest('test_bar')
+        self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]',
+            case.id())
+
+    def test_multiple_attributes(self):
+        class MyTest(WithAttributes, TestCase):
+            # Not sorted here, forward or backwards.
+            @attr('foo', 'quux', 'bar')
+            def test_bar(self):
+                pass
+        case = MyTest('test_bar')
+        self.assertEqual(
+            'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+            case.id())
+
+    def test_multiple_attr_decorators(self):
+        class MyTest(WithAttributes, TestCase):
+            # Not sorted here, forward or backwards.
+            @attr('bar')
+            @attr('quux')
+            @attr('foo')
+            def test_bar(self):
+                pass
+        case = MyTest('test_bar')
+        self.assertEqual(
+            'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+            case.id())
+
+
+class TestDecorateTestCaseResult(TestCase):
+
+    def setUp(self):
+        super(TestDecorateTestCaseResult, self).setUp()
+        self.log = []
+
+    def make_result(self, result):
+        self.log.append(('result', result))
+        return LoggingResult(self.log)
+
+    def test___call__(self):
+        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+        case(None)
+        case('something')
+        self.assertEqual([('result', None),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            ('result', 'something'),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set())
+            ], self.log)
+
+    def test_run(self):
+        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+        case.run(None)
+        case.run('something')
+        self.assertEqual([('result', None),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            ('result', 'something'),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set())
+            ], self.log)
+
+    def test_before_after_hooks(self):
+        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result,
+            before_run=lambda result: self.log.append('before'),
+            after_run=lambda result: self.log.append('after'))
+        case.run(None)
+        case(None)
+        self.assertEqual([
+            ('result', None),
+            'before',
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            'after',
+            ('result', None),
+            'before',
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            'after',
+            ], self.log)
+
+    def test_other_attribute(self):
+        orig = PlaceHolder('foo')
+        orig.thing = 'fred'
+        case = DecorateTestCaseResult(orig, self.make_result)
+        self.assertEqual('fred', case.thing)
+        self.assertRaises(AttributeError, getattr, case, 'other')
+        case.other = 'barbara'
+        self.assertEqual('barbara', orig.other)
+        del case.thing
+        self.assertRaises(AttributeError, getattr, orig, 'thing')
+
+
 def test_suite():
     from unittest import TestLoader
     return TestLoader().loadTestsFromName(__name__)
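
The TestAttributes cases above fix the id-mangling contract for the new
attr/WithAttributes helpers: tags accumulate across stacked decorators
and render sorted, comma-separated, in brackets after the test id. A
short usage sketch (TaggedTest and its method are illustrative names):

    from testtools import TestCase
    from testtools.testcase import attr, WithAttributes

    class TaggedTest(WithAttributes, TestCase):
        @attr('net')
        @attr('slow')
        def test_download(self):
            pass

    # Prints something like: __main__.TaggedTest.test_download[net,slow]
    print(TaggedTest('test_download').id())
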
diff --git a/lib/testtools/testtools/tests/test_testresult.py b/lib/testtools/testtools/tests/test_testresult.py
index 68fcc38..a0a8aa3 100644
--- a/lib/testtools/testtools/tests/test_testresult.py
+++ b/lib/testtools/testtools/tests/test_testresult.py
@@ -7,6 +7,7 @@ __metaclass__ = type
 import codecs
 import datetime
 import doctest
+from itertools import chain, combinations
 import os
 import shutil
 import sys
@@ -15,17 +16,33 @@ import threading
 from unittest import TestSuite
 import warnings
 
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
 from testtools import (
+    CopyStreamResult,
     ExtendedToOriginalDecorator,
+    ExtendedToStreamDecorator,
     MultiTestResult,
     PlaceHolder,
+    StreamFailFast,
+    StreamResult,
+    StreamResultRouter,
+    StreamSummary,
+    StreamTagger,
+    StreamToDict,
+    StreamToExtendedDecorator,
+    StreamToQueue,
     Tagger,
     TestCase,
+    TestControl,
     TestResult,
     TestResultDecorator,
     TestByTestResult,
     TextTestResult,
     ThreadsafeForwardingResult,
+    TimestampingStreamResult,
     testresult,
     )
 from testtools.compat import (
@@ -44,11 +61,12 @@ from testtools.content import (
     TracebackContent,
     )
 from testtools.content_type import ContentType, UTF8_TEXT
-from testtools.helpers import safe_hasattr
 from testtools.matchers import (
+    AllMatch,
     Contains,
     DocTestMatches,
     Equals,
+    HasLength,
     MatchesAny,
     MatchesException,
     Raises,
@@ -63,6 +81,7 @@ from testtools.testresult.doubles import (
     Python26TestResult,
     Python27TestResult,
     ExtendedTestResult,
+    StreamResult as LoggingStreamResult,
     )
 from testtools.testresult.real import (
     _details_to_str,
@@ -221,18 +240,21 @@ class TagsContract(Python27Contract):
     def test_no_tags_by_default(self):
         # Results initially have no tags.
         result = self.makeResult()
+        result.startTestRun()
         self.assertEqual(frozenset(), result.current_tags)
 
     def test_adding_tags(self):
         # Tags are added using 'tags' and thus become visible in
         # 'current_tags'.
         result = self.makeResult()
+        result.startTestRun()
         result.tags(set(['foo']), set())
         self.assertEqual(set(['foo']), result.current_tags)
 
     def test_removing_tags(self):
         # Tags are removed using 'tags'.
         result = self.makeResult()
+        result.startTestRun()
         result.tags(set(['foo']), set())
         result.tags(set(), set(['foo']))
         self.assertEqual(set(), result.current_tags)
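
The pattern in these contract hunks - inserting result.startTestRun()
before the first tags() call - reflects the tightened lifecycle: tag
state is only promised between startTestRun() and stopTestRun(). A
well-behaved caller therefore looks like:

    from testtools import TestResult

    result = TestResult()
    result.startTestRun()
    result.tags(set(['foo']), set())
    assert result.current_tags == set(['foo'])
    result.stopTestRun()
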
@@ -240,6 +262,7 @@ class TagsContract(Python27Contract):
     def test_startTestRun_resets_tags(self):
         # startTestRun makes a new test run, and thus clears all the tags.
         result = self.makeResult()
+        result.startTestRun()
         result.tags(set(['foo']), set())
         result.startTestRun()
         self.assertEqual(set(), result.current_tags)
@@ -437,6 +460,12 @@ class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
         return ExtendedToOriginalDecorator(Python27TestResult())
 
 
+class TestAdaptedStreamResult(TestCase, DetailsContract):
+
+    def makeResult(self):
+        return ExtendedToStreamDecorator(StreamResult())
+
+
 class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
 
     run_test_with = FullStackRunTest
@@ -445,6 +474,581 @@ class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
         return TestResultDecorator(TestResult())
 
 
+# DetailsContract because ExtendedToStreamDecorator follows Python for
+# uxsuccess handling.
+class TestStreamToExtendedContract(TestCase, DetailsContract):
+
+    def makeResult(self):
+        return ExtendedToStreamDecorator(
+            StreamToExtendedDecorator(ExtendedTestResult()))
+
+
+class TestStreamResultContract(object):
+
+    def _make_result(self):
+        raise NotImplementedError(self._make_result)
+
+    def test_startTestRun(self):
+        result = self._make_result()
+        result.startTestRun()
+        result.stopTestRun()
+
+    def test_files(self):
+        # Test parameter combinations when files are being emitted.
+        result = self._make_result()
+        result.startTestRun()
+        self.addCleanup(result.stopTestRun)
+        now = datetime.datetime.now(utc)
+        inputs = list(dict(
+            eof=True,
+            mime_type="text/plain",
+            route_code=_u("1234"),
+            test_id=_u("foo"),
+            timestamp=now,
+            ).items())
+        param_dicts = self._power_set(inputs)
+        for kwargs in param_dicts:
+            result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
+            result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)
+
+    def test_test_status(self):
+        # Tests non-file attachment parameter combinations.
+        result = self._make_result()
+        result.startTestRun()
+        self.addCleanup(result.stopTestRun)
+        now = datetime.datetime.now(utc)
+        args = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
+            'uxsuccess', 'success', 'fail', 'skip']]
+        inputs = list(dict(
+            runnable=False,
+            test_tags=set(['quux']),
+            route_code=_u("1234"),
+            timestamp=now,
+            ).items())
+        param_dicts = self._power_set(inputs)
+        for kwargs in param_dicts:
+            for arg in args:
+                result.status(test_id=arg[0], test_status=arg[1], **kwargs)
+
+    def _power_set(self, iterable):
+        "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+        s = list(iterable)
+        param_dicts = []
+        for ss in chain.from_iterable(combinations(s, r) for r in range(len(s)+1)):
+            param_dicts.append(dict(ss))
+        return param_dicts
+
+
+class TestBaseStreamResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamResult()
+
+
+class TestCopyStreamResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return CopyStreamResult([StreamResult(), StreamResult()])
+
+
+class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return LoggingStreamResult()
+
+
+class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamSummary()
+
+
+class TestStreamTaggerContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamTagger([StreamResult()], add=set(), discard=set())
+
+
+class TestStreamToDictContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamToDict(lambda x: None)
+
+
+class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamToExtendedDecorator(ExtendedTestResult())
+
+
+class TestStreamToQueueContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        queue = Queue()
+        return StreamToQueue(queue, "foo")
+
+
+class TestStreamFailFastContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamFailFast(lambda: None)
+
+
+class TestStreamResultRouterContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamResultRouter(StreamResult())
+
+
+class TestDoubleStreamResultEvents(TestCase):
+
+    def test_startTestRun(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        self.assertEqual([('startTestRun',)], result._events)
+
+    def test_stopTestRun(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        result.stopTestRun()
+        self.assertEqual([('startTestRun',), ('stopTestRun',)], result._events)
+
+    def test_file(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        now = datetime.datetime.now(utc)
+        result.status(file_name="foo", file_bytes="bar", eof=True, mime_type="text/json",
+            test_id="id", route_code='abc', timestamp=now)
+        self.assertEqual(
+            [('startTestRun',),
+             ('status', 'id', None, None, True, 'foo', 'bar', True, 'text/json', 'abc', now)],
+            result._events)
+
+    def test_status(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        now = datetime.datetime.now(utc)
+        result.status("foo", "success", test_tags=set(['tag']),
+            runnable=False, route_code='abc', timestamp=now)
+        self.assertEqual(
+            [('startTestRun',),
+             ('status', 'foo', 'success', set(['tag']), False, None, None, False, None, 'abc', now)],
+            result._events)
+
+
+class TestCopyStreamResultCopies(TestCase):
+
+    def setUp(self):
+        super(TestCopyStreamResultCopies, self).setUp()
+        self.target1 = LoggingStreamResult()
+        self.target2 = LoggingStreamResult()
+        self.targets = [self.target1._events, self.target2._events]
+        self.result = CopyStreamResult([self.target1, self.target2])
+
+    def test_startTestRun(self):
+        self.result.startTestRun()
+        self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))
+
+    def test_stopTestRun(self):
+        self.result.startTestRun()
+        self.result.stopTestRun()
+        self.assertThat(self.targets,
+            AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+
+    def test_status(self):
+        self.result.startTestRun()
+        now = datetime.datetime.now(utc)
+        self.result.status("foo", "success", test_tags=set(['tag']),
+            runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
+            mime_type="text/json", route_code='abc', timestamp=now)
+        self.assertThat(self.targets,
+            AllMatch(Equals([('startTestRun',),
+                ('status', 'foo', 'success', set(['tag']), False, "foo",
+                 b'bar', True, "text/json", 'abc', now)
+                ])))
+
+
+class TestStreamTagger(TestCase):
+
+    def test_adding(self):
+        log = LoggingStreamResult()
+        result = StreamTagger([log], add=['foo'])
+        result.startTestRun()
+        result.status()
+        result.status(test_tags=set(['bar']))
+        result.status(test_tags=None)
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+            ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
+            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+            ('stopTestRun',),
+            ], log._events)
+
+    def test_discarding(self):
+        log = LoggingStreamResult()
+        result = StreamTagger([log], discard=['foo'])
+        result.startTestRun()
+        result.status()
+        result.status(test_tags=None)
+        result.status(test_tags=set(['foo']))
+        result.status(test_tags=set(['bar']))
+        result.status(test_tags=set(['foo', 'bar']))
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status', None, None, None, True, None, None, False, None, None, None),
+            ('status', None, None, None, True, None, None, False, None, None, None),
+            ('status', None, None, None, True, None, None, False, None, None, None),
+            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+            ('stopTestRun',),
+            ], log._events)
+
+
+class TestStreamToDict(TestCase):
+
+    def test_hung_test(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status('foo', 'inprogress')
+        self.assertEqual([], tests)
+        result.stopTestRun()
+        self.assertEqual([
+            {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
+             'timestamps': [None, None]}
+            ], tests)
+
+    def test_all_terminal_states_reported(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status('success', 'success')
+        result.status('skip', 'skip')
+        result.status('exists', 'exists')
+        result.status('fail', 'fail')
+        result.status('xfail', 'xfail')
+        result.status('uxsuccess', 'uxsuccess')
+        self.assertThat(tests, HasLength(6))
+        self.assertEqual(
+            ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
+            [test['id'] for test in tests])
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(6))
+
+    def test_files_reported(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status(file_name="some log.txt",
+            file_bytes=_b("1234 log message"), eof=True,
+            mime_type="text/plain; charset=utf8", test_id="foo.bar")
+        result.status(file_name="another file",
+            file_bytes=_b("""Traceback..."""), test_id="foo.bar")
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(1))
+        test = tests[0]
+        self.assertEqual("foo.bar", test['id'])
+        self.assertEqual("unknown", test['status'])
+        details = test['details']
+        self.assertEqual(
+            _u("1234 log message"), details['some log.txt'].as_text())
+        self.assertEqual(
+            _b("Traceback..."),
+            _b('').join(details['another file'].iter_bytes()))
+        self.assertEqual(
+            "application/octet-stream", repr(details['another file'].content_type))
+
+    def test_bad_mime(self):
+        # Testtools used to emit malformed mime types; this checks that
+        # the specific corruption is handled.
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status(file_name="file", file_bytes=b'a',
+            mime_type='text/plain; charset=utf8, language=python',
+            test_id='id')
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(1))
+        test = tests[0]
+        self.assertEqual("id", test['id'])
+        details = test['details']
+        self.assertEqual(_u("a"), details['file'].as_text())
+        self.assertEqual(
+            "text/plain; charset=\"utf8\"",
+            repr(details['file'].content_type))
+
+    def test_timestamps(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status(test_id='foo', test_status='inprogress', timestamp="A")
+        result.status(test_id='foo', test_status='success', timestamp="B")
+        result.status(test_id='bar', test_status='inprogress', timestamp="C")
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(2))
+        self.assertEqual(["A", "B"], tests[0]['timestamps'])
+        self.assertEqual(["C", None], tests[1]['timestamps'])
+
+
+class TestExtendedToStreamDecorator(TestCase):
+
+    def test_explicit_time(self):
+        log = LoggingStreamResult()
+        result = ExtendedToStreamDecorator(log)
+        result.startTestRun()
+        now = datetime.datetime.now(utc)
+        result.time(now)
+        result.startTest(self)
+        result.addSuccess(self)
+        result.stopTest(self)
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status',
+             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+             'inprogress',
+             None,
+             True,
+             None,
+             None,
+             False,
+             None,
+             None,
+             now),
+            ('status',
+             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+             'success',
+              set(),
+              True,
+              None,
+              None,
+              False,
+              None,
+              None,
+              now),
+             ('stopTestRun',)], log._events)
+
+    def test_wasSuccessful_after_stopTestRun(self):
+        log = LoggingStreamResult()
+        result = ExtendedToStreamDecorator(log)
+        result.startTestRun()
+        result.status(test_id='foo', test_status='fail')
+        result.stopTestRun()
+        self.assertEqual(False, result.wasSuccessful())
+
+
+class TestStreamFailFast(TestCase):
+
+    def test_inprogress(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'inprogress')
+
+    def test_exists(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'exists')
+
+    def test_xfail(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'xfail')
+
+    def test_uxsuccess(self):
+        calls = []
+        def hook():
+            calls.append("called")
+        result = StreamFailFast(hook)
+        result.status('foo', 'uxsuccess')
+        result.status('foo', 'uxsuccess')
+        self.assertEqual(['called', 'called'], calls)
+
+    def test_success(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'success')
+
+    def test_fail(self):
+        calls = []
+        def hook():
+            calls.append("called")
+        result = StreamFailFast(hook)
+        result.status('foo', 'fail')
+        result.status('foo', 'fail')
+        self.assertEqual(['called', 'called'], calls)
+
+    def test_skip(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'skip')
+
+
+class TestStreamSummary(TestCase):
+
+    def test_attributes(self):
+        result = StreamSummary()
+        result.startTestRun()
+        self.assertEqual([], result.failures)
+        self.assertEqual([], result.errors)
+        self.assertEqual([], result.skipped)
+        self.assertEqual([], result.expectedFailures)
+        self.assertEqual([], result.unexpectedSuccesses)
+        self.assertEqual(0, result.testsRun)
+
+    def test_startTestRun(self):
+        result = StreamSummary()
+        result.startTestRun()
+        result.failures.append('x')
+        result.errors.append('x')
+        result.skipped.append('x')
+        result.expectedFailures.append('x')
+        result.unexpectedSuccesses.append('x')
+        result.testsRun = 1
+        result.startTestRun()
+        self.assertEqual([], result.failures)
+        self.assertEqual([], result.errors)
+        self.assertEqual([], result.skipped)
+        self.assertEqual([], result.expectedFailures)
+        self.assertEqual([], result.unexpectedSuccesses)
+        self.assertEqual(0, result.testsRun)
+
+    def test_wasSuccessful(self):
+        # wasSuccessful returns False if any of
+        # failures/errors is non-empty.
+        result = StreamSummary()
+        result.startTestRun()
+        self.assertEqual(True, result.wasSuccessful())
+        result.failures.append('x')
+        self.assertEqual(False, result.wasSuccessful())
+        result.startTestRun()
+        result.errors.append('x')
+        self.assertEqual(False, result.wasSuccessful())
+        result.startTestRun()
+        result.skipped.append('x')
+        self.assertEqual(True, result.wasSuccessful())
+        result.startTestRun()
+        result.expectedFailures.append('x')
+        self.assertEqual(True, result.wasSuccessful())
+        result.startTestRun()
+        result.unexpectedSuccesses.append('x')
+        self.assertEqual(True, result.wasSuccessful())
+
+    def test_stopTestRun(self):
+        result = StreamSummary()
+        # terminal successful codes.
+        result.startTestRun()
+        result.status("foo", "inprogress")
+        result.status("foo", "success")
+        result.status("bar", "skip")
+        result.status("baz", "exists")
+        result.stopTestRun()
+        self.assertEqual(True, result.wasSuccessful())
+        # Existence is terminal but doesn't count as 'running' a test.
+        self.assertEqual(2, result.testsRun)
+
+    def test_stopTestRun_inprogress_test_fails(self):
+        # Tests inprogress at stopTestRun trigger a failure.
+        result = StreamSummary()
+        result.startTestRun()
+        result.status("foo", "inprogress")
+        result.stopTestRun()
+        self.assertEqual(False, result.wasSuccessful())
+        self.assertThat(result.errors, HasLength(1))
+        self.assertEqual("foo", result.errors[0][0].id())
+        self.assertEqual("Test did not complete", result.errors[0][1])
+        # interim state detection handles route codes - while duplicate ids
+        # in one run are undesirable, they may happen (e.g. with repeated tests).
+        result.startTestRun()
+        result.status("foo", "inprogress")
+        result.status("foo", "inprogress", route_code="A")
+        result.status("foo", "success", route_code="A")
+        result.stopTestRun()
+        self.assertEqual(False, result.wasSuccessful())
+
+    def test_status_skip(self):
+        # when skip is seen, a synthetic test is reported with reason captured
+        # from the 'reason' file attachment if any.
+        result = StreamSummary()
+        result.startTestRun()
+        result.status(file_name="reason",
+            file_bytes=_b("Missing dependency"), eof=True,
+            mime_type="text/plain; charset=utf8", test_id="foo.bar")
+        result.status("foo.bar", "skip")
+        self.assertThat(result.skipped, HasLength(1))
+        self.assertEqual("foo.bar", result.skipped[0][0].id())
+        self.assertEqual(_u("Missing dependency"), result.skipped[0][1])
+
+    def _report_files(self, result):
+        result.status(file_name="some log.txt",
+            file_bytes=_b("1234 log message"), eof=True,
+            mime_type="text/plain; charset=utf8", test_id="foo.bar")
+        result.status(file_name="traceback",
+            file_bytes=_b("""Traceback (most recent call last):
+  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+      AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")
+
+    files_message = Equals(_u("""some log.txt: {{{1234 log message}}}
+
+Traceback (most recent call last):
+  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+      AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""))
+
+    def test_status_fail(self):
+        # when fail is seen, a synthetic test is reported with all files
+        # attached shown as the message.
+        result = StreamSummary()
+        result.startTestRun()
+        self._report_files(result)
+        result.status("foo.bar", "fail")
+        self.assertThat(result.errors, HasLength(1))
+        self.assertEqual("foo.bar", result.errors[0][0].id())
+        self.assertThat(result.errors[0][1], self.files_message)
+
+    def test_status_xfail(self):
+        # when xfail is seen, a synthetic test is reported with all files
+        # attached shown as the message.
+        result = StreamSummary()
+        result.startTestRun()
+        self._report_files(result)
+        result.status("foo.bar", "xfail")
+        self.assertThat(result.expectedFailures, HasLength(1))
+        self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
+        self.assertThat(result.expectedFailures[0][1], self.files_message)
+
+    def test_status_uxsuccess(self):
+        # when uxsuccess is seen, a synthetic test is reported.
+        result = StreamSummary()
+        result.startTestRun()
+        result.status("foo.bar", "uxsuccess")
+        self.assertThat(result.unexpectedSuccesses, HasLength(1))
+        self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
+
+
+class TestTestControl(TestCase):
+
+    def test_default(self):
+        self.assertEqual(False, TestControl().shouldStop)
+
+    def test_stop(self):
+        control = TestControl()
+        control.stop()
+        self.assertEqual(True, control.shouldStop)
+
+
 class TestTestResult(TestCase):
     """Tests for 'TestResult'."""
 
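
To make the StreamSummary semantics above concrete: it folds the
status() event stream back into unittest-style counters, where 'exists'
is terminal but not counted as a run test, and a test still 'inprogress'
at stopTestRun is reported as an error. A minimal end-to-end sketch:

    from testtools import StreamSummary

    result = StreamSummary()
    result.startTestRun()
    result.status("foo", "inprogress")
    result.status("foo", "success")
    result.stopTestRun()
    assert result.wasSuccessful()
    assert result.testsRun == 1  # 'exists' events would not bump this
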
@@ -785,6 +1389,7 @@ class TestTextTestResult(TestCase):
             DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
 
     def test_stopTestRun_shows_details(self):
+        self.skip("Disabled per bug 1188420")
         def run_tests():
             self.result.startTestRun()
             make_erroring_test().run(self.result)
@@ -1097,6 +1702,193 @@ class TestMergeTags(TestCase):
             expected, _merge_tags(current_tags, changing_tags))
 
 
+class TestStreamResultRouter(TestCase):
+
+    def test_start_stop_test_run_no_fallback(self):
+        result = StreamResultRouter()
+        result.startTestRun()
+        result.stopTestRun()
+
+    def test_no_fallback_errors(self):
+        self.assertRaises(Exception, StreamResultRouter().status, test_id='f')
+
+    def test_fallback_calls(self):
+        fallback = LoggingStreamResult()
+        result = StreamResultRouter(fallback)
+        result.startTestRun()
+        result.status(test_id='foo')
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None),
+            ('stopTestRun',),
+            ],
+            fallback._events)
+
+    def test_fallback_no_do_start_stop_run(self):
+        fallback = LoggingStreamResult()
+        result = StreamResultRouter(fallback, do_start_stop_run=False)
+        result.startTestRun()
+        result.status(test_id='foo')
+        result.stopTestRun()
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None)
+            ],
+            fallback._events)
+
+    def test_add_rule_bad_policy(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa',
+            route_prefix='0')
+
+    def test_add_rule_extra_policy_arg(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+            route_prefix='0', foo=1)
+
+    def test_add_rule_missing_prefix(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix')
+
+    def test_add_rule_slash_in_prefix(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+            route_prefix='0/')
+
+    def test_add_rule_route_code_consume_False(self):
+        fallback = LoggingStreamResult()
+        target = LoggingStreamResult()
+        router = StreamResultRouter(fallback)
+        router.add_rule(target, 'route_code_prefix', route_prefix='0')
+        router.status(test_id='foo', route_code='0')
+        router.status(test_id='foo', route_code='0/1')
+        router.status(test_id='foo')
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, '0',
+             None),
+            ('status', 'foo', None, None, True, None, None, False, None, '0/1',
+             None),
+            ],
+            target._events)
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None),
+            ],
+            fallback._events)
+
+    def test_add_rule_route_code_consume_True(self):
+        fallback = LoggingStreamResult()
+        target = LoggingStreamResult()
+        router = StreamResultRouter(fallback)
+        router.add_rule(
+            target, 'route_code_prefix', route_prefix='0', consume_route=True)
+        router.status(test_id='foo', route_code='0') # -> None
+        router.status(test_id='foo', route_code='0/1') # -> 1
+        router.status(test_id='foo', route_code='1') # -> fallback as-is.
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None),
+            ('status', 'foo', None, None, True, None, None, False, None, '1',
+             None),
+            ],
+            target._events)
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, '1',
+             None),
+            ],
+            fallback._events)
+
+    def test_add_rule_test_id(self):
+        nontest = LoggingStreamResult()
+        test = LoggingStreamResult()
+        router = StreamResultRouter(test)
+        router.add_rule(nontest, 'test_id', test_id=None)
+        router.status(test_id='foo', file_name="bar", file_bytes=b'')
+        router.status(file_name="bar", file_bytes=b'')
+        self.assertEqual([
+            ('status', 'foo', None, None, True, 'bar', b'', False, None, None,
+             None),], test._events)
+        self.assertEqual([
+            ('status', None, None, None, True, 'bar', b'', False, None, None,
+             None),], nontest._events)
+
+    def test_add_rule_do_start_stop_run(self):
+        nontest = LoggingStreamResult()
+        router = StreamResultRouter()
+        router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+        router.startTestRun()
+        router.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('stopTestRun',),
+            ], nontest._events)
+
+    def test_add_rule_do_start_stop_run_after_startTestRun(self):
+        nontest = LoggingStreamResult()
+        router = StreamResultRouter()
+        router.startTestRun()
+        router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+        router.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('stopTestRun',),
+            ], nontest._events)
+
+
+class TestStreamToQueue(TestCase):
+
+    def make_result(self):
+        queue = Queue()
+        return queue, StreamToQueue(queue, "foo")
+
+    def test_status(self):
+        def check_event(event_dict, route=None, time=None):
+            self.assertEqual("status", event_dict['event'])
+            self.assertEqual("test", event_dict['test_id'])
+            self.assertEqual("fail", event_dict['test_status'])
+            self.assertEqual(set(["quux"]), event_dict['test_tags'])
+            self.assertEqual(False, event_dict['runnable'])
+            self.assertEqual("file", event_dict['file_name'])
+            self.assertEqual(_b("content"), event_dict['file_bytes'])
+            self.assertEqual(True, event_dict['eof'])
+            self.assertEqual("quux", event_dict['mime_type'])
+            self.assertEqual("test", event_dict['test_id'])
+            self.assertEqual(route, event_dict['route_code'])
+            self.assertEqual(time, event_dict['timestamp'])
+        queue, result = self.make_result()
+        result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+            file_name="file", file_bytes=_b("content"), eof=True,
+            mime_type="quux", route_code=None, timestamp=None)
+        self.assertEqual(1, queue.qsize())
+        a_time = datetime.datetime.now(utc)
+        result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+            file_name="file", file_bytes=_b("content"), eof=True,
+            mime_type="quux", route_code="bar", timestamp=a_time)
+        self.assertEqual(2, queue.qsize())
+        check_event(queue.get(False), route="foo", time=None)
+        check_event(queue.get(False), route="foo/bar", time=a_time)
+
+    def testStartTestRun(self):
+        queue, result = self.make_result()
+        result.startTestRun()
+        self.assertEqual(
+            {'event':'startTestRun', 'result':result}, queue.get(False))
+        self.assertTrue(queue.empty())
+
+    def testStopTestRun(self):
+        queue, result = self.make_result()
+        result.stopTestRun()
+        self.assertEqual(
+            {'event':'stopTestRun', 'result':result}, queue.get(False))
+        self.assertTrue(queue.empty())
+
+
 class TestExtendedToOriginalResultDecoratorBase(TestCase):
 
     def make_26_result(self):
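
Since the router tests above are dense, one gloss: StreamResultRouter
dispatches each event by rule, and with consume_route=True the matched
route-code prefix is stripped before forwarding, so nested routers each
see only their own segment. A condensed sketch of the behaviour
asserted above:

    from testtools import StreamResultRouter
    from testtools.testresult.doubles import StreamResult as LoggingStreamResult

    fallback = LoggingStreamResult()
    target = LoggingStreamResult()
    router = StreamResultRouter(fallback)
    router.add_rule(target, 'route_code_prefix', route_prefix='0',
                    consume_route=True)
    router.status(test_id='foo', route_code='0/1')  # -> target, route '1'
    router.status(test_id='foo', route_code='1')    # -> fallback, as-is
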
@@ -1558,13 +2350,13 @@ class TestNonAsciiResults(TestCase):
 
     def _test_external_case(self, testline, coding="ascii", modulelevel="",
             suffix=""):
-        """Create and run a test case in a separate module"""
+        """Create and run a test case in a seperate module"""
         self._setup_external_case(testline, coding, modulelevel, suffix)
         return self._run_external_case()
 
     def _setup_external_case(self, testline, coding="ascii", modulelevel="",
             suffix=""):
-        """Create a test case in a separate module"""
+        """Create a test case in a seperate module"""
         _, prefix, self.modname = self.id().rsplit(".", 2)
         self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
         self.addCleanup(shutil.rmtree, self.dir)
@@ -1580,7 +2372,7 @@ class TestNonAsciiResults(TestCase):
             "        %s\n" % (coding, modulelevel, testline))
 
     def _run_external_case(self):
-        """Run the prepared test case in a separate module"""
+        """Run the prepared test case in a seperate module"""
         sys.path.insert(0, self.dir)
         self.addCleanup(sys.path.remove, self.dir)
         module = __import__(self.modname)
@@ -1589,11 +2381,6 @@ class TestNonAsciiResults(TestCase):
         self._run(stream, module.Test())
         return stream.getvalue()
 
-    def _silence_deprecation_warnings(self):
-        """Shut up DeprecationWarning for this test only"""
-        warnings.simplefilter("ignore", DeprecationWarning)
-        self.addCleanup(warnings.filters.remove, warnings.filters[0])
-
     def _get_sample_text(self, encoding="unicode_internal"):
         if encoding is None and str_is_unicode:
            encoding = "unicode_internal"
@@ -1635,7 +2422,7 @@ class TestNonAsciiResults(TestCase):
         if sys.version_info > (3, 3):
             return MatchesAny(Contains("FileExistsError: "),
                               Contains("PermissionError: "))
-        elif os.name != "nt" or sys.version_info < (2, 5):
+        elif os.name != "nt":
             return Contains(self._as_output("OSError: "))
         else:
             return Contains(self._as_output("WindowsError: "))
@@ -1699,15 +2486,6 @@ class TestNonAsciiResults(TestCase):
             "UnprintableError: <unprintable UnprintableError object>\n"),
             textoutput)
 
-    def test_string_exception(self):
-        """Raise a string rather than an exception instance if supported"""
-        if sys.version_info > (2, 6):
-            self.skip("No string exceptions in Python 2.6 or later")
-        elif sys.version_info > (2, 5):
-            self._silence_deprecation_warnings()
-        textoutput = self._test_external_case(testline="raise 'plain str'")
-        self.assertIn(self._as_output("\nplain str\n"), textoutput)
-
     def test_non_ascii_dirname(self):
         """Script paths in the traceback can be non-ascii"""
         text, raw = self._get_sample_text(sys.getfilesystemencoding())
@@ -1737,9 +2515,6 @@ class TestNonAsciiResults(TestCase):
 
     def test_syntax_error_import_binary(self):
         """Importing a binary file shouldn't break SyntaxError formatting"""
-        if sys.version_info < (2, 5):
-            # Python 2.4 assumes the file is latin-1 and tells you off
-            self._silence_deprecation_warnings()
         self._setup_external_case("import bad")
         f = open(os.path.join(self.dir, "bad.py"), "wb")
         try:
@@ -2090,6 +2865,38 @@ class TestTagger(TestCase):
              ], result._events)
 
 
+class TestTimestampingStreamResult(TestCase):
+
+    def test_startTestRun(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.startTestRun()
+        self.assertEqual([('startTestRun',)], result.targets[0]._events)
+
+    def test_stopTestRun(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.stopTestRun()
+        self.assertEqual([('stopTestRun',)], result.targets[0]._events)
+
+    def test_status_no_timestamp(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.status(test_id="A", test_status="B", test_tags="C",
+            runnable="D", file_name="E", file_bytes=b"F", eof=True,
+            mime_type="G", route_code="H")
+        events = result.targets[0]._events
+        self.assertThat(events, HasLength(1))
+        self.assertThat(events[0], HasLength(11))
+        self.assertEqual(
+            ("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"),
+            events[0][:10])
+        self.assertNotEqual(None, events[0][10])
+        self.assertIsInstance(events[0][10], datetime.datetime)
+
+    def test_status_timestamp(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.status(timestamp="F")
+        self.assertEqual("F", result.targets[0]._events[0][10])
+
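These tests pin down the contract: an event arriving without a timestamp is stamped with the current time, and a caller-supplied timestamp passes through untouched. A sketch with a hand-rolled sink (PrintingResult is our stand-in, not part of testtools):

    import datetime
    from testtools import StreamResult, TimestampingStreamResult

    class PrintingResult(StreamResult):
        def status(self, **kwargs):
            print(kwargs.get('test_id'), kwargs.get('timestamp'))

    result = TimestampingStreamResult(PrintingResult())
    result.startTestRun()
    # No timestamp supplied, so the decorator stamps the event itself.
    result.status(test_id='demo', test_status='success')
    # An explicit timestamp is forwarded unchanged.
    result.status(test_id='demo', test_status='success',
                  timestamp=datetime.datetime.utcnow())
    result.stopTestRun()
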
+
 def test_suite():
     from unittest import TestLoader
     return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testsuite.py b/lib/testtools/testtools/tests/test_testsuite.py
index 3fc837c..3bbe63d 100644
--- a/lib/testtools/testtools/tests/test_testsuite.py
+++ b/lib/testtools/testtools/tests/test_testsuite.py
@@ -4,17 +4,26 @@
 
 __metaclass__ = type
 
+import doctest
+from functools import partial
+import sys
 import unittest
 
+from extras import try_import
+
 from testtools import (
     ConcurrentTestSuite,
+    ConcurrentStreamTestSuite,
     iterate_tests,
     PlaceHolder,
+    TestByTestResult,
     TestCase,
     )
-from testtools.helpers import try_import
+from testtools.compat import _b, _u
+from testtools.matchers import DocTestMatches
 from testtools.testsuite import FixtureSuite, iterate_tests, sorted_tests
 from testtools.tests.helpers import LoggingResult
+from testtools.testresult.doubles import StreamResult as LoggingStream
 
 FunctionFixture = try_import('fixtures.FunctionFixture')
 
@@ -26,8 +35,23 @@ class Sample(TestCase):
     def test_method2(self):
         pass
 
+
 class TestConcurrentTestSuiteRun(TestCase):
 
+    def test_broken_test(self):
+        log = []
+        def on_test(test, status, start_time, stop_time, tags, details):
+            log.append((test.id(), status, set(details.keys())))
+        class BrokenTest(object):
+            # Simple break - no result parameter to run()
+            def __call__(self):
+                pass
+            run = __call__
+        original_suite = unittest.TestSuite([BrokenTest()])
+        suite = ConcurrentTestSuite(original_suite, self.split_suite)
+        suite.run(TestByTestResult(on_test))
+        self.assertEqual([('broken-runner', 'error', set(['traceback']))], log)
+
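TestByTestResult condenses each test into one callback invocation, and the new test above only checks that a crashing runner surfaces as an 'error' outcome carrying a 'traceback' detail. The callback shape, as a minimal sketch:

    from testtools import TestByTestResult, TestCase

    def on_test(test, status, start_time, stop_time, tags, details):
        # Invoked once per test with its summarised outcome.
        print(test.id(), status, sorted(details.keys()))

    class Demo(TestCase):
        def test_ok(self):
            pass

    result = TestByTestResult(on_test)
    result.startTestRun()
    Demo('test_ok').run(result)
    result.stopTestRun()
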
     def test_trivial(self):
         log = []
         result = LoggingResult(log)
@@ -68,8 +92,113 @@ class TestConcurrentTestSuiteRun(TestCase):
         self.assertNotEqual([], result_log)
 
     def split_suite(self, suite):
-        tests = list(iterate_tests(suite))
-        return tests[0], tests[1]
+        return list(iterate_tests(suite))
+
+
+class TestConcurrentStreamTestSuiteRun(TestCase):
+
+    def test_trivial(self):
+        result = LoggingStream()
+        test1 = Sample('test_method1')
+        test2 = Sample('test_method2')
+        cases = lambda:[(test1, '0'), (test2, '1')]
+        suite = ConcurrentStreamTestSuite(cases)
+        suite.run(result)
+        def freeze(set_or_none):
+            if set_or_none is None:
+                return set_or_none
+            return frozenset(set_or_none)
+        # Ignore event order: we're testing that the code is all glued
+        # together, which just means we can pump events through and they
+        # get route codes added appropriately.
+        self.assertEqual(set([
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method1',
+             'inprogress',
+             None,
+             True,
+             None,
+             None,
+             False,
+             None,
+             '0',
+             None,
+             ),
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method1',
+             'success',
+             frozenset(),
+             True,
+             None,
+             None,
+             False,
+             None,
+             '0',
+             None,
+             ),
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method2',
+             'inprogress',
+             None,
+             True,
+             None,
+             None,
+             False,
+             None,
+             '1',
+             None,
+             ),
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method2',
+             'success',
+             frozenset(),
+             True,
+             None,
+             None,
+             False,
+             None,
+             '1',
+             None,
+             ),
+            ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,)
+                for event in result._events))
+
+    def test_broken_runner(self):
+        # If the object called breaks, the stream is informed about it
+        # regardless.
+        class BrokenTest(object):
+            # broken - no result parameter!
+            def __call__(self):
+                pass
+            def run(self):
+                pass
+        result = LoggingStream()
+        cases = lambda:[(BrokenTest(), '0')]
+        suite = ConcurrentStreamTestSuite(cases)
+        suite.run(result)
+        events = result._events
+        # Check the traceback loosely.
+        self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\
+Traceback (most recent call last):
+  File "...testtools/testsuite.py", line ..., in _run_test
+    test.run(process_result)
+TypeError: run() takes ...1 ...argument...2...given...
+""", doctest.ELLIPSIS))
+        events = [event[0:10] + (None,) for event in events]
+        events[1] = events[1][:6] + (None,) + events[1][7:]
+        self.assertEqual([
+            ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None),
+            ('status', "broken-runner-'0'", None, None, True, 'traceback', None,
+             True,
+             'text/x-traceback; charset="utf8"; language="python"',
+             '0',
+             None),
+             ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None)
+            ], events)
+
+    def split_suite(self, suite):
+        tests = list(enumerate(iterate_tests(suite)))
+        return [(test, _u(str(pos))) for pos, test in tests]
 
 
 class TestFixtureSuite(TestCase):
@@ -93,6 +222,19 @@ class TestFixtureSuite(TestCase):
         suite.run(LoggingResult([]))
         self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
 
+    def test_fixture_suite_sort(self):
+        log = []
+        class Sample(TestCase):
+            def test_one(self):
+                log.append(1)
+            def test_two(self):
+                log.append(2)
+        fixture = FunctionFixture(
+            lambda: log.append('setUp'),
+            lambda fixture: log.append('tearDown'))
+        suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')])
+        self.assertRaises(ValueError, suite.sort_tests)
+
 
 class TestSortedTests(TestCase):
 
@@ -122,6 +264,13 @@ class TestSortedTests(TestCase):
         suite = sorted_tests(unittest.TestSuite([b, a]))
         self.assertEqual([a, b], list(iterate_tests(suite)))
 
+    def test_duplicate_simple_suites(self):
+        a = PlaceHolder('a')
+        b = PlaceHolder('b')
+        c = PlaceHolder('a')
+        self.assertRaises(
+            ValueError, sorted_tests, unittest.TestSuite([a, b, c]))
+
 
 def test_suite():
     from unittest import TestLoader
diff --git a/lib/testtools/testtools/tests/test_with_with.py b/lib/testtools/testtools/tests/test_with_with.py
index e06adeb..4305c62 100644
--- a/lib/testtools/testtools/tests/test_with_with.py
+++ b/lib/testtools/testtools/tests/test_with_with.py
@@ -11,6 +11,7 @@ from testtools import (
 from testtools.matchers import (
     AfterPreprocessing,
     Equals,
+    EndsWith,
     )
 
 
@@ -71,3 +72,17 @@ class TestExpectedException(TestCase):
     def test_pass_on_raise_any_message(self):
         with ExpectedException(ValueError):
             raise ValueError('whatever')
+
+    def test_annotate(self):
+        def die():
+            with ExpectedException(ValueError, msg="foo"):
+                pass
+        exc = self.assertRaises(AssertionError, die)
+        self.assertThat(exc.args[0], EndsWith(': foo'))
+
+    def test_annotated_matcher(self):
+        def die():
+            with ExpectedException(ValueError, 'bar', msg="foo"):
+                pass
+        exc = self.assertRaises(AssertionError, die)
+        self.assertThat(exc.args[0], EndsWith(': foo'))
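The new msg parameter annotates the AssertionError raised when the block fails to produce the expected exception. A sketch of both the passing and the annotated failing case:

    from testtools import ExpectedException, TestCase

    class Demo(TestCase):
        def test_passes(self):
            with ExpectedException(ValueError, 'bad.*', msg="parsing foo"):
                raise ValueError('bad input')

        def test_fails_with_annotation(self):
            # No ValueError is raised, so this fails with an AssertionError
            # whose message ends in ': parsing foo'.
            with ExpectedException(ValueError, msg="parsing foo"):
                pass
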
diff --git a/lib/testtools/testtools/testsuite.py b/lib/testtools/testtools/testsuite.py
index 67ace56..9e92e0c 100644
--- a/lib/testtools/testtools/testsuite.py
+++ b/lib/testtools/testtools/testsuite.py
@@ -5,17 +5,20 @@
 __metaclass__ = type
 __all__ = [
   'ConcurrentTestSuite',
+  'ConcurrentStreamTestSuite',
+  'filter_by_ids',
   'iterate_tests',
   'sorted_tests',
   ]
 
-from testtools.helpers import safe_hasattr, try_imports
-
-Queue = try_imports(['Queue.Queue', 'queue.Queue'])
-
+import sys
 import threading
 import unittest
 
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
 import testtools
 
 
@@ -98,11 +101,102 @@ class ConcurrentTestSuite(unittest.TestSuite):
 
     def _run_test(self, test, process_result, queue):
         try:
-            test.run(process_result)
+            try:
+                test.run(process_result)
+            except Exception:
+                # The run logic itself failed.
+                case = testtools.ErrorHolder(
+                    "broken-runner",
+                    error=sys.exc_info())
+                case.run(process_result)
         finally:
             queue.put(test)
 
 
+class ConcurrentStreamTestSuite(object):
+    """A TestSuite whose run() parallelises."""
+
+    def __init__(self, make_tests):
+        """Create a ConcurrentTestSuite to execute tests returned by make_tests.
+
+        :param make_tests: A helper function that should return some number
+            of concurrently executable test suite / test case objects.
+            make_tests must take no parameters and return an iterable of
+            tuples. Each tuple must be of the form (case, route_code), where
+            case is a TestCase-like object with a run(result) method, and
+            route_code is either None or a unicode string.
+        """
+        super(ConcurrentStreamTestSuite, self).__init__()
+        self.make_tests = make_tests
+
+    def run(self, result):
+        """Run the tests concurrently.
+
+        This calls out to the provided make_tests helper to determine the
+        concurrency to use and to assign routing codes to each worker.
+
+        ConcurrentStreamTestSuite provides no special mechanism to stop the
+        tests returned by make_tests; it is up to the made tests to honour
+        the shouldStop attribute on the result object they are run with,
+        which will be set if the test run is to be aborted.
+
+        The tests are run with an ExtendedToStreamDecorator wrapped around a
+        StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from
+        the queue and forwards them to result. Tests can therefore be either
+        original unittest tests (or compatible tests), or new tests that emit
+        StreamResult events directly.
+
+        :param result: A StreamResult instance. The caller is responsible for
+            calling startTestRun on this instance prior to invoking suite.run,
+            and stopTestRun subsequent to the run method returning.
+        """
+        tests = self.make_tests()
+        try:
+            threads = {}
+            queue = Queue()
+            for test, route_code in tests:
+                to_queue = testtools.StreamToQueue(queue, route_code)
+                process_result = testtools.ExtendedToStreamDecorator(
+                    testtools.TimestampingStreamResult(to_queue))
+                runner_thread = threading.Thread(
+                    target=self._run_test,
+                    args=(test, process_result, route_code))
+                threads[to_queue] = runner_thread, process_result
+                runner_thread.start()
+            while threads:
+                event_dict = queue.get()
+                event = event_dict.pop('event')
+                if event == 'status':
+                    result.status(**event_dict)
+                elif event == 'stopTestRun':
+                    thread = threads.pop(event_dict['result'])[0]
+                    thread.join()
+                elif event == 'startTestRun':
+                    pass
+                else:
+                    raise ValueError('unknown event type %r' % (event,))
+        except:
+            for thread, process_result in threads.values():
+                # Signal to each TestControl in the ExtendedToStreamDecorator
+                # that the thread should stop running tests and clean up.
+                process_result.stop()
+            raise
+
+    def _run_test(self, test, process_result, route_code):
+        process_result.startTestRun()
+        try:
+            try:
+                test.run(process_result)
+            except Exception:
+                # The run logic itself failed.
+                case = testtools.ErrorHolder(
+                    "broken-runner-'%s'" % (route_code,),
+                    error=sys.exc_info())
+                case.run(process_result)
+        finally:
+            process_result.stopTestRun()
+
+
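Wiring it up follows the run() docstring's contract; in this minimal sketch the base StreamResult just discards events (substitute a real sink such as StreamSummary):

    from testtools import ConcurrentStreamTestSuite, StreamResult, TestCase

    class Sample(TestCase):
        def test_one(self):
            pass
        def test_two(self):
            pass

    def make_tests():
        # One worker per case; the route codes tell the streams apart.
        return [(Sample('test_one'), '0'), (Sample('test_two'), '1')]

    result = StreamResult()
    result.startTestRun()
    try:
        ConcurrentStreamTestSuite(make_tests).run(result)
    finally:
        result.stopTestRun()

Each worker wraps its case in ExtendedToStreamDecorator, so plain unittest cases work as well as StreamResult-aware ones.
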
 class FixtureSuite(unittest.TestSuite):
 
     def __init__(self, fixture, tests):
@@ -147,8 +241,77 @@ def _flatten_tests(suite_or_case, unpack_outer=False):
         return [(suite_id, suite_or_case)]
 
 
+def filter_by_ids(suite_or_case, test_ids):
+    """Remove tests from suite_or_case where their id is not in test_ids.
+    
+    :param suite_or_case: A test suite or test case.
+    :param test_ids: Something that supports the __contains__ protocol.
+    :return: suite_or_case, unless suite_or_case was a case that itself
+        fails the predicate when it will return a new unittest.TestSuite with
+        no contents.
+
+    This helper exists to provide backwards compatibility with older versions
+    of Python (currently all versions :)) that don't have a native
+    filter_by_ids() method on Test(Case|Suite).
+
+    For subclasses of TestSuite, filtering is done by:
+        - attempting to call suite.filter_by_ids(test_ids)
+        - if there is no method, iterating the suite and identifying tests to
+          remove, then removing them from _tests, manually recursing into
+          each entry.
+
+    For objects with an id() method (TestCases), filtering is done by:
+        - attempting to return case.filter_by_ids(test_ids)
+        - if there is no such method, checking whether case.id() is in
+          test_ids, returning case if it is, or an empty TestSuite() if it
+          is not.
+
+    For anything else, it is not filtered - it is returned as-is.
+
+    To provide compatibility with this routine for a custom TestSuite, just
+    define a filter_by_ids() method that will return a TestSuite equivalent to
+    the original minus any tests not in test_ids.
+    Similarly, to provide compatibility for a custom TestCase that does
+    something unusual, define filter_by_ids to return a new TestCase object
+    that will only run the test_ids that are in the provided container. If
+    none would run, return an empty TestSuite().
+
+    The contract for this function does not require mutation: each filtered
+    object can choose to return a new object with the filtered tests. However,
+    because existing custom TestSuite classes in the wild do not have this
+    method, we need a way to copy their state correctly, which is tricky;
+    thus the backwards-compatible code paths attempt to mutate in place rather
+    than guessing how to reconstruct a new suite.
+    """
+    # Compatible objects
+    if safe_hasattr(suite_or_case, 'filter_by_ids'):
+        return suite_or_case.filter_by_ids(test_ids)
+    # TestCase objects.
+    if safe_hasattr(suite_or_case, 'id'):
+        if suite_or_case.id() in test_ids:
+            return suite_or_case
+        else:
+            return unittest.TestSuite()
+    # Standard TestSuites or derived classes [assumed to be mutable].
+    if isinstance(suite_or_case, unittest.TestSuite):
+        filtered = []
+        for item in suite_or_case:
+            filtered.append(filter_by_ids(item, test_ids))
+        suite_or_case._tests[:] = filtered
+    # Everything else:
+    return suite_or_case
+
+
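In practice the fallback path behaves like this (PlaceHolder provides cheap cases with stable ids, and iterate_tests flattens the empty suites that filtering leaves behind):

    import unittest
    from testtools import PlaceHolder, iterate_tests
    from testtools.testsuite import filter_by_ids

    suite = unittest.TestSuite(
        [PlaceHolder('a'), PlaceHolder('b'), PlaceHolder('c')])
    filtered = filter_by_ids(suite, set(['a', 'c']))
    # 'b' has been replaced by an empty TestSuite(), so only a and c run.
    print([case.id() for case in iterate_tests(filtered)])   # ['a', 'c']
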
 def sorted_tests(suite_or_case, unpack_outer=False):
     """Sort suite_or_case while preserving non-vanilla TestSuites."""
+    # Duplicate test ids can induce a TypeError under Python 3.3.
+    # Detect duplicate test ids and raise an exception when one is found.
+    seen = set()
+    for test_case in iterate_tests(suite_or_case):
+        test_id = test_case.id()
+        if test_id not in seen:
+            seen.add(test_id)
+        else:
+            raise ValueError('Duplicate test id detected: %s' % (test_id,))
     tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
     tests.sort()
     return unittest.TestSuite([test for (sort_key, test) in tests])
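With the duplicate check, sorted_tests fails fast instead of hitting Python 3.3's TypeError when tuple comparison falls back to comparing the test objects themselves. For example:

    import unittest
    from testtools import PlaceHolder, iterate_tests
    from testtools.testsuite import sorted_tests

    suite = unittest.TestSuite([PlaceHolder('b'), PlaceHolder('a')])
    print([case.id() for case in iterate_tests(sorted_tests(suite))])
    # ['a', 'b']

    dupes = unittest.TestSuite([PlaceHolder('a'), PlaceHolder('a')])
    sorted_tests(dupes)   # ValueError: Duplicate test id detected: a
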
-- 
2.1.1


