[PATCH 06/13] Move testtools to third_party/.

Jelmer Vernooij jelmer@samba.org
Sat Nov 15 12:17:01 MST 2014


Change-Id: I97e99746e9118c61d6c5b8b3e3a250177f7719f1
Signed-off-by: Jelmer Vernooij <jelmer@samba.org>
---
 lib/testtools/.gitignore                           |   18 -
 lib/testtools/.gitreview                           |    4 -
 lib/testtools/.testr.conf                          |    4 -
 lib/testtools/.travis.yml                          |   25 -
 lib/testtools/LICENSE                              |   59 -
 lib/testtools/MANIFEST.in                          |   10 -
 lib/testtools/Makefile                             |   56 -
 lib/testtools/NEWS                                 | 1414 ----------
 lib/testtools/README.rst                           |   92 -
 lib/testtools/doc/Makefile                         |   89 -
 lib/testtools/doc/_static/placeholder.txt          |    0
 lib/testtools/doc/_templates/placeholder.txt       |    0
 lib/testtools/doc/api.rst                          |   26 -
 lib/testtools/doc/conf.py                          |  194 --
 lib/testtools/doc/for-framework-folk.rst           |  453 ---
 lib/testtools/doc/for-test-authors.rst             | 1485 ----------
 lib/testtools/doc/hacking.rst                      |  194 --
 lib/testtools/doc/index.rst                        |   36 -
 lib/testtools/doc/make.bat                         |  113 -
 lib/testtools/doc/overview.rst                     |  101 -
 lib/testtools/scripts/README                       |    3 -
 lib/testtools/scripts/_lp_release.py               |  232 --
 lib/testtools/scripts/all-pythons                  |   93 -
 lib/testtools/scripts/update-rtfd                  |   11 -
 lib/testtools/setup.cfg                            |    4 -
 lib/testtools/setup.py                             |   86 -
 lib/testtools/testtools/__init__.py                |  125 -
 lib/testtools/testtools/_compat2x.py               |   17 -
 lib/testtools/testtools/_compat3x.py               |   17 -
 lib/testtools/testtools/_spinner.py                |  316 ---
 lib/testtools/testtools/assertions.py              |   22 -
 lib/testtools/testtools/compat.py                  |  386 ---
 lib/testtools/testtools/content.py                 |  383 ---
 lib/testtools/testtools/content_type.py            |   41 -
 lib/testtools/testtools/deferredruntest.py         |  344 ---
 lib/testtools/testtools/distutilscmd.py            |   62 -
 lib/testtools/testtools/helpers.py                 |   48 -
 lib/testtools/testtools/matchers/__init__.py       |  119 -
 lib/testtools/testtools/matchers/_basic.py         |  326 ---
 .../testtools/matchers/_datastructures.py          |  228 --
 lib/testtools/testtools/matchers/_dict.py          |  259 --
 lib/testtools/testtools/matchers/_doctest.py       |  104 -
 lib/testtools/testtools/matchers/_exception.py     |  136 -
 lib/testtools/testtools/matchers/_filesystem.py    |  192 --
 lib/testtools/testtools/matchers/_higherorder.py   |  368 ---
 lib/testtools/testtools/matchers/_impl.py          |  173 --
 lib/testtools/testtools/monkey.py                  |   97 -
 lib/testtools/testtools/run.py                     |  535 ----
 lib/testtools/testtools/runtest.py                 |  227 --
 lib/testtools/testtools/tags.py                    |   34 -
 lib/testtools/testtools/testcase.py                | 1022 -------
 lib/testtools/testtools/testresult/__init__.py     |   49 -
 lib/testtools/testtools/testresult/doubles.py      |  174 --
 lib/testtools/testtools/testresult/real.py         | 1777 ------------
 lib/testtools/testtools/tests/__init__.py          |   49 -
 lib/testtools/testtools/tests/helpers.py           |  108 -
 lib/testtools/testtools/tests/matchers/__init__.py |   29 -
 lib/testtools/testtools/tests/matchers/helpers.py  |   42 -
 .../testtools/tests/matchers/test_basic.py         |  396 ---
 .../tests/matchers/test_datastructures.py          |  209 --
 .../testtools/tests/matchers/test_dict.py          |  227 --
 .../testtools/tests/matchers/test_doctest.py       |   82 -
 .../testtools/tests/matchers/test_exception.py     |  187 --
 .../testtools/tests/matchers/test_filesystem.py    |  243 --
 .../testtools/tests/matchers/test_higherorder.py   |  254 --
 .../testtools/tests/matchers/test_impl.py          |  132 -
 lib/testtools/testtools/tests/test_assert_that.py  |  152 -
 lib/testtools/testtools/tests/test_compat.py       |  603 ----
 lib/testtools/testtools/tests/test_content.py      |  366 ---
 lib/testtools/testtools/tests/test_content_type.py |   66 -
 .../testtools/tests/test_deferredruntest.py        |  777 ------
 lib/testtools/testtools/tests/test_distutilscmd.py |  100 -
 .../testtools/tests/test_fixturesupport.py         |  145 -
 lib/testtools/testtools/tests/test_helpers.py      |   30 -
 lib/testtools/testtools/tests/test_monkey.py       |  167 --
 lib/testtools/testtools/tests/test_run.py          |  309 ---
 lib/testtools/testtools/tests/test_runtest.py      |  335 ---
 lib/testtools/testtools/tests/test_spinner.py      |  326 ---
 lib/testtools/testtools/tests/test_tags.py         |   84 -
 lib/testtools/testtools/tests/test_testcase.py     | 1733 ------------
 lib/testtools/testtools/tests/test_testresult.py   | 2913 --------------------
 lib/testtools/testtools/tests/test_testsuite.py    |  277 --
 lib/testtools/testtools/tests/test_with_with.py    |   88 -
 lib/testtools/testtools/testsuite.py               |  317 ---
 lib/testtools/testtools/utils.py                   |   13 -
 lib/update-external.sh                             |    2 +-
 lib/wscript_build                                  |    1 -
 python/samba/tests/__init__.py                     |    2 +-
 selftest/subunithelper.py                          |    2 +-
 third_party/testtools/.gitignore                   |   18 +
 third_party/testtools/.gitreview                   |    4 +
 third_party/testtools/.testr.conf                  |    4 +
 third_party/testtools/.travis.yml                  |   25 +
 third_party/testtools/LICENSE                      |   59 +
 third_party/testtools/MANIFEST.in                  |   10 +
 third_party/testtools/Makefile                     |   56 +
 third_party/testtools/NEWS                         | 1414 ++++++++++
 third_party/testtools/README.rst                   |   92 +
 third_party/testtools/doc/Makefile                 |   89 +
 third_party/testtools/doc/_static/placeholder.txt  |    0
 .../testtools/doc/_templates/placeholder.txt       |    0
 third_party/testtools/doc/api.rst                  |   26 +
 third_party/testtools/doc/conf.py                  |  194 ++
 third_party/testtools/doc/for-framework-folk.rst   |  453 +++
 third_party/testtools/doc/for-test-authors.rst     | 1485 ++++++++++
 third_party/testtools/doc/hacking.rst              |  194 ++
 third_party/testtools/doc/index.rst                |   35 +
 third_party/testtools/doc/make.bat                 |  113 +
 third_party/testtools/doc/overview.rst             |  101 +
 third_party/testtools/scripts/README               |    3 +
 third_party/testtools/scripts/_lp_release.py       |  232 ++
 third_party/testtools/scripts/all-pythons          |   93 +
 third_party/testtools/scripts/update-rtfd          |   11 +
 third_party/testtools/setup.cfg                    |    4 +
 third_party/testtools/setup.py                     |   86 +
 third_party/testtools/testtools/__init__.py        |  125 +
 third_party/testtools/testtools/_compat2x.py       |   16 +
 third_party/testtools/testtools/_compat3x.py       |   16 +
 third_party/testtools/testtools/_spinner.py        |  316 +++
 third_party/testtools/testtools/assertions.py      |   22 +
 third_party/testtools/testtools/compat.py          |  386 +++
 third_party/testtools/testtools/content.py         |  383 +++
 third_party/testtools/testtools/content_type.py    |   41 +
 third_party/testtools/testtools/deferredruntest.py |  344 +++
 third_party/testtools/testtools/distutilscmd.py    |   62 +
 third_party/testtools/testtools/helpers.py         |   48 +
 .../testtools/testtools/matchers/__init__.py       |  119 +
 third_party/testtools/testtools/matchers/_basic.py |  326 +++
 .../testtools/matchers/_datastructures.py          |  228 ++
 third_party/testtools/testtools/matchers/_dict.py  |  259 ++
 .../testtools/testtools/matchers/_doctest.py       |  104 +
 .../testtools/testtools/matchers/_exception.py     |  136 +
 .../testtools/testtools/matchers/_filesystem.py    |  192 ++
 .../testtools/testtools/matchers/_higherorder.py   |  368 +++
 third_party/testtools/testtools/matchers/_impl.py  |  173 ++
 third_party/testtools/testtools/monkey.py          |   97 +
 third_party/testtools/testtools/run.py             |  535 ++++
 third_party/testtools/testtools/runtest.py         |  227 ++
 third_party/testtools/testtools/tags.py            |   34 +
 third_party/testtools/testtools/testcase.py        | 1022 +++++++
 .../testtools/testtools/testresult/__init__.py     |   49 +
 .../testtools/testtools/testresult/doubles.py      |  174 ++
 third_party/testtools/testtools/testresult/real.py | 1777 ++++++++++++
 third_party/testtools/testtools/tests/__init__.py  |   49 +
 third_party/testtools/testtools/tests/helpers.py   |  108 +
 .../testtools/testtools/tests/matchers/__init__.py |   29 +
 .../testtools/testtools/tests/matchers/helpers.py  |   42 +
 .../testtools/tests/matchers/test_basic.py         |  396 +++
 .../tests/matchers/test_datastructures.py          |  209 ++
 .../testtools/tests/matchers/test_dict.py          |  227 ++
 .../testtools/tests/matchers/test_doctest.py       |   82 +
 .../testtools/tests/matchers/test_exception.py     |  187 ++
 .../testtools/tests/matchers/test_filesystem.py    |  243 ++
 .../testtools/tests/matchers/test_higherorder.py   |  254 ++
 .../testtools/tests/matchers/test_impl.py          |  132 +
 .../testtools/testtools/tests/test_assert_that.py  |  152 +
 .../testtools/testtools/tests/test_compat.py       |  603 ++++
 .../testtools/testtools/tests/test_content.py      |  366 +++
 .../testtools/testtools/tests/test_content_type.py |   66 +
 .../testtools/tests/test_deferredruntest.py        |  777 ++++++
 .../testtools/testtools/tests/test_distutilscmd.py |  100 +
 .../testtools/tests/test_fixturesupport.py         |  145 +
 .../testtools/testtools/tests/test_helpers.py      |   30 +
 .../testtools/testtools/tests/test_monkey.py       |  167 ++
 third_party/testtools/testtools/tests/test_run.py  |  309 +++
 .../testtools/testtools/tests/test_runtest.py      |  335 +++
 .../testtools/testtools/tests/test_spinner.py      |  326 +++
 third_party/testtools/testtools/tests/test_tags.py |   84 +
 .../testtools/testtools/tests/test_testcase.py     | 1733 ++++++++++++
 .../testtools/testtools/tests/test_testresult.py   | 2913 ++++++++++++++++++++
 .../testtools/testtools/tests/test_testsuite.py    |  277 ++
 .../testtools/testtools/tests/test_with_with.py    |   88 +
 third_party/testtools/testtools/testsuite.py       |  317 +++
 third_party/testtools/testtools/utils.py           |   12 +
 third_party/wscript_build                          |    1 +
 175 files changed, 23142 insertions(+), 23146 deletions(-)
 delete mode 100644 lib/testtools/.gitignore
 delete mode 100644 lib/testtools/.gitreview
 delete mode 100644 lib/testtools/.testr.conf
 delete mode 100644 lib/testtools/.travis.yml
 delete mode 100644 lib/testtools/LICENSE
 delete mode 100644 lib/testtools/MANIFEST.in
 delete mode 100644 lib/testtools/Makefile
 delete mode 100644 lib/testtools/NEWS
 delete mode 100644 lib/testtools/README.rst
 delete mode 100644 lib/testtools/doc/Makefile
 delete mode 100644 lib/testtools/doc/_static/placeholder.txt
 delete mode 100644 lib/testtools/doc/_templates/placeholder.txt
 delete mode 100644 lib/testtools/doc/api.rst
 delete mode 100644 lib/testtools/doc/conf.py
 delete mode 100644 lib/testtools/doc/for-framework-folk.rst
 delete mode 100644 lib/testtools/doc/for-test-authors.rst
 delete mode 100644 lib/testtools/doc/hacking.rst
 delete mode 100644 lib/testtools/doc/index.rst
 delete mode 100644 lib/testtools/doc/make.bat
 delete mode 100644 lib/testtools/doc/overview.rst
 delete mode 100644 lib/testtools/scripts/README
 delete mode 100644 lib/testtools/scripts/_lp_release.py
 delete mode 100755 lib/testtools/scripts/all-pythons
 delete mode 100755 lib/testtools/scripts/update-rtfd
 delete mode 100644 lib/testtools/setup.cfg
 delete mode 100755 lib/testtools/setup.py
 delete mode 100644 lib/testtools/testtools/__init__.py
 delete mode 100644 lib/testtools/testtools/_compat2x.py
 delete mode 100644 lib/testtools/testtools/_compat3x.py
 delete mode 100644 lib/testtools/testtools/_spinner.py
 delete mode 100644 lib/testtools/testtools/assertions.py
 delete mode 100644 lib/testtools/testtools/compat.py
 delete mode 100644 lib/testtools/testtools/content.py
 delete mode 100644 lib/testtools/testtools/content_type.py
 delete mode 100644 lib/testtools/testtools/deferredruntest.py
 delete mode 100644 lib/testtools/testtools/distutilscmd.py
 delete mode 100644 lib/testtools/testtools/helpers.py
 delete mode 100644 lib/testtools/testtools/matchers/__init__.py
 delete mode 100644 lib/testtools/testtools/matchers/_basic.py
 delete mode 100644 lib/testtools/testtools/matchers/_datastructures.py
 delete mode 100644 lib/testtools/testtools/matchers/_dict.py
 delete mode 100644 lib/testtools/testtools/matchers/_doctest.py
 delete mode 100644 lib/testtools/testtools/matchers/_exception.py
 delete mode 100644 lib/testtools/testtools/matchers/_filesystem.py
 delete mode 100644 lib/testtools/testtools/matchers/_higherorder.py
 delete mode 100644 lib/testtools/testtools/matchers/_impl.py
 delete mode 100644 lib/testtools/testtools/monkey.py
 delete mode 100755 lib/testtools/testtools/run.py
 delete mode 100644 lib/testtools/testtools/runtest.py
 delete mode 100644 lib/testtools/testtools/tags.py
 delete mode 100644 lib/testtools/testtools/testcase.py
 delete mode 100644 lib/testtools/testtools/testresult/__init__.py
 delete mode 100644 lib/testtools/testtools/testresult/doubles.py
 delete mode 100644 lib/testtools/testtools/testresult/real.py
 delete mode 100644 lib/testtools/testtools/tests/__init__.py
 delete mode 100644 lib/testtools/testtools/tests/helpers.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/__init__.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/helpers.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_basic.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_datastructures.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_dict.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_doctest.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_exception.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_filesystem.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_higherorder.py
 delete mode 100644 lib/testtools/testtools/tests/matchers/test_impl.py
 delete mode 100644 lib/testtools/testtools/tests/test_assert_that.py
 delete mode 100644 lib/testtools/testtools/tests/test_compat.py
 delete mode 100644 lib/testtools/testtools/tests/test_content.py
 delete mode 100644 lib/testtools/testtools/tests/test_content_type.py
 delete mode 100644 lib/testtools/testtools/tests/test_deferredruntest.py
 delete mode 100644 lib/testtools/testtools/tests/test_distutilscmd.py
 delete mode 100644 lib/testtools/testtools/tests/test_fixturesupport.py
 delete mode 100644 lib/testtools/testtools/tests/test_helpers.py
 delete mode 100644 lib/testtools/testtools/tests/test_monkey.py
 delete mode 100644 lib/testtools/testtools/tests/test_run.py
 delete mode 100644 lib/testtools/testtools/tests/test_runtest.py
 delete mode 100644 lib/testtools/testtools/tests/test_spinner.py
 delete mode 100644 lib/testtools/testtools/tests/test_tags.py
 delete mode 100644 lib/testtools/testtools/tests/test_testcase.py
 delete mode 100644 lib/testtools/testtools/tests/test_testresult.py
 delete mode 100644 lib/testtools/testtools/tests/test_testsuite.py
 delete mode 100644 lib/testtools/testtools/tests/test_with_with.py
 delete mode 100644 lib/testtools/testtools/testsuite.py
 delete mode 100644 lib/testtools/testtools/utils.py
 create mode 100644 third_party/testtools/.gitignore
 create mode 100644 third_party/testtools/.gitreview
 create mode 100644 third_party/testtools/.testr.conf
 create mode 100644 third_party/testtools/.travis.yml
 create mode 100644 third_party/testtools/LICENSE
 create mode 100644 third_party/testtools/MANIFEST.in
 create mode 100644 third_party/testtools/Makefile
 create mode 100644 third_party/testtools/NEWS
 create mode 100644 third_party/testtools/README.rst
 create mode 100644 third_party/testtools/doc/Makefile
 create mode 100644 third_party/testtools/doc/_static/placeholder.txt
 create mode 100644 third_party/testtools/doc/_templates/placeholder.txt
 create mode 100644 third_party/testtools/doc/api.rst
 create mode 100644 third_party/testtools/doc/conf.py
 create mode 100644 third_party/testtools/doc/for-framework-folk.rst
 create mode 100644 third_party/testtools/doc/for-test-authors.rst
 create mode 100644 third_party/testtools/doc/hacking.rst
 create mode 100644 third_party/testtools/doc/index.rst
 create mode 100644 third_party/testtools/doc/make.bat
 create mode 100644 third_party/testtools/doc/overview.rst
 create mode 100644 third_party/testtools/scripts/README
 create mode 100644 third_party/testtools/scripts/_lp_release.py
 create mode 100755 third_party/testtools/scripts/all-pythons
 create mode 100755 third_party/testtools/scripts/update-rtfd
 create mode 100644 third_party/testtools/setup.cfg
 create mode 100755 third_party/testtools/setup.py
 create mode 100644 third_party/testtools/testtools/__init__.py
 create mode 100644 third_party/testtools/testtools/_compat2x.py
 create mode 100644 third_party/testtools/testtools/_compat3x.py
 create mode 100644 third_party/testtools/testtools/_spinner.py
 create mode 100644 third_party/testtools/testtools/assertions.py
 create mode 100644 third_party/testtools/testtools/compat.py
 create mode 100644 third_party/testtools/testtools/content.py
 create mode 100644 third_party/testtools/testtools/content_type.py
 create mode 100644 third_party/testtools/testtools/deferredruntest.py
 create mode 100644 third_party/testtools/testtools/distutilscmd.py
 create mode 100644 third_party/testtools/testtools/helpers.py
 create mode 100644 third_party/testtools/testtools/matchers/__init__.py
 create mode 100644 third_party/testtools/testtools/matchers/_basic.py
 create mode 100644 third_party/testtools/testtools/matchers/_datastructures.py
 create mode 100644 third_party/testtools/testtools/matchers/_dict.py
 create mode 100644 third_party/testtools/testtools/matchers/_doctest.py
 create mode 100644 third_party/testtools/testtools/matchers/_exception.py
 create mode 100644 third_party/testtools/testtools/matchers/_filesystem.py
 create mode 100644 third_party/testtools/testtools/matchers/_higherorder.py
 create mode 100644 third_party/testtools/testtools/matchers/_impl.py
 create mode 100644 third_party/testtools/testtools/monkey.py
 create mode 100755 third_party/testtools/testtools/run.py
 create mode 100644 third_party/testtools/testtools/runtest.py
 create mode 100644 third_party/testtools/testtools/tags.py
 create mode 100644 third_party/testtools/testtools/testcase.py
 create mode 100644 third_party/testtools/testtools/testresult/__init__.py
 create mode 100644 third_party/testtools/testtools/testresult/doubles.py
 create mode 100644 third_party/testtools/testtools/testresult/real.py
 create mode 100644 third_party/testtools/testtools/tests/__init__.py
 create mode 100644 third_party/testtools/testtools/tests/helpers.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/__init__.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/helpers.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_basic.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_datastructures.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_dict.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_doctest.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_exception.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_filesystem.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_higherorder.py
 create mode 100644 third_party/testtools/testtools/tests/matchers/test_impl.py
 create mode 100644 third_party/testtools/testtools/tests/test_assert_that.py
 create mode 100644 third_party/testtools/testtools/tests/test_compat.py
 create mode 100644 third_party/testtools/testtools/tests/test_content.py
 create mode 100644 third_party/testtools/testtools/tests/test_content_type.py
 create mode 100644 third_party/testtools/testtools/tests/test_deferredruntest.py
 create mode 100644 third_party/testtools/testtools/tests/test_distutilscmd.py
 create mode 100644 third_party/testtools/testtools/tests/test_fixturesupport.py
 create mode 100644 third_party/testtools/testtools/tests/test_helpers.py
 create mode 100644 third_party/testtools/testtools/tests/test_monkey.py
 create mode 100644 third_party/testtools/testtools/tests/test_run.py
 create mode 100644 third_party/testtools/testtools/tests/test_runtest.py
 create mode 100644 third_party/testtools/testtools/tests/test_spinner.py
 create mode 100644 third_party/testtools/testtools/tests/test_tags.py
 create mode 100644 third_party/testtools/testtools/tests/test_testcase.py
 create mode 100644 third_party/testtools/testtools/tests/test_testresult.py
 create mode 100644 third_party/testtools/testtools/tests/test_testsuite.py
 create mode 100644 third_party/testtools/testtools/tests/test_with_with.py
 create mode 100644 third_party/testtools/testtools/testsuite.py
 create mode 100644 third_party/testtools/testtools/utils.py

diff --git a/lib/testtools/.gitignore b/lib/testtools/.gitignore
deleted file mode 100644
index acf9b74..0000000
--- a/lib/testtools/.gitignore
+++ /dev/null
@@ -1,18 +0,0 @@
-__pycache__
-./build
-MANIFEST
-dist
-tags
-TAGS
-apidocs
-_trial_temp
-doc/_build
-.testrepository
-.lp_creds
-./testtools.egg-info
-*.pyc
-*.swp
-*~
-testtools.egg-info
-/build/
-/.env/
diff --git a/lib/testtools/.gitreview b/lib/testtools/.gitreview
deleted file mode 100644
index 5d15856..0000000
--- a/lib/testtools/.gitreview
+++ /dev/null
@@ -1,4 +0,0 @@
-[gerrit]
-host=review.testing-cabal.org
-port=29418
-project=testing-cabal/testtools.git
diff --git a/lib/testtools/.testr.conf b/lib/testtools/.testr.conf
deleted file mode 100644
index e695109..0000000
--- a/lib/testtools/.testr.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-test_command=${PYTHON:-python} -m subunit.run $LISTOPT $IDOPTION testtools.tests.test_suite
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/lib/testtools/.travis.yml b/lib/testtools/.travis.yml
deleted file mode 100644
index 5e0e85a..0000000
--- a/lib/testtools/.travis.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-language: python
-
-python:
-  - "2.6"
-  - "2.7"
-  - "3.3"
-  - "pypy"
-
-# We have to pin Jinja2 < 2.7  for Python 3.2 because 2.7 drops/breaks support:
-# http://jinja.pocoo.org/docs/changelog/#version-2-7
-#
-# See also:
-# http://stackoverflow.com/questions/18252804/syntax-error-in-jinja-2-library
-matrix:
-  include:
-    - python: "3.2"
-      env: JINJA_REQ="jinja2<2.7"
-
-install:
-  - pip install -q --use-mirrors fixtures extras python-mimeparse $JINJA_REQ sphinx
-  - python setup.py -q install
-
-script:
-  - python -m testtools.run testtools.tests.test_suite
-  - make clean-sphinx docs
diff --git a/lib/testtools/LICENSE b/lib/testtools/LICENSE
deleted file mode 100644
index 21010cc..0000000
--- a/lib/testtools/LICENSE
+++ /dev/null
@@ -1,59 +0,0 @@
-Copyright (c) 2008-2011 Jonathan M. Lange <jml@mumak.net> and the testtools
-authors.
-
-The testtools authors are:
- * Canonical Ltd
- * Twisted Matrix Labs
- * Jonathan Lange
- * Robert Collins
- * Andrew Bennetts
- * Benjamin Peterson
- * Jamu Kakar
- * James Westby
- * Martin [gz]
- * Michael Hudson-Doyle
- * Aaron Bentley
- * Christian Kampka
- * Gavin Panella
- * Martin Pool
- * Vincent Ladeuil
- * Nikola Đipanov
-
-and are collectively referred to as "testtools developers".
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-Some code in testtools/run.py taken from Python's unittest module:
-Copyright (c) 1999-2003 Steve Purcell
-Copyright (c) 2003-2010 Python Software Foundation
-
-This module is free software, and you may redistribute it and/or modify
-it under the same terms as Python itself, so long as this copyright message
-and disclaimer are retained in their original form.
-
-IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
-SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
-THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGE.
-
-THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE.  THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
-AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
-SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/lib/testtools/MANIFEST.in b/lib/testtools/MANIFEST.in
deleted file mode 100644
index 4619349..0000000
--- a/lib/testtools/MANIFEST.in
+++ /dev/null
@@ -1,10 +0,0 @@
-include LICENSE
-include Makefile
-include MANIFEST.in
-include NEWS
-include README.rst
-include .gitignore
-graft doc
-graft doc/_static
-graft doc/_templates
-prune doc/_build
diff --git a/lib/testtools/Makefile b/lib/testtools/Makefile
deleted file mode 100644
index c637123..0000000
--- a/lib/testtools/Makefile
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
-
-PYTHON=python
-SOURCES=$(shell find testtools -name "*.py")
-
-check:
-	PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run testtools.tests.test_suite
-
-TAGS: ${SOURCES}
-	ctags -e -R testtools/
-
-tags: ${SOURCES}
-	ctags -R testtools/
-
-clean: clean-sphinx
-	rm -f TAGS tags
-	find testtools -name "*.pyc" -exec rm '{}' \;
-
-prerelease:
-	# An existing MANIFEST breaks distutils sometimes. Avoid that.
-	-rm MANIFEST
-
-release:
-	./setup.py sdist bdist_wheel upload --sign
-	$(PYTHON) scripts/_lp_release.py
-
-snapshot: prerelease
-	./setup.py sdist bdist_wheel
-
-### Documentation ###
-
-apidocs:
-	# pydoctor emits deprecation warnings under Ubuntu 10.10 LTS
-	PYTHONWARNINGS='ignore::DeprecationWarning' \
-		pydoctor --make-html --add-package testtools \
-		--docformat=restructuredtext --project-name=testtools \
-		--project-url=https://github.com/testing-cabal/testtools
-
-doc/news.rst:
-	ln -s ../NEWS doc/news.rst
-
-docs: doc/news.rst docs-sphinx
-	rm doc/news.rst
-
-docs-sphinx: html-sphinx
-
-# Clean out generated documentation
-clean-sphinx:
-	cd doc && make clean
-
-# Build the html docs using Sphinx.
-html-sphinx:
-	cd doc && make html
-
-.PHONY: apidocs docs-sphinx clean-sphinx html-sphinx docs
-.PHONY: check clean prerelease release
diff --git a/lib/testtools/NEWS b/lib/testtools/NEWS
deleted file mode 100644
index 769f118..0000000
--- a/lib/testtools/NEWS
+++ /dev/null
@@ -1,1414 +0,0 @@
-testtools NEWS
-++++++++++++++
-
-Changes and improvements to testtools_, grouped by release.
-
-
-NEXT
-~~~~
-
-Changes
--------
-
-* Fixed unit tests which were failing under pypy due to a change in the way
-  pypy formats tracebacks. (Thomi Richards)
-
-* Make `testtools.content.text_content` error if anything other than text
-  is given as content. (Thomi Richards)
-
-* We now publish wheels of testtools. (Robert Collins, #issue84)
-
-1.1.0
-~~~~~
-
-Improvements
-------------
-
-* Exceptions in a ``fixture.getDetails`` method will no longer mask errors
-  raised from the same fixture's ``setUp`` method.
-  (Robert Collins, #1368440)
-
-1.0.0
-~~~~~
-
-Long overdue, we've adopted a backwards compatibility statement and recognized
-that we have plenty of users depending on our behaviour - calling our version
-1.0.0 is a recognition of that.
-
-Improvements
-------------
-
-* Fix a long-standing bug where tearDown and cleanUps would not be called if the
-  test run was interrupted. This should fix leaking external resources from
-  interrupted tests.
-  (Robert Collins, #1364188)
-
-* Fix a long-standing bug where calling sys.exit(0) from within a test would
-  cause the test suite to exit with 0, without reporting a failure of that
-  test. We still allow the test suite to be exited (since catching higher order
-  exceptions requires exceptional circumstances) but we now call a last-resort
-  handler on the TestCase, resulting in an error being reported for the test.
-  (Robert Collins, #1364188)
-
-* Fix an issue where tests skipped with the ``skip``* family of decorators would
-  still have their ``setUp`` and ``tearDown`` functions called.
-  (Thomi Richards, #https://github.com/testing-cabal/testtools/issues/86)
-
-* We have adopted a formal backwards compatibility statement (see hacking.rst)
-  (Robert Collins)
-
-0.9.39
-~~~~~~
-
-Brown paper bag release - 0.9.38 was broken for some users:
-_jython_aware_splitext was not defined entirely compatibly.
-(Robert Collins, #https://github.com/testing-cabal/testtools/issues/100)
-
-0.9.38
-~~~~~~
-
-Bug fixes for test importing.
-
-Improvements
-------------
-
-* Discovery import error detection wasn't implemented for python 2.6 (the
-  'discover' module). (Robert Collins)
-
-* Discovery now executes load_tests (if present) in __init__ in all packages.
-  (Robert Collins, http://bugs.python.org/issue16662)
-
-0.9.37
-~~~~~~
-
-Minor improvements to correctness.
-
-Changes
--------
-
-* ``stdout`` is now correctly honoured on ``run.TestProgram`` - previously
-  the runner objects were created with no stdout parameter. If construction
-  fails, the previous parameter list is attempted, permitting compatibility
-  with Runner classes that don't accept stdout as a parameter.
-  (Robert Collins)
-
-* The ``ExtendedToStreamDecorator`` now handles content objects with one less
-  packet - the last packet of the source content is sent with EOF set rather
-  than an empty packet with EOF set being sent after the last packet of the
-  source content. (Robert Collins)
-
-0.9.36
-~~~~~~
-
-Welcome to our long overdue 0.9.36 release, which improves compatibility with
-Python3.4, adds assert_that, a function for using matchers without TestCase
-objects, and finally will error if you try to use setUp or tearDown twice -
-since that invariably leads to bad things of one sort or another happening.
-
-Changes
--------
-
-* Error if ``setUp`` or ``tearDown`` are called twice.
-  (Robert Collins, #882884)
-
-* Make testtools compatible with the ``unittest.expectedFailure`` decorator in
-  Python 3.4. (Thomi Richards)
-
-
-Improvements
-------------
-
-* Introduce the assert_that function, which allows matchers to be used
-  independent of testtools.TestCase. (Daniel Watkins, #1243834)
-
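A minimal sketch of the ``assert_that`` function described above, assuming
only the ``testtools.assertions`` module shipped in this tree::

    from testtools.assertions import assert_that
    from testtools.matchers import Equals

    # Fails by raising MismatchError (a subclass of AssertionError);
    # no TestCase instance is required.
    assert_that(2 + 2, Equals(4))
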
-
-0.9.35
-~~~~~~
-
-Changes
--------
-
-* Removed a number of code paths where Python 2.4 and Python 2.5 were
-  explicitly handled. (Daniel Watkins)
-
-Improvements
-------------
-
-* Added the ``testtools.TestCase.expectThat`` method, which implements
-  delayed assertions. (Thomi Richards)
-
-* Docs are now built as part of the Travis-CI build, reducing the chance of
-  Read The Docs being broken accidentally. (Daniel Watkins, #1158773)
-
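A sketch of the delayed-assertion ``expectThat`` method added in this
release; the class and test names are illustrative::

    from testtools import TestCase
    from testtools.matchers import Equals

    class TestTotals(TestCase):

        def test_delayed_assertions(self):
            # Unlike assertThat, a failing expectThat does not abort
            # the test; every expectation runs and the test is marked
            # failed at the end if any of them mismatched.
            self.expectThat(1 + 1, Equals(2))
            self.expectThat(2 + 2, Equals(4))
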
-0.9.34
-~~~~~~
-
-Improvements
-------------
-
-* Added ability for ``testtools.TestCase`` instances to force a test to
-  fail, even if no assertions failed. (Thomi Richards)
-
-* Added ``testtools.content.StacktraceContent``, a content object that
-  automatically creates a ``StackLinesContent`` object containing the current
-  stack trace. (Thomi Richards)
-
-* ``AnyMatch`` is now exported properly in ``testtools.matchers``.
-  (Robert Collins, Rob Kennedy, github #44)
-
-* In Python 3.3, if there are duplicate test ids, tests.sort() will
-  fail and raise TypeError. sorted_tests() now detects duplicate test
-  ids up front, to ensure that all test ids are unique.
-  (Kui Shi, #1243922)
-
-* ``json_content`` is now in the ``__all__`` attribute for
-  ``testtools.content``. (Robert Collins)
-
-* Network tests now bind to 127.0.0.1 to avoid (even temporary) network
-  visible ports. (Benedikt Morbach, github #46)
-
-* Test listing now explicitly indicates by printing 'Failed to import' and
-  exiting (2) when an import has failed rather than only signalling through the
-  test name. (Robert Collins, #1245672)
-
-* ``test_compat.TestDetectEncoding.test_bom`` now works on Python 3.3 - the
-  corner case with euc_jp is no longer permitted in Python 3.3 so we can
-  skip it. (Martin [gz], #1251962)
-
-0.9.33
-~~~~~~
-
-Improvements
-------------
-
-* Added ``addDetailUniqueName`` method to ``testtools.TestCase`` class.
-  (Thomi Richards)
-
-* Removed some unused code from ``testtools.content.TracebackContent``.
-  (Thomi Richards)
-
-* Added ``testtools.StackLinesContent``: a content object for displaying
-  pre-processed stack lines. (Thomi Richards)
-
-* ``StreamSummary`` was calculating testsRun incorrectly: ``exists`` status
-  tests were counted as run tests, but they are not.
-  (Robert Collins, #1203728)
-
-0.9.32
-~~~~~~
-
-Regular maintenance release.  Special thanks to new contributor, Xiao Hanyu!
-
-Changes
--------
-
- * ``testtools.compat._format_exc_info`` has been refactored into several
-   smaller functions. (Thomi Richards)
-
-Improvements
-------------
-
-* Stacktrace filtering no longer hides unittest frames that are surrounded by
-  user frames. We will reenable this when we figure out a better algorithm for
-  retaining meaning. (Robert Collins, #1188420)
-
-* The compatibility code for skipped tests with unittest2 was broken.
-  (Robert Collins, #1190951)
-
-* Various documentation improvements (Clint Byrum, Xiao Hanyu).
-
-0.9.31
-~~~~~~
-
-Improvements
-------------
-
-* ``ExpectedException`` now accepts a msg parameter for describing an error,
-  much the same as assertEquals etc. (Robert Collins)
-
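A sketch of the ``msg`` parameter described above; the regular expression
and message text are illustrative::

    from testtools import ExpectedException, TestCase

    class TestParsing(TestCase):

        def test_rejects_empty_input(self):
            # msg annotates the failure report if the block does not
            # raise a ValueError matching the regular expression.
            with ExpectedException(ValueError, 'empty input',
                                   msg='parser input validation'):
                raise ValueError('empty input')
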
-0.9.30
-~~~~~~
-
-A new sort of TestResult, the StreamResult has been added, as a prototype for
-a revised standard library test result API.  Expect this API to change.
-Although we will try to preserve compatibility for early adopters, it is
-experimental and we might need to break it if it turns out to be unsuitable.
-
-Improvements
-------------
-* ``assertRaises`` works properly for exception classes that have custom
-  metaclasses.
-
-* ``ConcurrentTestSuite`` was silently eating exceptions that propagate from
-  the test.run(result) method call. Ignoring them is fine in a normal test
-  runner, but when they happen in a different thread, the thread that called
-  suite.run() is not in the stack anymore, and the exceptions are lost. We now
-  create a synthetic test recording any such exception.
-  (Robert Collins, #1130429)
-
-* Fixed SyntaxError raised in ``_compat2x.py`` when installing via Python 3.
-  (Will Bond, #941958)
-
-* New class ``StreamResult`` which defines the API for the new result type.
-  (Robert Collins)
-
-* New support class ``ConcurrentStreamTestSuite`` for convenient construction
-  and utilisation of ``StreamToQueue`` objects. (Robert Collins)
-
-* New support class ``CopyStreamResult`` which forwards events onto multiple
-  ``StreamResult`` objects (each of which receives all the events).
-  (Robert Collins)
-
-* New support class ``StreamSummary`` which summarises a ``StreamResult``
-  stream compatibly with ``TestResult`` code. (Robert Collins)
-
-* New support class ``StreamTagger`` which adds or removes tags from
-  ``StreamResult`` events. (Robert Collins)
-
-* New support class ``StreamToDict`` which converts a ``StreamResult`` to a
-  series of dicts describing a test. Useful for writing trivial stream
-  analysers. (Robert Collins)
-
-* New support class ``TestControl`` which permits cancelling an in-progress
-  run. (Robert Collins)
-
-* New support class ``StreamFailFast`` which calls a ``TestControl`` instance
-  to abort the test run when a failure is detected. (Robert Collins)
-
-* New support class ``ExtendedToStreamDecorator`` which translates both regular
-  unittest TestResult API calls and the ExtendedTestResult API which testtools
-  has supported into the StreamResult API. ExtendedToStreamDecorator also
-  forwards calls made in the StreamResult API, permitting it to be used
-  anywhere a StreamResult is used. Key TestResult query methods like
-  wasSuccessful and shouldStop are synchronised with the StreamResult API
-  calls, but the detailed statistics like the list of errors are not - a
-  separate consumer will be created to support that.
-  (Robert Collins)
-
-* New support class ``StreamToExtendedDecorator`` which translates
-  ``StreamResult`` API calls into ``ExtendedTestResult`` (or any older
-  ``TestResult``) calls. This permits using un-migrated result objects with
-  new runners / tests. (Robert Collins)
-
-* New support class ``StreamToQueue`` for sending messages to one
-  ``StreamResult`` from multiple threads. (Robert Collins)
-
-* New support class ``TimestampingStreamResult`` which adds a timestamp to
-  events with no timestamp. (Robert Collins)
-
-* New ``TestCase`` decorator ``DecorateTestCaseResult`` that adapts the
-  ``TestResult`` or ``StreamResult`` a case will be run with, for ensuring that
-  a particular result object is used even if the runner running the test doesn't
-  know to use it. (Robert Collins)
-
-* New test support class ``testtools.testresult.doubles.StreamResult``, which
-  captures all the StreamResult events. (Robert Collins)
-
-* ``PlaceHolder`` can now hold tags, and applies them before, and removes them
-  after, the test. (Robert Collins)
-
-* ``PlaceHolder`` can now hold timestamps, and applies them before the test and
-  then before the outcome. (Robert Collins)
-
-* ``StreamResultRouter`` added. This is useful for demultiplexing - e.g. for
-  partitioning analysis of events or sending feedback encapsulated in
-  StreamResult events back to their source. (Robert Collins)
-
-* ``testtools.run.TestProgram`` now supports the ``TestRunner`` taking over
-  responsibility for formatting the output of ``--list-tests``.
-  (Robert Collins)
-
-* The error message for setUp and tearDown upcall errors was broken on Python
-  3.4. (Monty Taylor, Robert Collins, #1140688)
-
-* The repr of object() on pypy includes the object id, which was breaking a
-  test that accidentally depended on the CPython repr for object().
-  (Jonathan Lange)
-
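A sketch of driving the experimental API above through ``StreamSummary``;
the ``test_status`` values follow the subunit vocabulary, and since the
API is expected to change the exact call details here are an assumption::

    from testtools import StreamSummary

    summary = StreamSummary()
    summary.startTestRun()
    summary.status(test_id='test_foo', test_status='inprogress')
    summary.status(test_id='test_foo', test_status='success')
    summary.stopTestRun()
    assert summary.wasSuccessful()
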
-0.9.29
-~~~~~~
-
-A simple bug fix, and better error messages when you don't up-call.
-
-Changes
--------
-
-* ``testtools.content_type.ContentType`` incorrectly used ',' rather than ';'
-  to separate parameters. (Robert Collins)
-
-Improvements
-------------
-
-* ``testtools.compat.unicode_output_stream`` was wrapping a stream encoder
-  around ``io.StringIO`` and ``io.TextIOWrapper`` objects, which was incorrect.
-  (Robert Collins)
-
-* Report the name of the source file for setUp and tearDown upcall errors.
-  (Monty Taylor)
-
-0.9.28
-~~~~~~
-
-Testtools has moved VCS - https://github.com/testing-cabal/testtools/ is
-the new home. Bug tracking is still on Launchpad, and releases are on Pypi.
-
-We made this change to take advantage of the richer ecosystem of tools around
-Git, and to lower the barrier for new contributors.
-
-Improvements
-------------
-
-* New ``testtools.testcase.attr`` and ``testtools.testcase.WithAttributes``
-  helpers allow marking up test case methods with simple labels. This permits
-  filtering tests with more granularity than organising them into modules and
-  test classes. (Robert Collins)
-
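A sketch of the attribute labels described above; the label and test
names are illustrative::

    from testtools import TestCase
    from testtools.testcase import WithAttributes, attr

    class TestSomething(WithAttributes, TestCase):

        @attr('slow')
        def test_expensive_path(self):
            # The label becomes part of the test id, so list-based
            # runners can filter on it.
            pass
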
-0.9.27
-~~~~~~
-
-Improvements
-------------
-
-* New matcher ``HasLength`` for matching the length of a collection.
-  (Robert Collins)
-
-* New matcher ``MatchesPredicateWithParams`` makes it still easier to create
-  ad hoc matchers. (Robert Collins)
-
-* We have a simpler release process in future - see doc/hacking.rst.
-  (Robert Collins)
-
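Sketches of the two matchers added above; the ``IsDivisibleBy`` name and
its message string are illustrative::

    from testtools.assertions import assert_that
    from testtools.matchers import HasLength, MatchesPredicateWithParams

    # HasLength matches the len() of a collection.
    assert_that([1, 2, 3], HasLength(3))

    # MatchesPredicateWithParams builds a parameterised ad hoc matcher.
    IsDivisibleBy = MatchesPredicateWithParams(
        lambda x, y: x % y == 0, '{0} is not divisible by {1}')
    assert_that(32, IsDivisibleBy(8))
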
-0.9.26
-~~~~~~
-
-Brown paper bag fix: failed to document the need for setup to be able to use
-extras. Compounded by pip not supporting setup_requires.
-
-Changes
--------
-
-* setup.py now can generate egg_info even if extras is not available.
-  Also lists extras in setup_requires for easy_install.
-  (Robert Collins, #1102464)
-
-0.9.25
-~~~~~~
-
-Changes
--------
-
-* ``python -m testtools.run --load-list`` will now preserve any custom suites
-  (such as ``testtools.FixtureSuite`` or ``testresources.OptimisingTestSuite``)
-  rather than flattening them.
-  (Robert Collins, #827175)
-
-* Testtools now depends on extras, a small library split out from it to contain
-  generally useful non-testing facilities. Since extras has been around for a
-  couple of testtools releases now, we're making this into a hard dependency of
-  testtools. (Robert Collins)
-
-* Testtools now uses setuptools rather than distutils so that we can document
-  the extras dependency. (Robert Collins)
-
-Improvements
-------------
-
-* Testtools will no longer override test code registered details called
-  'traceback' when reporting caught exceptions from test code.
-  (Robert Collins, #812793)
-
-0.9.24
-~~~~~~
-
-Changes
--------
-
-* ``testtools.run discover`` will now sort the tests it discovered. This is a 
-  workaround for http://bugs.python.org/issue16709. Non-standard test suites
-  are preserved, and their ``sort_tests()`` method called (if they have such an
-  attribute). ``testtools.testsuite.sorted_tests(suite, True)`` can be used by
-  such suites to do a local sort. (Robert Collins, #1091512)
-
-* ``ThreadsafeForwardingResult`` now defines a stub ``progress`` method, which
-  fixes ``testr run`` of streams containing progress markers (by discarding the 
-  progress data). (Robert Collins, #1019165)
-
-0.9.23
-~~~~~~
-
-Changes
--------
-
-* ``run.TestToolsTestRunner`` now accepts the verbosity, buffer and failfast
-  arguments the upstream python TestProgram code wants to give it, making it
-  possible to support them in a compatible fashion. (Robert Collins)
-
-Improvements
-------------
-
-* ``testtools.run`` now supports the ``-f`` or ``--failfast`` parameter.
-  Previously it was advertised in the help but ignored.
-  (Robert Collins, #1090582)
-
-* ``AnyMatch`` added, a new matcher that matches when any item in a collection
-  matches the given matcher.  (Jonathan Lange)
-
-* Spelling corrections to documentation.  (Vincent Ladeuil)
-
-* ``TestProgram`` now has a sane default for its ``testRunner`` argument.
-  (Vincent Ladeuil)
-
-* The test suite passes on Python 3 again. (Robert Collins)
-
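A sketch of the ``AnyMatch`` matcher described above::

    from testtools.assertions import assert_that
    from testtools.matchers import AnyMatch, Equals

    # Matches because at least one element matches Equals(3).
    assert_that([1, 2, 3], AnyMatch(Equals(3)))
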
-0.9.22
-~~~~~~
-
-Improvements
-------------
-
-* ``content_from_file`` and ``content_from_stream`` now accept seek_offset and
-  seek_whence parameters allowing them to be used to grab less than the full
-  stream, or to be used with StringIO streams. (Robert Collins, #1088693)
-
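A sketch of the new seek parameters; the path and offset are
illustrative::

    import io

    from testtools import TestCase
    from testtools.content import content_from_file

    class TestWithLogDetail(TestCase):

        def test_attaches_log_tail(self):
            # Skip the first 4096 bytes of the (illustrative) log file
            # and attach only the remainder as a detail.
            detail = content_from_file(
                'test.log', seek_offset=4096, seek_whence=io.SEEK_SET)
            self.addDetail('log-tail', detail)
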
-0.9.21
-~~~~~~
-
-Improvements
-------------
-
-* ``DirContains`` correctly exposed, after being accidentally hidden in the
-  great matcher re-organization of 0.9.17.  (Jonathan Lange)
-
-
-0.9.20
-~~~~~~
-
-Three new matchers that'll rock your world.
-
-Improvements
-------------
-
-* New, powerful matchers that match items in a dictionary:
-
-  - ``MatchesDict``, match every key in a dictionary with a key in a
-    dictionary of matchers.  For when the set of expected keys is equal to the
-    set of observed keys.
-
-  - ``ContainsDict``, every key in a dictionary of matchers must be
-    found in a dictionary, and the values for those keys must match.  For when
-    the set of expected keys is a subset of the set of observed keys.
-
-  - ``ContainedByDict``, every key in a dictionary must be found in
-    a dictionary of matchers.  For when the set of expected keys is a superset
-    of the set of observed keys.
-
-  The names are a little confusing, sorry.  We're still trying to figure out
-  how to present the concept in the simplest way possible.
-
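A sketch contrasting two of the dictionary matchers above::

    from testtools.assertions import assert_that
    from testtools.matchers import ContainsDict, Equals, MatchesDict

    observed = {'a': 1, 'b': 2}

    # MatchesDict: the expected keys must equal the observed keys, and
    # every value must satisfy its matcher.
    assert_that(observed, MatchesDict({'a': Equals(1), 'b': Equals(2)}))

    # ContainsDict: the expected keys may be a subset of the observed
    # keys; 'b' is simply not checked here.
    assert_that(observed, ContainsDict({'a': Equals(1)}))
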
-
-0.9.19
-~~~~~~
-
-How embarrassing!  Three releases in two days.
-
-We've worked out the kinks and have confirmation from our downstreams that
-this is all good.  Should be the last release for a little while.  Please
-ignore 0.9.18 and 0.9.17.
-
-Improvements
-------------
-
-* Include the matcher tests in the release, allowing the tests to run and
-  pass from the release tarball.  (Jonathan Lange)
-
-* Fix cosmetic test failures in Python 3.3, introduced during release 0.9.17.
-  (Jonathan Lange)
-
-
-0.9.18
-~~~~~~
-
-Due to an oversight, release 0.9.18 did not contain the new
-``testtools.matchers`` package and was thus completely broken.  This release
-corrects that, returning us all to normality.
-
-0.9.17
-~~~~~~
-
-This release brings better discover support and Python3.x improvements. There
-are still some test failures on Python3.3 but they are cosmetic - the library
-is as usable there as on any other Python 3 release.
-
-Changes
--------
-
-* The ``testtools.matchers`` package has been split up.  No change to the
-  public interface.  (Jonathan Lange)
-
-Improvements
-------------
-
-* ``python -m testtools.run discover . --list`` now works. (Robert Collins)
-
-* Correctly handling of bytes vs text in JSON content type. (Martin [gz])
-
-
-0.9.16
-~~~~~~
-
-Some new matchers and a new content helper for JSON content.
-
-This is the first release of testtools to drop support for Python 2.4 and 2.5.
-If you need support for either of those versions, please use testtools 0.9.15.
-
-Improvements
-------------
-
-* New content helper, ``json_content`` (Jonathan Lange)
-
-* New matchers:
-
-  * ``ContainsAll`` for asserting one thing is a subset of another
-    (Raphaël Badin)
-
-  * ``SameMembers`` for asserting two iterators have the same members.
-    (Jonathan Lange)
-
-* Reraising of exceptions in Python 3 is more reliable. (Martin [gz])
-
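Sketches of the helpers above; the attached structure is illustrative::

    from testtools.assertions import assert_that
    from testtools.content import json_content
    from testtools.matchers import ContainsAll

    # ContainsAll: every listed element must be present in the matchee.
    assert_that([1, 2, 3], ContainsAll([1, 3]))

    # json_content wraps a serialisable structure as a Content object,
    # suitable for TestCase.addDetail.
    detail = json_content({'answer': 42})
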
-
-0.9.15
-~~~~~~
-
-This is the last release to support Python2.4 and 2.5. It brings in a slew of
-improvements to test tagging and concurrency, making it more reliable to run
-large test suites with partitioned workers and easier to reproduce exact test
-ordering in a given worker. See our sister project ``testrepository`` for a
-test runner that uses these features.
-
-Changes
--------
-
-* ``PlaceHolder`` and ``ErrorHolder`` now support being given result details.
-  (Robert Collins)
-
-* ``ErrorHolder`` is now just a function - all the logic is in ``PlaceHolder``.
-  (Robert Collins)
-
-* ``TestResult`` and all other ``TestResult``-like objects in testtools
-  distinguish between global tags and test-local tags, as per the subunit
-  specification.  (Jonathan Lange)
-
-* This is the **last** release of testtools that supports Python 2.4 or 2.5.
-  These releases are no longer supported by the Python community and do not
-  receive security updates. If this affects you, you will need to either
-  stay on this release or perform your own backports.
-  (Jonathan Lange, Robert Collins)
-
-* ``ThreadsafeForwardingResult`` now forwards global tags as test-local tags,
-  making reasoning about the correctness of the multiplexed stream simpler.
-  This preserves the semantic value (what tags apply to a given test) while
-  consuming less stream size (as no negative-tag statement is needed).
-  (Robert Collins, Gary Poster, #986434)
-
-Improvements
-------------
-
-* API documentation corrections. (Raphaël Badin)
-
-* ``ConcurrentTestSuite`` now takes an optional ``wrap_result`` parameter
-  that can be used to wrap the ``ThreadsafeForwardingResults`` created by
-  the suite.  (Jonathan Lange)
-
-* ``Tagger`` added.  It's a new ``TestResult`` that tags all tests sent to
-  it with a particular set of tags.  (Jonathan Lange)
-
-* ``testresultdecorator`` brought over from subunit.  (Jonathan Lange)
-
-* All ``TestResult`` wrappers now correctly forward ``current_tags`` from
-  their wrapped results, meaning that ``current_tags`` can always be relied
-  upon to return the currently active tags on a test result.
-
-* ``TestByTestResult``, a ``TestResult`` that calls a method once per test,
-  added.  (Jonathan Lange)
-
-* ``ThreadsafeForwardingResult`` correctly forwards ``tags()`` calls where
-  only one of ``new_tags`` or ``gone_tags`` are specified.
-  (Jonathan Lange, #980263)
-
-* ``ThreadsafeForwardingResult`` no longer leaks local tags from one test
-  into all future tests run.  (Jonathan Lange, #985613)
-
-* ``ThreadsafeForwardingResult`` has many, many more tests.  (Jonathan Lange)
-
-
-0.9.14
-~~~~~~
-
-Our sister project, `subunit <https://launchpad.net/subunit>`_, was using a
-private API that was deleted in the 0.9.13 release.  This release restores
-that API in order to smooth out the upgrade path.
-
-If you don't use subunit, then this release won't matter very much to you.
-
-
-0.9.13
-~~~~~~
-
-Plenty of new matchers and quite a few critical bug fixes (especially to do
-with stack traces from failed assertions).  A net win for all.
-
-Changes
--------
-
-* ``MatchesAll`` now takes a ``first_only`` keyword argument that changes how
-  mismatches are displayed.  If you were previously passing matchers to
-  ``MatchesAll`` with keyword arguments, then this change might affect your
-  test results.  (Jonathan Lange)
-
-Improvements
-------------
-
-* Actually hide all of the testtools stack for assertion failures. The
-  previous release promised clean stack, but now we actually provide it.
-  (Jonathan Lange, #854769)
-
-* ``assertRaises`` now includes the ``repr`` of the callable that failed to raise
-  properly. (Jonathan Lange, #881052)
-
-* Asynchronous tests no longer hang when run with trial.
-  (Jonathan Lange, #926189)
-
-* ``Content`` objects now have an ``as_text`` method to convert their contents
-  to Unicode text.  (Jonathan Lange)
-
-* Failed equality assertions now line up. (Jonathan Lange, #879339)
-
-* ``FullStackRunTest`` no longer aborts the test run if a test raises an
-  error.  (Jonathan Lange)
-
-* ``MatchesAll`` and ``MatchesListwise`` both take a ``first_only`` keyword
-  argument.  If True, they will report only on the first mismatch they find,
-  and not continue looking for other possible mismatches.
-  (Jonathan Lange)
-
-* New helper, ``Nullary`` that turns callables with arguments into ones that
-  don't take arguments.  (Jonathan Lange)
-
-* New matchers:
-
-  * ``DirContains`` matches the contents of a directory.
-    (Jonathan Lange, James Westby)
-
-  * ``DirExists`` matches if a directory exists.
-    (Jonathan Lange, James Westby)
-
-  * ``FileContains`` matches the contents of a file.
-    (Jonathan Lange, James Westby)
-
-  * ``FileExists`` matches if a file exists.
-    (Jonathan Lange, James Westby)
-
-  * ``HasPermissions`` matches the permissions of a file.  (Jonathan Lange)
-
-  * ``MatchesPredicate`` matches if a predicate is true.  (Jonathan Lange)
-
-  * ``PathExists`` matches if a path exists.  (Jonathan Lange, James Westby)
-
-  * ``SamePath`` matches if two paths are the same.  (Jonathan Lange)
-
-  * ``TarballContains`` matches the contents of a tarball.  (Jonathan Lange)
-
-* ``MultiTestResult`` supports the ``tags`` method.
-  (Graham Binns, Francesco Banconi, #914279)
-
-* ``ThreadsafeForwardingResult`` supports the ``tags`` method.
-  (Graham Binns, Francesco Banconi, #914279)
-
-* ``ThreadsafeForwardingResult`` no longer includes semaphore acquisition time
-  in the test duration (for implicitly timed test runs).
-  (Robert Collins, #914362)
-
-0.9.12
-~~~~~~
-
-This is a very big release.  We've made huge improvements on three fronts:
- 1. Test failures are way nicer and easier to read
- 2. Matchers and ``assertThat`` are much more convenient to use
- 3. Correct handling of extended unicode characters
-
-We've trimmed off the fat from the stack trace you get when tests fail, we've
-cut out the bits of error messages that just didn't help, we've made it easier
-to annotate mismatch failures, to compare complex objects and to match raised
-exceptions.
-
-Testing code was never this fun.
-
-Changes
--------
-
-* ``AfterPreproccessing`` renamed to ``AfterPreprocessing``, which is a more
-  correct spelling.  Old name preserved for backwards compatibility, but is
-  now deprecated.  Please stop using it.
-  (Jonathan Lange, #813460)
-
-* ``assertThat`` raises ``MismatchError`` instead of
-  ``TestCase.failureException``.  ``MismatchError`` is a subclass of
-  ``AssertionError``, so in most cases this change will not matter. However,
-  if ``self.failureException`` has been set to a non-default value, then
-  mismatches will become test errors rather than test failures.
-
-* ``gather_details`` takes two dicts, rather than two detailed objects.
-  (Jonathan Lange, #801027)
-
-* ``MatchesRegex`` mismatch now says "<value> does not match /<regex>/" rather
-  than "<regex> did not match <value>". The regular expression contains fewer
-  backslashes too. (Jonathan Lange, #818079)
-
-* Tests that run with ``AsynchronousDeferredRunTest`` now have the ``reactor``
-  attribute set to the running reactor. (Jonathan Lange, #720749)
-
-Improvements
-------------
-
-* All public matchers are now in ``testtools.matchers.__all__``.
-  (Jonathan Lange, #784859)
-
-* ``assertThat`` can actually display mismatches and matchers that contain
-  extended unicode characters. (Jonathan Lange, Martin [gz], #804127)
-
-* ``assertThat`` output is much less verbose, displaying only what the mismatch
-  tells us to display. Old-style verbose output can be had by passing
-  ``verbose=True`` to assertThat. (Jonathan Lange, #675323, #593190)
-
-* ``assertThat`` accepts a message which will be used to annotate the matcher.
-  This can be given as a third parameter or as a keyword parameter.
-  (Robert Collins)
-
-* Automated the Launchpad part of the release process.
-  (Jonathan Lange, #623486)
-
-* Correctly display non-ASCII unicode output on terminals that claim to have a
-  unicode encoding. (Martin [gz], #804122)
-
-* ``DocTestMatches`` correctly handles unicode output from examples, rather
-  than raising an error. (Martin [gz], #764170)
-
-* ``ErrorHolder`` and ``PlaceHolder`` added to docs. (Jonathan Lange, #816597)
-
-* ``ExpectedException`` now matches any exception of the given type by
-  default, and also allows specifying a ``Matcher`` rather than a mere regular
-  expression. (Jonathan Lange, #791889)
-
-* ``FixtureSuite`` added, allows test suites to run with a given fixture.
-  (Jonathan Lange)
-
-* Hide testtools's own stack frames when displaying tracebacks, making it
-  easier for test authors to focus on their errors.
-  (Jonathan Lange, Martin [gz], #788974)
-
-* Less boilerplate displayed in test failures and errors.
-  (Jonathan Lange, #660852)
-
-* ``MatchesException`` now allows you to match exceptions against any matcher,
-  rather than just regular expressions.  (Jonathan Lange, #791889)
-
-* ``MatchesException`` now permits a tuple of types rather than a single type
-  (when using the type matching mode).  (Robert Collins)
-
-* ``MatchesStructure.byEquality`` added to make the common case of matching
-  many attributes by equality much easier.  ``MatchesStructure.byMatcher``
-  added in case folk want to match by things other than equality.
-  (Jonathan Lange)
-
-* New convenience assertions, ``assertIsNone`` and ``assertIsNotNone``.
-  (Christian Kampka)
-
-* New matchers:
-
-  * ``AllMatch`` matches many values against a single matcher.
-    (Jonathan Lange, #615108)
-
-  * ``Contains``. (Robert Collins)
-
-  * ``GreaterThan``. (Christian Kampka)
-
-* New helper, ``safe_hasattr`` added. (Jonathan Lange)
-
-* ``reraise`` added to ``testtools.compat``. (Jonathan Lange)
-
-
-0.9.11
-~~~~~~
-
-This release brings consistent use of super for better compatibility with
-multiple inheritance, fixed Python 3 support, improvements in fixture and
-matcher output and a compat helper for testing libraries that deal with
-bytestrings.
-
-Changes
--------
-
-* ``TestCase`` now uses super to call base ``unittest.TestCase`` constructor,
-  ``setUp`` and ``tearDown``. (Tim Cole, #771508)
-
-* If, when calling ``useFixture`` an error occurs during fixture set up, we
-  still attempt to gather details from the fixture. (Gavin Panella)
-
-
-Improvements
-------------
-
-* Additional compat helper for ``BytesIO`` for libraries that build on
-  testtools and are working on Python 3 porting. (Robert Collins)
-
-* Corrected documentation for ``MatchesStructure`` in the test authors
-  document.  (Jonathan Lange)
-
-* ``LessThan`` error message now says something that is logically correct.
-  (Gavin Panella, #762008)
-
-* Multiple details from a single fixture are now kept separate, rather than
-  being mooshed together. (Gavin Panella, #788182)
-
-* Python 3 support now back in action. (Martin [gz], #688729)
-
-* ``try_import`` and ``try_imports`` have a callback that is called whenever
-  they fail to import a module.  (Martin Pool)
-
-
-0.9.10
-~~~~~~
-
-The last release of testtools could not be easy_installed.  This is considered
-severe enough for a re-release.
-
-Improvements
-------------
-
-* Include ``doc/`` in the source distribution, making testtools installable
-  from PyPI again. (Tres Seaver, #757439)
-
-
-0.9.9
-~~~~~
-
-Many, many new matchers, vastly expanded documentation, stacks of bug fixes,
-better unittest2 integration.  If you've ever wanted to try out testtools but
-been afraid to do so, this is the release to try.
-
-
-Changes
--------
-
-* The timestamps generated by ``TestResult`` objects when no timing data has
-  been received are now datetime-with-timezone, which allows them to be
-  sensibly serialised and transported. (Robert Collins, #692297)
-
-Improvements
-------------
-
-* ``AnnotatedMismatch`` now correctly returns details.
-  (Jonathan Lange, #724691)
-
-* distutils integration for the testtools test runner. Can now use it for
-  'python setup.py test'. (Christian Kampka, #693773)
-
-* ``EndsWith`` and ``KeysEqual`` now in testtools.matchers.__all__.
-  (Jonathan Lange, #692158)
-
-* ``MatchesException`` extended to support a regular expression check against
-  the str() of a raised exception.  (Jonathan Lange)
-
-* ``MultiTestResult`` now forwards the ``time`` API. (Robert Collins, #692294)
-
-* ``MultiTestResult`` now documented in the manual. (Jonathan Lange, #661116)
-
-* New content helpers ``content_from_file``, ``content_from_stream`` and
-  ``attach_file`` make it easier to attach file-like objects to a
-  test. (Jonathan Lange, Robert Collins, #694126)
-
-* New ``ExpectedException`` context manager to help write tests against things
-  that are expected to raise exceptions. (Aaron Bentley)
-
-* New matchers:
-
-  * ``MatchesListwise`` matches an iterable of matchers against an iterable
-    of values. (Michael Hudson-Doyle)
-
-  * ``MatchesRegex`` matches a string against a regular expression.
-    (Michael Hudson-Doyle)
-
-  * ``MatchesStructure`` matches attributes of an object against given
-    matchers.  (Michael Hudson-Doyle)
-
-  * ``AfterPreproccessing`` matches values against a matcher after passing them
-    through a callable.  (Michael Hudson-Doyle)
-
-  * ``MatchesSetwise`` matches an iterable of matchers against an iterable of
-    values, without regard to order.  (Michael Hudson-Doyle)
-
-* ``setup.py`` can now build a snapshot when Bazaar is installed but the tree
-  is not a Bazaar tree. (Jelmer Vernooij)
-
-* Support for running tests using distutils. (Christian Kampka, #726539)
-
-* Vastly improved and extended documentation. (Jonathan Lange)
-
-* Use unittest2 exception classes if available. (Jelmer Vernooij)
-
-
-0.9.8
-~~~~~
-
-In this release we bring some very interesting improvements:
-
-* new matchers for exceptions, sets, lists, dicts and more.
-
-* experimental (works but the contract isn't supported) Twisted reactor
-  support.
-
-* The built in runner can now list tests and filter tests (the -l and
-  --load-list options).
-
-Changes
--------
-
-* addUnexpectedSuccess is translated to addFailure for test results that don't
-  know about addUnexpectedSuccess.  Further, it fails the entire result for
-  all testtools TestResults (i.e. wasSuccessful() returns False after
-  addUnexpectedSuccess has been called). Note that when using a delegating
-  result such as ThreadsafeForwardingResult, MultiTestResult or
-  ExtendedToOriginalDecorator, the behaviour of addUnexpectedSuccess is
-  determined by the delegated-to result(s).
-  (Jonathan Lange, Robert Collins, #654474, #683332)
-
-* startTestRun will reset any errors on the result.  That is, wasSuccessful()
-  will always return True immediately after startTestRun() is called. This
-  only applies to delegated test results (ThreadsafeForwardingResult,
-  MultiTestResult and ExtendedToOriginalDecorator) if the delegated-to result
-  is a testtools test result - we cannot reliably reset the state of unknown
-  test result class instances. (Jonathan Lange, Robert Collins, #683332)
-
-* Responsibility for running test cleanups has been moved to ``RunTest``.
-  This change does not affect public APIs and can be safely ignored by test
-  authors.  (Jonathan Lange, #662647)
-
-Improvements
-------------
-
-* New matchers:
-
-  * ``EndsWith`` which complements the existing ``StartsWith`` matcher.
-    (Jonathan Lange, #669165)
-
-  * ``MatchesException`` matches an exception class and parameters. (Robert
-    Collins)
-
-  * ``KeysEqual`` matches a dictionary with particular keys.  (Jonathan Lange)
-
-* ``assertIsInstance`` supports a custom error message to be supplied, which
-  is necessary when using ``assertDictEqual`` on Python 2.7 with a
-  ``testtools.TestCase`` base class. (Jelmer Vernooij)
-
-* Experimental support for running tests that return Deferreds.
-  (Jonathan Lange, Martin [gz])
-
-* Provide a per-test decorator, run_test_with, to specify which RunTest
-  object to use for a given test.  (Jonathan Lange, #657780)
-
-* Fix the runTest parameter of TestCase to actually work, rather than raising
-  a TypeError.  (Jonathan Lange, #657760)
-
-* Non-release snapshots of testtools will now work with buildout.
-  (Jonathan Lange, #613734)
-
-* Malformed SyntaxErrors no longer blow up the test suite.  (Martin [gz])
-
-* ``MismatchesAll.describe`` no longer appends a trailing newline.
-  (Michael Hudson-Doyle, #686790)
-
-* New helpers for conditionally importing modules, ``try_import`` and
-  ``try_imports``.  (Jonathan Lange)
-
-* ``Raises`` added to the ``testtools.matchers`` module - matches if the
-  supplied callable raises, and delegates to an optional matcher for validation
-  of the exception. (Robert Collins)
-
-* ``raises`` added to the ``testtools.matchers`` module - matches if the
-  supplied callable raises and delegates to ``MatchesException`` to validate
-  the exception. (Jonathan Lange)
-
-* Tests will now pass on Python 2.6.4: an ``Exception`` change made only in
-  2.6.4 and reverted in Python 2.6.5 was causing test failures on that version.
-  (Martin [gz], #689858)
-
-* ``testtools.TestCase.useFixture`` has been added to glue with fixtures nicely.
-  (Robert Collins)
-
-* ``testtools.run`` now supports ``-l`` to list tests rather than executing
-  them. This is useful for integration with external test analysis/processing
-  tools like subunit and testrepository. (Robert Collins)
-
-* ``testtools.run`` now supports ``--load-list``, which takes a file containing
-  test ids, one per line, and intersects those ids with the tests found. This
-  allows fine grained control of what tests are run even when the tests cannot
-  be named as objects to import (e.g. due to test parameterisation via
-  testscenarios). (Robert Collins)
-
-* Update documentation to say how to use testtools.run() on Python 2.4.
-  (Jonathan Lange, #501174)
-
-* ``text_content`` conveniently converts a Python string to a Content object.
-  (Jonathan Lange, James Westby)
-
-
-
-0.9.7
-~~~~~
-
-Lots of little cleanups in this release; many small improvements to make your
-testing life more pleasant.
-
-Improvements
-------------
-
-* Cleanups can raise ``testtools.MultipleExceptions`` if they have multiple
-  exceptions to report. For instance, a cleanup which is itself responsible for
-  running several different internal cleanup routines might use this.
-
-* Code duplication between assertEqual and the matcher Equals has been removed.
-
-* In normal circumstances, a TestCase will no longer share details with clones
-  of itself. (Andrew Bennetts, bug #637725)
-
-* Fewer exception object cycles are generated (reduces peak memory use between
-  garbage collection). (Martin [gz])
-
-* New matchers 'DoesNotStartWith' and 'StartsWith' contributed by Canonical
-  from the Launchpad project. Written by James Westby.
-
-* Timestamps as produced by subunit protocol clients are now forwarded in the
-  ThreadsafeForwardingResult so correct test durations can be reported.
-  (Martin [gz], Robert Collins, #625594)
-
-* With unittest from Python 2.7 skipped tests will now show only the reason
-  rather than a serialisation of all details. (Martin [gz], #625583)
-
-* The testtools release process is now a little better documented and a little
-  smoother.  (Jonathan Lange, #623483, #623487)
-
-
-0.9.6
-~~~~~
-
-Nothing major in this release, just enough small bits and pieces to make it
-useful enough to upgrade to.
-
-In particular, a serious bug in assertThat() has been fixed, it's easier to
-write Matchers, there's a TestCase.patch() method for those inevitable monkey
-patches and TestCase.assertEqual gives slightly nicer errors.
-
-Improvements
-------------
-
-* 'TestCase.assertEqual' now formats errors a little more nicely, in the
-  style of bzrlib.
-
-* Added `PlaceHolder` and `ErrorHolder`, TestCase-like objects that can be
-  used to add results to a `TestResult`.
-
-* 'Mismatch' now takes optional description and details parameters, so
-  custom Matchers aren't compelled to make their own subclass.
-
-* jml added a built-in UTF8_TEXT ContentType to make it slightly easier to
-  add details to test results. See bug #520044.
-
-* Fix a bug in our built-in matchers where assertThat would blow up if any
-  of them failed. All built-in mismatch objects now provide get_details().
-
-* New 'Is' matcher, which lets you assert that a thing is identical to
-  another thing.
-
-* New 'LessThan' matcher which lets you assert that a thing is less than
-  another thing.
-
-* TestCase now has a 'patch()' method to make it easier to monkey-patch
-  objects in tests. See the manual for more information. Fixes bug #310770.
-
-* MultiTestResult methods now pass back return values from the results it
-  forwards to.
-
-0.9.5
-~~~~~
-
-This release fixes some obscure traceback formatting issues that probably
-weren't affecting you but were certainly breaking our own test suite.
-
-Changes
--------
-
-* Jamu Kakar has updated classes in testtools.matchers and testtools.runtest
-  to be new-style classes, fixing bug #611273.
-
-Improvements
-------------
-
-* Martin[gz] fixed traceback handling to handle cases where extract_tb returns
-  a source line of None. Fixes bug #611307.
-
-* Martin[gz] fixed a unicode issue that was causing the tests to fail,
-  closing bug #604187.
-
-* testtools now handles string exceptions (although why would you want to use
-  them?) and formats their tracebacks correctly. Thanks to Martin[gz] for
-  fixing bug #592262.
-
-0.9.4
-~~~~~
-
-This release overhauls the traceback formatting layer to deal with Python 2
-line numbers and traceback objects often being local user encoded strings
-rather than unicode objects. Test discovery has also been added and Python 3.1
-is also supported. Finally, the Mismatch protocol has been extended to let
-Matchers collaborate with tests in supplying detailed data about failures.
-
-Changes
--------
-
-* testtools.utils has been renamed to testtools.compat. Importing
-  testtools.utils will now generate a deprecation warning.
-
-Improvements
-------------
-
-* Add machinery for Python 2 to create unicode tracebacks like those used by
-  Python 3. This means testtools no longer throws on encountering non-ascii
-  filenames, source lines, or exception strings when displaying test results.
-  Largely contributed by Martin[gz] with some tweaks from Robert Collins.
-
-* James Westby has supplied test discovery support using the Python 2.7
-  TestRunner in testtools.run. This requires the 'discover' module. This
-  closes bug #250764.
-
-* Python 3.1 is now supported, thanks to Martin[gz] for a partial patch.
-  This fixes bug #592375.
-
-* TestCase.addCleanup has had its docstring corrected about when cleanups run.
-
-* TestCase.skip is now deprecated in favour of TestCase.skipTest, which is the
-  Python2.7 spelling for skip. This closes bug #560436.
-
-* Tests work on IronPython patch from Martin[gz] applied.
-
-* Thanks to a patch from James Westby testtools.matchers.Mismatch can now
-  supply a get_details method, which assertThat will query to provide
-  additional attachments. This can be used to provide additional detail
-  about the mismatch that doesn't suit being included in describe(). For
-  instance, if the match process was complex, a log of the process could be
-  included, permitting debugging.
-
-* testtools.testresults.real._StringException will now answer __str__ if its
-  value is unicode by encoding with UTF8, and vice versa to answer __unicode__.
-  This permits subunit decoded exceptions to contain unicode and still format
-  correctly.
-
-0.9.3
-~~~~~
-
-More matchers, Python 2.4 support, faster test cloning by switching to copy
-rather than deepcopy and better output when exceptions occur in cleanups are
-the defining characteristics of this release.
-
-Improvements
-------------
-
-* New matcher "Annotate" that adds a simple string message to another matcher,
-  much like the optional 'message' parameter to standard library assertFoo
-  methods.
-
-* New matchers "Not" and "MatchesAll". "Not" will invert another matcher, and
-  "MatchesAll" that needs a successful match for all of its arguments.
-
-* On Python 2.4, where types.FunctionType cannot be deepcopied, testtools will
-  now monkeypatch copy._deepcopy_dispatch using the same trivial patch that
-  added such support to Python 2.5. The monkey patch is triggered by the
-  absence of FunctionType from the dispatch dict rather than a version check.
-  Bug #498030.
-
-* On Windows the test 'test_now_datetime_now' should now work reliably.
-
-* TestCase.getUniqueInteger and TestCase.getUniqueString now have docstrings.
-
-* TestCase.getUniqueString now takes an optional prefix parameter, so you can
-  now use it in circumstances that forbid strings with '.'s, and such like.
-
-* testtools.testcase.clone_test_with_new_id now uses copy.copy, rather than
-  copy.deepcopy. Tests that need a deeper copy should use the copy protocol to
-  control how they are copied. Bug #498869.
-
-* The backtrace test result output tests should now pass on Windows and other
-  systems where os.sep is not '/'.
-
-* When a cleanUp or tearDown exception occurs, it is now accumulated as a new
-  traceback in the test details, rather than as a separate call to addError / 
-  addException. This makes testtools work better with most TestResult objects
-  and fixes bug #335816.
-
-
-0.9.2
-~~~~~
-
-Python 3 support, more matchers and better consistency with Python 2.7 --
-you'd think that would be enough for a point release. Well, we here on the
-testtools project think that you deserve more.
-
-We've added a hook so that user code can be called just-in-time whenever there
-is an exception, and we've also factored out the "run" logic of test cases so
-that new outcomes can be added without fiddling with the actual flow of logic.
-
-It might sound like small potatoes, but it's changes like these that will
-bring about the end of test frameworks.
-
-
-Improvements
-------------
-
-* Failures in setUp and tearDown now report as failures, not as errors.
-
-* Cleanups now run after tearDown to be consistent with Python 2.7's cleanup
-  feature.
-
-* ExtendedToOriginalDecorator now passes unrecognised attributes through
-  to the decorated result object, permitting other extensions to the
-  TestCase -> TestResult protocol to work.
-
-* It is now possible to trigger code just-in-time after an exception causes
-  a test outcome such as failure or skip. See the testtools MANUAL or
-  ``pydoc testtools.TestCase.addOnException``. (bug #469092)
-
-* New matcher Equals which performs a simple equality test.
-
-* New matcher MatchesAny which looks for a match of any of its arguments.
-
-* TestCase no longer breaks if a TestSkipped exception is raised with no
-  parameters.
-
-* TestCase.run now clones test cases before they are run and runs the clone.
-  This reduces memory footprint in large test runs - state accumulated on
-  test objects during their setup and execution gets freed when the test
-  case has finished running, unless the TestResult object keeps a reference.
-  NOTE: As test cloning uses deepcopy, this can potentially interfere if
-  a test suite has shared state (such as the testscenarios or testresources
-  projects use).  Use the __deepcopy__ hook to control the copying of such
-  objects so that the shared references stay shared.
-
-* Testtools now accepts contributions without copyright assignment under some
-  circumstances. See HACKING for details.
-
-* Testtools now provides a convenient way to run a test suite using the
-  testtools result object: python -m testtools.run testspec [testspec...].
-
-* Testtools now works on Python 3, thanks to Benjamin Peterson.
-
-* Test execution now uses a separate class, testtools.RunTest to run single
-  tests. This can be customised and extended in a more consistent fashion than
-  the previous run method idiom. See pydoc for more information.
-
-* The test doubles that testtools itself uses are now available as part of
-  the testtools API in testtools.testresult.doubles.
-
-* TracebackContent now sets utf8 as the charset encoding, rather than not
-  setting one and encoding with the default encoder.
-
-* With python2.7 testtools.TestSkipped will be the unittest.case.SkipTest
-  exception class, making skips compatible with code that manually raises the
-  standard library exception. (bug #490109)
-
-Changes
--------
-
-* TestCase.getUniqueInteger is now implemented using itertools.count. Thanks
-  to Benjamin Peterson for the patch. (bug #490111)
-
-
-0.9.1
-~~~~~
-
-The new matcher API introduced in 0.9.0 had a small flaw where the matchee
-would be evaluated twice to get a description of the mismatch. This could lead
-to bugs if the act of matching caused side effects to occur in the matchee.
-Since having such side effects isn't desirable, we have changed the API now
-before it has become widespread.
-
-Changes
--------
-
-* Matcher API changed to avoid evaluating matchee twice. Please consult
-  the API documentation.
-
-* TestCase.getUniqueString now uses the test id, not the test method name,
-  which works nicer with parameterised tests.
-
-Improvements
-------------
-
-* Python2.4 is now supported again.
-
-
-0.9.0
-~~~~~
-
-This release of testtools is perhaps the most interesting and exciting one
-it's ever had. We've continued in bringing together the best practices of unit
-testing from across a raft of different Python projects, but we've also
-extended our mission to incorporating unit testing concepts from other
-languages and from our own research, led by Robert Collins.
-
-We now support skipping and expected failures. We'll make sure that you
-up-call setUp and tearDown, avoiding unexpected testing weirdnesses. We're
-now compatible with the Python 2.5, 2.6 and 2.7 unittest library.
-
-All in all, if you are serious about unit testing and want to get the best
-thinking from the whole Python community, you should get this release.
-
-Improvements
-------------
-
-* A new TestResult API has been added for attaching details to test outcomes.
-  This API is currently experimental, but is being prepared with the intent
-  of becoming an upstream Python API. For more details see pydoc
-  testtools.TestResult and the TestCase addDetail / getDetails methods.
-
-* assertThat has been added to TestCase. This new assertion supports
-  a hamcrest-inspired matching protocol. See pydoc testtools.Matcher for
-  details about writing matchers, and testtools.matchers for the included
-  matchers. See http://code.google.com/p/hamcrest/.
-
-* Compatible with Python 2.6 and Python 2.7
-
-* Failing to upcall in setUp or tearDown will now cause a test failure.
-  While the base methods do nothing, failing to upcall is usually a problem
-  in deeper hierarchies, and checking that the root method is called is a
-  simple way to catch this common bug.
-
-* New TestResult decorator ExtendedToOriginalDecorator which handles
-  downgrading extended API calls like addSkip to older result objects that
-  do not support them. This is used internally to make testtools simpler but
-  can also be used to simplify other code built on or for use with testtools.
-
-* New TextTestResult supporting the extended APIs that testtools provides.
-
-* Nose will no longer find 'runTest' tests in classes derived from
-  testtools.testcase.TestCase (bug #312257).
-
-* Supports the Python 2.7/3.1 addUnexpectedSuccess and addExpectedFailure
-  TestResult methods, with a support function 'knownFailure' to let tests
-  trigger these outcomes.
-
-* When using the skip feature with TestResult objects that do not support it
-  a test success will now be reported. Previously an error was reported but
-  production experience has shown that this is too disruptive for projects that
-  are using skips: they cannot get a clean run on down-level result objects.
-
-
-.. _testtools: http://pypi.python.org/pypi/testtools
diff --git a/lib/testtools/README.rst b/lib/testtools/README.rst
deleted file mode 100644
index cddb594..0000000
--- a/lib/testtools/README.rst
+++ /dev/null
@@ -1,92 +0,0 @@
-=========
-testtools
-=========
-
-testtools is a set of extensions to the Python standard library's unit testing
-framework.
-
-These extensions have been derived from years of experience with unit testing
-in Python and come from many different sources.
-
-
-Documentation
--------------
-
-If you would like to learn more about testtools, consult our documentation in
-the 'doc/' directory.  You might like to start at 'doc/overview.rst' or
-'doc/for-test-authors.rst'.
-
-
-Licensing
----------
-
-This project is distributed under the MIT license and copyright is owned by
-Jonathan M. Lange and the testtools authors. See LICENSE for details.
-
-Some code in 'testtools/run.py' is taken from Python's unittest module, and is
-copyright Steve Purcell and the Python Software Foundation; it is distributed
-under the same license as Python. See LICENSE for details.
-
-
-Required Dependencies
----------------------
-
- * Python 2.6+ or 3.0+
-
-If you would like to use testtools for earlier Pythons, please use testtools
-0.9.15.
-
- * extras (helpers that we intend to push into Python itself in the near
-   future).
-
-
-Optional Dependencies
----------------------
-
-If you would like to use our undocumented, unsupported Twisted support, then
-you will need Twisted.
-
-If you want to use ``fixtures`` then you can either install fixtures (e.g. from
-https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
-or alternatively just make sure your fixture objects obey the same protocol.
-
-
-Bug reports and patches
------------------------
-
-Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
-Patches should be submitted as GitHub pull requests, or mailed to the authors.
-See ``doc/hacking.rst`` for more details.
-
-There's no mailing list for this project yet; however, the testing-in-python
-mailing list may be a useful resource:
-
- * Address: testing-in-python at lists.idyll.org
- * Subscription link: http://lists.idyll.org/listinfo/testing-in-python
-
-
-History
--------
-
-testtools used to be called 'pyunit3k'.  The name was changed to avoid
-conflating the library with the Python 3.0 release (commonly referred to as
-'py3k').
-
-
-Thanks
-------
-
- * Canonical Ltd
- * Bazaar
- * Twisted Matrix Labs
- * Robert Collins
- * Andrew Bennetts
- * Benjamin Peterson
- * Jamu Kakar
- * James Westby
- * Martin [gz]
- * Michael Hudson-Doyle
- * Aaron Bentley
- * Christian Kampka
- * Gavin Panella
- * Martin Pool
diff --git a/lib/testtools/doc/Makefile b/lib/testtools/doc/Makefile
deleted file mode 100644
index b5d07af..0000000
--- a/lib/testtools/doc/Makefile
+++ /dev/null
@@ -1,89 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = _build
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html      to make standalone HTML files"
-	@echo "  dirhtml   to make HTML files named index.html in directories"
-	@echo "  pickle    to make pickle files"
-	@echo "  json      to make JSON files"
-	@echo "  htmlhelp  to make HTML files and a HTML help project"
-	@echo "  qthelp    to make HTML files and a qthelp project"
-	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  changes   to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck to check all external links for integrity"
-	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	-rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/testtools.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/testtools.qhc"
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
-	      "run these through (pdf)latex."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/lib/testtools/doc/_static/placeholder.txt b/lib/testtools/doc/_static/placeholder.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/lib/testtools/doc/_templates/placeholder.txt b/lib/testtools/doc/_templates/placeholder.txt
deleted file mode 100644
index e69de29..0000000
diff --git a/lib/testtools/doc/api.rst b/lib/testtools/doc/api.rst
deleted file mode 100644
index 425c818..0000000
--- a/lib/testtools/doc/api.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-testtools API documentation
-===========================
-
-Generated reference documentation for all the public functionality of
-testtools.
-
-Please :doc:`send patches </hacking>` if you notice anything confusing or
-wrong, or that could be improved.
-
-
-.. toctree::
-   :maxdepth: 2
-
-
-testtools
----------
-
-.. automodule:: testtools
-   :members:
-
-
-testtools.matchers
-------------------
-
-.. automodule:: testtools.matchers
-   :members:
diff --git a/lib/testtools/doc/conf.py b/lib/testtools/doc/conf.py
deleted file mode 100644
index de5fdd4..0000000
--- a/lib/testtools/doc/conf.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# testtools documentation build configuration file, created by
-# sphinx-quickstart on Sun Nov 28 13:45:40 2010.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.append(os.path.abspath('.'))
-
-# -- General configuration -----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'testtools'
-copyright = u'2010, The testtools authors'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = 'VERSION'
-# The full version, including alpha/beta/rc tags.
-release = 'VERSION'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of documents that shouldn't be included in the build.
-#unused_docs = []
-
-# List of directories, relative to source directory, that shouldn't be searched
-# for source files.
-exclude_trees = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_use_modindex = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'testtoolsdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-#latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-#latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
-  ('index', 'testtools.tex', u'testtools Documentation',
-   u'The testtools authors', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# Additional stuff for the LaTeX preamble.
-#latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_use_modindex = True
diff --git a/lib/testtools/doc/for-framework-folk.rst b/lib/testtools/doc/for-framework-folk.rst
deleted file mode 100644
index 5c83ab1..0000000
--- a/lib/testtools/doc/for-framework-folk.rst
+++ /dev/null
@@ -1,453 +0,0 @@
-============================
-testtools for framework folk
-============================
-
-Introduction
-============
-
-In addition to having many features :doc:`for test authors
-<for-test-authors>`, testtools also has many bits and pieces that are useful
-for folk who write testing frameworks.
-
-If you are the author of a test runner, are working on a very large
-unit-tested project, are trying to get one testing framework to play nicely
-with another or are hacking away at getting your test suite to run in parallel
-over a heterogeneous cluster of machines, this guide is for you.
-
-This manual is a summary. You can get details by consulting the
-:doc:`testtools API docs </api>`.
-
-
-Extensions to TestCase
-======================
-
-In addition to the ``TestCase`` specific methods, we have extensions for
-``TestSuite`` that also apply to ``TestCase`` (because ``TestCase`` and
-``TestSuite`` follow the Composite pattern).
-
-Custom exception handling
--------------------------
-
-testtools provides a way to control how test exceptions are handled.  To do
-this, add a new exception to ``self.exception_handlers`` on a
-``testtools.TestCase``.  For example::
-
-    >>> self.exception_handlers.insert(-1, (ExceptionClass, handler))
-
-Having done this, if any of ``setUp``, ``tearDown``, or the test method raise
-``ExceptionClass``, ``handler`` will be called with the test case, test result
-and the raised exception.
-
-Use this if you want to add a new kind of test result, that is, if you think
-that ``addError``, ``addFailure`` and so forth are not enough for your needs.
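-
-For illustration, here is a minimal sketch of such a handler (``FooError``
-is a hypothetical exception type; the sketch simply maps it onto the
-existing skip outcome rather than a genuinely new one)::
-
-  def handle_foo(case, result, exc):
-      # Called with the test case, the test result and the raised
-      # exception, per the contract above.
-      result.addSkip(case, reason=str(exc))
-
-  self.exception_handlers.insert(-1, (FooError, handle_foo))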
-
-
-Controlling test execution
---------------------------
-
-If you want to control more than just how exceptions are raised, you can
-provide a custom ``RunTest`` to a ``TestCase``.  The ``RunTest`` object can
-change everything about how the test executes.
-
-To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
-takes a test, an optional list of exception handlers and an optional
-``last_resort`` handler.  Instances returned by the factory must have a ``run()``
-method that takes an optional ``TestResult`` object.
-
-The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
-method, ``tearDown`` and cleanups (see :ref:`addCleanup`) in the normal, vanilla
-way that Python's standard unittest_ does.
-
-To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something
-like this::
-
-  class SomeTests(TestCase):
-      run_tests_with = CustomRunTestFactory
-
-To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do::
-
-  class SomeTests(TestCase):
-      @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
-      def test_something(self):
-          pass
-
-In addition, either of these can be overridden by passing a factory in to the
-``TestCase`` constructor with the optional ``runTest`` argument.
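-
-For instance (reusing the assumed ``CustomRunTestFactory`` from above)::
-
-  test = SomeTests('test_something', runTest=CustomRunTestFactory)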
-
-
-Test renaming
--------------
-
-``testtools.clone_test_with_new_id`` is a function to copy a test case
-instance to one with a new name.  This is helpful for implementing test
-parameterization.
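-
-A sketch of parameterising a single test into two named variants (``test``
-is assumed to be an existing test case instance)::
-
-  from testtools import clone_test_with_new_id
-
-  fast = clone_test_with_new_id(test, test.id() + '(fast)')
-  slow = clone_test_with_new_id(test, test.id() + '(slow)')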
-
-.. _force_failure:
-
-Delayed Test Failure
---------------------
-
-Setting the ``testtools.TestCase.force_failure`` instance variable to True will
-cause ``testtools.RunTest`` to fail the test case after the test has finished.
-This is useful when you want to cause a test to fail, but don't want to
-prevent the remainder of the test code from being executed.
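-
-A sketch (the two check functions are invented for the example)::
-
-  class SomeTests(TestCase):
-
-      def test_thorough(self):
-          if not optional_behaviour_works():
-              # Fail the test at the end, but keep executing the rest.
-              self.force_failure = True
-          self.assertTrue(required_behaviour_works())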
-
-Test placeholders
-=================
-
-Sometimes, it's useful to be able to add things to a test suite that are not
-actually tests.  For example, you might wish to represent import failures
-that occur during test discovery as tests, so that your test result object
-doesn't have to do special work to handle them nicely.
-
-testtools provides two such objects, called "placeholders": ``PlaceHolder``
-and ``ErrorHolder``.  ``PlaceHolder`` takes a test id and an optional
-description.  When it's run, it succeeds.  ``ErrorHolder`` takes a test id,
-an error and an optional short description.  When it's run, it reports that
-error.
-
-These placeholders are best used to log events that occur outside the test
-suite proper, but are still very relevant to its results.
-
-e.g.::
-
-  >>> suite = TestSuite()
-  >>> suite.addTest(PlaceHolder('I record an event'))
-  >>> suite.run(TextTestResult(verbose=True))
-  I record an event                                                   [OK]
-
-
-Test instance decorators
-========================
-
-DecorateTestCaseResult
-----------------------
-
-This object calls out to your code when ``run`` / ``__call__`` are called and
-allows the result object that will be used to run the test to be altered. This
-is very useful when working with a test runner that doesn't know your test case
-requirements. For instance, it can be used to inject a ``unittest2`` compatible
-adapter when someone attempts to run your test suite with a ``TestResult`` that
-does not support ``addSkip`` or other ``unittest2`` methods. Similarly it can
-aid the migration to ``StreamResult``.
-
-e.g.::
-
- >>> suite = TestSuite()
- >>> suite = DecorateTestCaseResult(suite, ExtendedToOriginalDecorator)
-
-Extensions to TestResult
-========================
-
-StreamResult
-------------
-
-``StreamResult`` is a new API for dealing with test case progress that supports
-concurrent and distributed testing without the various issues that
-``TestResult`` has such as buffering in multiplexers.
-
-The design has several key principles:
-
-* Nothing that requires up-front knowledge of all tests.
-
-* Deal with tests running in concurrent environments, potentially distributed
-  across multiple processes (or even machines). This implies allowing multiple
-  tests to be active at once, supplying time explicitly, being able to
-  differentiate between tests running in different contexts and removing any
-  assumption that tests are necessarily in the same process.
-
-* Make the API as simple as possible - each aspect should do one thing well.
-
-The ``TestResult`` API this is intended to replace has three different clients.
-
-* Each executing ``TestCase`` notifies the ``TestResult`` about activity.
-
-* The testrunner running tests uses the API to find out whether the test run
-  had errors, how many tests ran and so on.
-
-* Finally, each ``TestCase`` queries the ``TestResult`` to see whether the test
-  run should be aborted.
-
-With ``StreamResult`` we need to be able to provide a ``TestResult`` compatible
-adapter (``StreamToExtendedDecorator``) to allow incremental migration.
-However, we don't need to conflate things long term. So - we define three
-separate APIs, and merely mix them together to provide the
-``StreamToExtendedDecorator``. ``StreamResult`` is the first of these APIs -
-meeting the needs of ``TestCase`` clients. It handles events generated by
-running tests. See the API documentation for ``testtools.StreamResult`` for
-details.
-
-StreamSummary
--------------
-
-Secondly we define the ``StreamSummary`` API which takes responsibility for
-collating errors, detecting incomplete tests and counting tests. This provides
-a compatible API with those aspects of ``TestResult``. Again, see the API
-documentation for ``testtools.StreamSummary``.
-
-TestControl
------------
-
-Lastly we define the ``TestControl`` API which is used to provide the
-``shouldStop`` and ``stop`` elements from ``TestResult``. Again, see the API
-documentation for ``testtools.TestControl``. ``TestControl`` can be paired with
-a ``StreamFailFast`` to trigger aborting a test run when a failure is observed.
-Aborting multiple workers in a distributed environment requires hooking
-whatever signalling mechanism the distributed environment has up to a
-``TestControl`` in each worker process.
-
-StreamTagger
-------------
-
-A ``StreamResult`` filter that adds or removes tags from events::
-
-    >>> from testtools import StreamTagger
-    >>> sink = StreamResult()
-    >>> result = StreamTagger([sink], set(['add']), set(['discard']))
-    >>> result.startTestRun()
-    >>> # Run tests against result here.
-    >>> result.stopTestRun()
-
-StreamToDict
-------------
-
-A simplified API for dealing with ``StreamResult`` streams. Each test is
-buffered until it completes and then reported as a trivial dict. This makes
-writing analysers very easy - you can ignore all the plumbing and just work
-with the result. e.g.::
-
-    >>> from testtools import StreamToDict
-    >>> def handle_test(test_dict):
-    ...     print(test_dict['id'])
-    >>> result = StreamToDict(handle_test)
-    >>> result.startTestRun()
-    >>> # Run tests against result here.
-    >>> # At stopTestRun() any incomplete buffered tests are announced.
-    >>> result.stopTestRun()
-
-ExtendedToStreamDecorator
--------------------------
-
-This is a hybrid object that combines both the ``Extended`` and ``Stream``
-``TestResult`` APIs into one class, but only emits ``StreamResult`` events.
-This is useful when a ``StreamResult`` stream is desired, but you cannot
-be sure that the tests which will run have been updated to the ``StreamResult``
-API.
-
-StreamToExtendedDecorator
--------------------------
-
-This is a simple converter that emits the ``ExtendedTestResult`` API in
-response to events from the ``StreamResult`` API. Useful when outputting
-``StreamResult`` events from a ``TestCase`` but the supplied ``TestResult``
-does not support the ``status`` and ``file`` methods.
-
-StreamToQueue
--------------
-
-This is a ``StreamResult`` decorator for reporting tests from multiple threads
-at once. Each method submits an event to a supplied Queue object as a simple
-dict. See ``ConcurrentStreamTestSuite`` for a convenient way to use this.
-
-TimestampingStreamResult
-------------------------
-
-This is a ``StreamResult`` decorator for adding timestamps to events that lack
-them. This allows writing the simplest possible generators of events and
-passing the events via this decorator to get timestamped data. As long as
-no buffering/queueing or blocking happens before the timestamper sees the
-event, the timestamp will be as accurate as if the original event had it.
-
-StreamResultRouter
-------------------
-
-This is a ``StreamResult`` which forwards events to an arbitrary set of target
-``StreamResult`` objects. Events that have no forwarding rule are passed on to
-a fallback ``StreamResult`` for processing. The mapping can be changed at
-runtime, allowing great flexibility and responsiveness to changes. Because
-the mapping can change dynamically and the same recipient could appear in
-two different maps, ``startTestRun`` and ``stopTestRun`` handling is
-fine-grained and up to the user.
-
-If no fallback has been supplied, an unroutable event will raise an exception.
-
-For instance::
-
-    >>> router = StreamResultRouter()
-    >>> sink = doubles.StreamResult()
-    >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
-    ...     consume_route=True)
-    >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
-
-Would remove the ``0/`` from the route_code and forward the event like so::
-
-    >>> sink.status(test_id='foo', route_code='1', test_status='uxsuccess')
-
-See ``pydoc testtools.StreamResultRouter`` for details.
-
-TestResult.addSkip
-------------------
-
-This method is called on result objects when a test skips. The
-``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
-dict. These can be reported on in much the same way as successful tests.
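-
-For example, a summary reporter might do something like this (a sketch;
-``skip_reasons`` maps each reason string to the tests skipped for it)::
-
-  for reason, tests in result.skip_reasons.items():
-      print('%s: %d test(s) skipped' % (reason, len(tests)))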
-
-
-TestResult.time
----------------
-
-This method controls the time used by a ``TestResult``, permitting accurate
-timing of test results gathered on different machines or in different threads.
-See pydoc testtools.TestResult.time for more details.
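-
-A sketch of supplying explicit times around a single test (``result`` and
-``test`` are assumed to exist already)::
-
-  from datetime import datetime, timedelta
-
-  start = datetime(2014, 11, 15, 12, 0, 0)
-  result.time(start)                       # take control of the clock
-  result.startTest(test)
-  result.time(start + timedelta(seconds=2))
-  result.stopTest(test)                    # stamped two seconds later
-  result.time(None)                        # resume using the real clock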
-
-
-ThreadsafeForwardingResult
---------------------------
-
-A ``TestResult`` which forwards activity to another test result, but synchronises
-on a semaphore to ensure that all the activity for a single test arrives in a
-batch. This allows simple TestResults which do not expect concurrent test
-reporting to be fed the activity from multiple test threads or processes.
-
-Note that when you provide multiple errors for a single test, the target sees
-each error as a distinct complete test.
-
-
-MultiTestResult
----------------
-
-A test result that dispatches its events to many test results.  Use this
-to combine multiple different test result objects into one test result object
-that can be passed to ``TestCase.run()`` or similar.  For example::
-
-  a = TestResult()
-  b = TestResult()
-  combined = MultiTestResult(a, b)
-  combined.startTestRun()  # Calls a.startTestRun() and b.startTestRun()
-
-Each of the methods on ``MultiTestResult`` will return a tuple of whatever the
-component test results return.
-
-
-TestResultDecorator
--------------------
-
-Not strictly a ``TestResult``, but something that implements the extended
-``TestResult`` interface of testtools.  It can be subclassed to create objects
-that wrap ``TestResults``.
-
-
-TextTestResult
---------------
-
-A ``TestResult`` that provides a text UI very similar to the Python standard
-library UI. Key differences are that it supports the extended outcomes and
-details API, and is completely encapsulated into the result object, permitting
-it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
-are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
-These limitations will be corrected soon.
-
-
-ExtendedToOriginalDecorator
----------------------------
-
-Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to
-meet the testtools ``TestResult`` API.
-
-
-Test Doubles
-------------
-
-In testtools.testresult.doubles there are three test doubles that testtools
-uses for its own testing: ``Python26TestResult``, ``Python27TestResult``,
-``ExtendedTestResult``. These TestResult objects implement a single variation of
-the TestResult API each, and log activity to a list ``self._events``. These are
-made available for the convenience of people writing their own extensions.
-
-
-startTestRun and stopTestRun
-----------------------------
-
-Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called
-before and after the entire test run. 'stopTestRun' is particularly useful for
-test results that wish to produce summary output.
-
-``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun``
-methods, and the default testtools runner will call these methods
-appropriately.
-
-The ``startTestRun`` method will reset any errors, failures and so forth on
-the result, making the result object look as if no tests have been run.
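-
-Roughly, that means::
-
-  result = TestResult()
-  # ... a run that recorded an error or failure ...
-  assert not result.wasSuccessful()
-  result.startTestRun()
-  assert result.wasSuccessful()  # the slate is clean again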
-
-
-Extensions to TestSuite
-=======================
-
-ConcurrentTestSuite
--------------------
-
-A TestSuite for parallel testing. This is used in conjunction with a helper that
-runs a single suite in some parallel fashion (for instance, forking, handing
-off to a subprocess, to a compute cloud, or simple threads).
-ConcurrentTestSuite uses the helper to get a number of separate runnable
-objects, each with a run(result) method, and runs them all in threads, using
-ThreadsafeForwardingResult to coalesce their activity.
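-
-A sketch of such a helper (the even two-way split is arbitrary;
-``iterate_tests`` flattens a suite into its test cases)::
-
-  from unittest import TestSuite
-
-  from testtools import ConcurrentTestSuite
-  from testtools.testsuite import iterate_tests
-
-  def split_suite(suite):
-      tests = list(iterate_tests(suite))
-      midpoint = len(tests) // 2
-      return [TestSuite(tests[:midpoint]), TestSuite(tests[midpoint:])]
-
-  concurrent_suite = ConcurrentTestSuite(suite, split_suite)
-  concurrent_suite.run(result)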
-
-ConcurrentStreamTestSuite
--------------------------
-
-A variant of ConcurrentTestSuite that uses the new StreamResult API instead of
-the TestResult API. ConcurrentStreamTestSuite coordinates running some number
-of test/suites concurrently, with one StreamToQueue per test/suite.
-
-Each test/suite gets given its own ExtendedToStreamDecorator +
-TimestampingStreamResult wrapped StreamToQueue instance, forwarding onto the
-StreamResult that ConcurrentStreamTestSuite.run was called with.
-
-ConcurrentStreamTestSuite is a thin shim and it is easy to implement your own
-specialised form if that is needed.
-
-FixtureSuite
-------------
-
-A test suite that sets up a fixture_ before running any tests, and then tears
-it down after all of the tests are run. The fixture is *not* made available to
-any of the tests due to there being no standard channel for suites to pass
-information to the tests they contain (and we don't have enough data on what
-such a channel would need to achieve to design a good one yet - or even decide
-if it is a good idea).
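-
-For example (a sketch; ``TempDir`` comes from the third-party 'fixtures'
-library and stands in for any fixture, and the suite contents are assumed)::
-
-  from fixtures import TempDir
-  from testtools.testsuite import FixtureSuite
-
-  suite = FixtureSuite(TempDir(), [SomeTests('test_something')])
-  suite.run(result)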
-
-sorted_tests
-------------
-
-Given the composite structure of TestSuite / TestCase, sorting tests is
-problematic - you can't tell what functionality is embedded into custom Suite
-implementations. In order to deliver consistent test orders when using test
-discovery (see http://bugs.python.org/issue16709), testtools flattens and
-sorts tests that have the standard TestSuite, and defines a new method
-sort_tests, which can be used by non-standard TestSuites to know when they
-should sort their tests. An example implementation can be seen at
-``FixtureSuite.sorted_tests``.
-
-If there are duplicate test ids in a suite, ValueError will be raised.
-
-filter_by_ids
--------------
-
-Similarly to ``sorted_tests``, running a subset of tests is problematic - the
-standard run interface provides no way to limit what runs. Rather than
-confounding the two problems (selection and execution) we defined a method
-that filters the tests in a suite (or a case) by their unique test id.
-If you are writing custom wrapping suites, consider implementing filter_by_ids
-to support this (though most wrappers that subclass ``unittest.TestSuite`` will
-work just fine; see ``testtools.testsuite.filter_by_ids`` for details).
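-
-A sketch of its use (the test id here is invented)::
-
-  from testtools.testsuite import filter_by_ids
-
-  filtered = filter_by_ids(suite, ['exampletest.TestSillySquare.test_square'])
-  filtered.run(result)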
-
-Extensions to TestRunner
-========================
-
-To facilitate custom listing of tests, ``testtools.run.TestProgram`` attempts
-to call ``list`` on the ``TestRunner``, falling back to a generic
-implementation if it is not present.
-
-.. _unittest: http://docs.python.org/library/unittest.html
-.. _fixture: http://pypi.python.org/pypi/fixtures
diff --git a/lib/testtools/doc/for-test-authors.rst b/lib/testtools/doc/for-test-authors.rst
deleted file mode 100644
index 5deb7ce..0000000
--- a/lib/testtools/doc/for-test-authors.rst
+++ /dev/null
@@ -1,1485 +0,0 @@
-==========================
-testtools for test authors
-==========================
-
-If you are writing tests for a Python project and you (rather wisely) want to
-use testtools to do so, this is the manual for you.
-
-We assume that you already know Python and that you know something about
-automated testing already.
-
-If you are a test author of an unusually large or unusually unusual test
-suite, you might be interested in :doc:`for-framework-folk`.
-
-You might also be interested in the :doc:`testtools API docs </api>`.
-
-
-Introduction
-============
-
-testtools is a set of extensions to Python's standard unittest module.
-Writing tests with testtools is very much like writing tests with standard
-Python, or with Twisted's "trial_", or nose_, except a little bit easier and
-more enjoyable.
-
-Below, we'll try to give some examples of how to use testtools in its most
-basic way, as well as a sort of feature-by-feature breakdown of the cool bits
-that you could easily miss.
-
-
-The basics
-==========
-
-Here's what a basic testtools unit test looks like::
-
-  from testtools import TestCase
-  from myproject import silly
-
-  class TestSillySquare(TestCase):
-      """Tests for silly square function."""
-
-      def test_square(self):
-          # 'square' takes a number and multiplies it by itself.
-          result = silly.square(7)
-          self.assertEqual(result, 49)
-
-      def test_square_bad_input(self):
-          # 'square' raises a TypeError if it's given bad input, say a
-          # string.
-          self.assertRaises(TypeError, silly.square, "orange")
-
-
-Here you have a class that inherits from ``testtools.TestCase`` and bundles
-together a bunch of related tests.  The tests themselves are methods on that
-class that begin with ``test_``.
-
-Running your tests
-------------------
-
-You can run these tests in many ways.  testtools provides a very basic
-mechanism for doing so::
-
-  $ python -m testtools.run exampletest
-  Tests running...
-  Ran 2 tests in 0.000s
-
-  OK
-
-where 'exampletest' is a module that contains unit tests.  By default,
-``testtools.run`` will *not* recursively search the module or package for unit
-tests.  To do this, you will need to either have the discover_ module
-installed or have Python 2.7 or later, and then run::
-
-  $ python -m testtools.run discover packagecontainingtests
-
-For more information see the Python 2.7 unittest documentation, or::
-
-  $ python -m testtools.run --help
-
-As your testing needs grow and evolve, you will probably want to use a more
-sophisticated test runner.  There are many of these for Python, and almost all
-of them will happily run testtools tests.  In particular:
-
-* testrepository_
-* Trial_
-* nose_
-* unittest2_
-* `zope.testrunner`_ (aka zope.testing)
-
-From now on, we'll assume that you know how to run your tests.
-
-Running tests with Distutils
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you are using Distutils_ to build your Python project, you can use the testtools
-Distutils_ command to integrate testtools into your Distutils_ workflow::
-
-  from distutils.core import setup
-  from testtools import TestCommand
-  setup(name='foo',
-      version='1.0',
-      py_modules=['foo'],
-      cmdclass={'test': TestCommand}
-  )
-
-You can then run::
-
-  $ python setup.py test -m exampletest
-  Tests running...
-  Ran 2 tests in 0.000s
-
-  OK
-
-For more information about the capabilities of the ``TestCommand`` command
-see::
-
-  $ python setup.py test --help
-
-You can use the `setup configuration`_ to specify the default behavior of the
-``TestCommand`` command.
-
-Assertions
-==========
-
-The core of automated testing is making assertions about the way things are,
-and getting a nice, helpful, informative error message when things are not as
-they ought to be.
-
-All of the assertions that you can find in Python standard unittest_ can be
-found in testtools (remember, testtools extends unittest).  testtools changes
-the behaviour of some of those assertions slightly and adds some new
-assertions that you will almost certainly find useful.
-
-
-Improved assertRaises
----------------------
-
-``TestCase.assertRaises`` returns the caught exception.  This is useful for
-asserting more things about the exception than just the type::
-
-  def test_square_bad_input(self):
-      # 'square' raises a TypeError if it's given bad input, say a
-      # string.
-      e = self.assertRaises(TypeError, silly.square, "orange")
-      self.assertEqual("orange", e.bad_value)
-      self.assertEqual("Cannot square 'orange', not a number.", str(e))
-
-Note that this is incompatible with the ``assertRaises`` in unittest2 and
-Python 2.7.
-
-
-ExpectedException
------------------
-
-If you are using a version of Python that supports the ``with`` context
-manager syntax, you might prefer to use that syntax to ensure that code raises
-particular errors.  ``ExpectedException`` does just that.  For example::
-
-  def test_square_root_bad_input_2(self):
-      # 'square' raises a TypeError if it's given bad input.
-      with ExpectedException(TypeError, "Cannot square.*"):
-          silly.square('orange')
-
-The first argument to ``ExpectedException`` is the type of exception you
-expect to see raised.  The second argument is optional, and can be either a
-regular expression or a matcher. If it is a regular expression, the ``str()``
-of the raised exception must match the regular expression. If it is a matcher,
-then the raised exception object must match it. The optional third argument
-``msg`` will cause the raised error to be annotated with that message.
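-
-For instance, a sketch of the matcher and ``msg`` variants, reusing the
-hypothetical ``silly.square`` from above (``AfterPreprocessing`` and
-``StartsWith`` are matchers described later in this document; the wrapper
-applies the string matcher to the exception's ``str()``)::
-
-  def test_square_root_bad_input_3(self):
-      with ExpectedException(
-              TypeError,
-              AfterPreprocessing(str, StartsWith("Cannot square")),
-              msg="square() should reject non-numbers"):
-          silly.square('orange')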
-
-
-assertIn, assertNotIn
----------------------
-
-These two assertions check whether a value is in a sequence and whether a
-value is not in a sequence.  They are "assert" versions of the ``in`` and
-``not in`` operators.  For example::
-
-  def test_assert_in_example(self):
-      self.assertIn('a', 'cat')
-      self.assertNotIn('o', 'cat')
-      self.assertIn(5, list_of_primes_under_ten)
-      self.assertNotIn(12, list_of_primes_under_ten)
-
-
-assertIs, assertIsNot
----------------------
-
-These two assertions check whether values are identical to one another.  This
-is sometimes useful when you want to test something more strict than mere
-equality.  For example::
-
-  def test_assert_is_example(self):
-      foo = [None]
-      foo_alias = foo
-      bar = [None]
-      self.assertIs(foo, foo_alias)
-      self.assertIsNot(foo, bar)
-      self.assertEqual(foo, bar) # They are equal, but not identical
-
-
-assertIsInstance
-----------------
-
-As much as we love duck-typing and polymorphism, sometimes you need to check
-whether or not a value is of a given type.  This method does that.  For
-example::
-
-  def test_assert_is_instance_example(self):
-      now = datetime.now()
-      self.assertIsInstance(now, datetime)
-
-Note that there is no ``assertIsNotInstance`` in testtools currently.
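-
-If you need the negative check, one workaround is to combine the matchers
-described later in this document (a sketch)::
-
-  from testtools.matchers import IsInstance, Not
-
-  def test_assert_is_not_instance_workaround(self):
-      now = datetime.now()
-      # The equivalent in spirit of a hypothetical assertIsNotInstance.
-      self.assertThat(now, Not(IsInstance(str)))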
-
-
-expectFailure
--------------
-
-Sometimes it's useful to write tests that fail.  For example, you might want
-to turn a bug report into a unit test, but you don't know how to fix the bug
-yet.  Or perhaps you want to document a known, temporary deficiency in a
-dependency.
-
-testtools gives you ``TestCase.expectFailure`` to help with this.  You use
-it to say that you expect this assertion to fail.  When the test runs and the
-assertion fails, testtools will report it as an "expected failure".
-
-Here's an example::
-
-  def test_expect_failure_example(self):
-      self.expectFailure(
-          "cats should be dogs", self.assertEqual, 'cats', 'dogs')
-
-As long as 'cats' is not equal to 'dogs', the test will be reported as an
-expected failure.
-
-If ever by some miracle 'cats' becomes 'dogs', then testtools will report an
-"unexpected success".  Unlike standard unittest, testtools treats this as
-something that fails the test suite, like an error or a failure.
-
-
-Matchers
-========
-
-The built-in assertion methods are very useful; they are the bread and butter
-of writing tests.  However, soon enough you will probably want to write your
-own assertions.  Perhaps there are domain specific things that you want to
-check (e.g. assert that two widgets are aligned parallel to the flux grid), or
-perhaps you want to check something that could almost but not quite be found
-in some other standard library (e.g. assert that two paths point to the same
-file).
-
-When you are in such situations, you could either make a base class for your
-project that inherits from ``testtools.TestCase`` and make sure that all of
-your tests derive from that, *or* you could use the testtools ``Matcher``
-system.
-
-
-Using Matchers
---------------
-
-Here's a really basic example using stock matchers found in testtools::
-
-  from testtools import TestCase
-  from testtools.matchers import Equals
-
-  class TestSquare(TestCase):
-      def test_square(self):
-          result = square(7)
-          self.assertThat(result, Equals(49))
-
-The line ``self.assertThat(result, Equals(49))`` is equivalent to
-``self.assertEqual(result, 49)`` and means "assert that ``result`` equals 49".
-The difference is that ``assertThat`` is a more general method that takes some
-kind of observed value (in this case, ``result``) and any matcher object
-(here, ``Equals(49)``).
-
-The matcher object could be absolutely anything that implements the Matcher
-protocol.  This means that you can make more complex matchers by combining
-existing ones::
-
-  def test_square_silly(self):
-      result = square(7)
-      self.assertThat(result, Not(Equals(50)))
-
-Which is roughly equivalent to::
-
-  def test_square_silly(self):
-      result = square(7)
-      self.assertNotEqual(result, 50)
-
-
-``assert_that`` Function
-------------------------
-
-In addition to ``self.assertThat``, testtools also provides the
-``assert_that`` function in ``testtools.assertions``.  This behaves like the
-method version does::
-
-    from testtools.assertions import assert_that
-    from testtools.matchers import Equals, Not
-
-    class TestSquare(TestCase):
-
-        def test_square(self):
-            result = square(7)
-            assert_that(result, Equals(49))
-
-        def test_square_silly(self):
-            result = square(7)
-            assert_that(result, Not(Equals(50)))
-
-
-Delayed Assertions
-~~~~~~~~~~~~~~~~~~
-
-A failure in the ``self.assertThat`` method will immediately fail the test: No
-more test code will be run after the assertion failure.
-
-The ``expectThat`` method behaves the same as ``assertThat`` with one
-exception: when failing the test it does so at the end of the test code rather
-than when the mismatch is detected. For example::
-
-  import subprocess
-
-  from testtools import TestCase
-  from testtools.matchers import Equals
-
-
-  class SomeProcessTests(TestCase):
-
-      def test_process_output(self):
-          process = subprocess.Popen(
-            ["my-app", "/some/path"],
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE
-          )
-
-          stdout, stderr = process.communicate()
-
-          self.expectThat(process.returncode, Equals(0))
-          self.expectThat(stdout, Equals("Expected Output"))
-          self.expectThat(stderr, Equals(""))
-
-In this example, should the ``expectThat`` call fail, the failure will be
-recorded in the test result, but the test will continue as normal. If all
-three assertions fail, the test result will have three failures recorded, and
-the failure details for each failed assertion will be attached to the test
-result.
-
-Stock matchers
---------------
-
-testtools comes with many matchers built in.  They can all be found in and
-imported from the ``testtools.matchers`` module.
-
-Equals
-~~~~~~
-
-Matches if two items are equal. For example::
-
-  def test_equals_example(self):
-      self.assertThat([42], Equals([42]))
-
-
-Is
-~~~
-
-Matches if two items are identical.  For example::
-
-  def test_is_example(self):
-      foo = object()
-      self.assertThat(foo, Is(foo))
-
-
-IsInstance
-~~~~~~~~~~
-
-Adapts ``isinstance()`` for use as a matcher.  For example::
-
-  def test_isinstance_example(self):
-      class MyClass:
-          pass
-      self.assertThat(MyClass(), IsInstance(MyClass))
-      self.assertThat(MyClass(), IsInstance(MyClass, str))
-
-
-The raises helper
-~~~~~~~~~~~~~~~~~
-
-Matches if a callable raises a particular type of exception.  For example::
-
-  def test_raises_example(self):
-      self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
-
-This is actually a convenience function that combines two other matchers:
-Raises_ and MatchesException_.
-
-
-DocTestMatches
-~~~~~~~~~~~~~~
-
-Matches a string as if it were the output of a doctest_ example.  Very useful
-for making assertions about large chunks of text.  For example::
-
-  import doctest
-
-  def test_doctest_example(self):
-      output = "Colorless green ideas"
-      self.assertThat(
-          output,
-          DocTestMatches("Colorless ... ideas", doctest.ELLIPSIS))
-
-We highly recommend using the following flags::
-
-  doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF
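-
-With those flags, a sketch like the following tolerates reflowed whitespace
-as well as elided text::
-
-  def test_doctest_flags_example(self):
-      flags = (doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
-               | doctest.REPORT_NDIFF)
-      self.assertThat(
-          "Colorless  green\n  ideas",
-          DocTestMatches("Colorless ... ideas", flags))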
-
-
-GreaterThan
-~~~~~~~~~~~
-
-Matches if the given thing is greater than the thing in the matcher.  For
-example::
-
-  def test_greater_than_example(self):
-      self.assertThat(3, GreaterThan(2))
-
-
-LessThan
-~~~~~~~~
-
-Matches if the given thing is less than the thing in the matcher.  For
-example::
-
-  def test_less_than_example(self):
-      self.assertThat(2, LessThan(3))
-
-
-StartsWith, EndsWith
-~~~~~~~~~~~~~~~~~~~~
-
-These matchers check to see if a string starts with or ends with a particular
-substring.  For example::
-
-  def test_starts_and_ends_with_example(self):
-      self.assertThat('underground', StartsWith('und'))
-      self.assertThat('underground', EndsWith('und'))
-
-
-Contains
-~~~~~~~~
-
-This matcher checks to see if the given thing contains the thing in the
-matcher.  For example::
-
-  def test_contains_example(self):
-      self.assertThat('abc', Contains('b'))
-
-
-MatchesException
-~~~~~~~~~~~~~~~~
-
-Matches an exc_info tuple if the exception is of the correct type.  For
-example::
-
-  def test_matches_exception_example(self):
-      try:
-          raise RuntimeError('foo')
-      except RuntimeError:
-          exc_info = sys.exc_info()
-      self.assertThat(exc_info, MatchesException(RuntimeError))
-      self.assertThat(exc_info, MatchesException(RuntimeError('foo')))
-
-Most of the time, you will want to use `The raises helper`_ instead.
-
-
-NotEquals
-~~~~~~~~~
-
-Matches if something is not equal to something else.  Note that this is subtly
-different to ``Not(Equals(x))``.  ``NotEquals(x)`` will match if ``y != x``,
-``Not(Equals(x))`` will match if ``not y == x``.
-
-You only need to worry about this distinction if you are testing code that
-relies on badly written overloaded equality operators.
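-
-A contrived sketch (the behaviour shown is Python 2's, where ``__ne__`` is
-not derived from ``__eq__``)::
-
-  class AlwaysEqual(object):
-      # Badly written: claims equality with everything, but never
-      # defines __ne__, so != falls back to Python 2's default
-      # comparison, which reports the objects as unequal.
-      def __eq__(self, other):
-          return True
-
-  def test_not_equals_example(self):
-      y = AlwaysEqual()
-      self.assertThat(y, NotEquals(42))    # Matches: y != 42 is True.
-      self.assertThat(y, Not(Equals(42)))  # Fails: y == 42 is also True.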
-
-
-KeysEqual
-~~~~~~~~~
-
-Matches if the keys of one dict are equal to the keys of another dict.  For
-example::
-
-  def test_keys_equal(self):
-      x = {'a': 1, 'b': 2}
-      y = {'a': 2, 'b': 3}
-      self.assertThat(x, KeysEqual(y))
-
-
-MatchesRegex
-~~~~~~~~~~~~
-
-Matches a string against a regular expression, which is a wonderful thing to
-be able to do, if you think about it::
-
-  def test_matches_regex_example(self):
-      self.assertThat('foo', MatchesRegex('fo+'))
-
-
-HasLength
-~~~~~~~~~
-
-Check the length of a collection.  The following assertion will fail::
-
-  self.assertThat([1, 2, 3], HasLength(2))
-
-But this one won't::
-
-  self.assertThat([1, 2, 3], HasLength(3))
-
-
-File- and path-related matchers
--------------------------------
-
-testtools also has a number of matchers to help with asserting things about
-the state of the filesystem.
-
-PathExists
-~~~~~~~~~~
-
-Matches if a path exists::
-
-  self.assertThat('/', PathExists())
-
-
-DirExists
-~~~~~~~~~
-
-Matches if a path exists and it refers to a directory::
-
-  # This will pass on most Linux systems.
-  self.assertThat('/home/', DirExists())
-  # This will not
-  self.assertThat('/home/jml/some-file.txt', DirExists())
-
-
-FileExists
-~~~~~~~~~~
-
-Matches if a path exists and it refers to a file (as opposed to a directory)::
-
-  # This will pass on most Linux systems.
-  self.assertThat('/bin/true', FileExists())
-  # This will not.
-  self.assertThat('/home/', FileExists())
-
-
-DirContains
-~~~~~~~~~~~
-
-Matches if the given directory contains the specified files and directories.
-Say we have a directory ``foo`` that has the files ``a``, ``b`` and ``c``,
-then::
-
-  self.assertThat('foo', DirContains(['a', 'b', 'c']))
-
-will match, but::
-
-  self.assertThat('foo', DirContains(['a', 'b']))
-
-will not.
-
-The matcher sorts both the input and the list of names we get back from the
-filesystem.
-
-You can use this in a more advanced way, and match the sorted directory
-listing against an arbitrary matcher::
-
-  self.assertThat('foo', DirContains(matcher=Contains('a')))
-
-
-FileContains
-~~~~~~~~~~~~
-
-Matches if the given file has the specified contents.  Say there's a file
-called ``greetings.txt`` with the contents, ``Hello World!``::
-
-  self.assertThat('greetings.txt', FileContains("Hello World!"))
-
-will match.
-
-You can also use this in a more advanced way, and match the contents of the
-file against an arbitrary matcher::
-
-  self.assertThat('greetings.txt', FileContains(matcher=Contains('!')))
-
-
-HasPermissions
-~~~~~~~~~~~~~~
-
-Used for asserting that a file or directory has certain permissions.  Uses
-octal-mode permissions for both input and matching.  For example::
-
-  self.assertThat('/tmp', HasPermissions('1777'))
-  self.assertThat('id_rsa', HasPermissions('0600'))
-
-This is probably more useful on UNIX systems than on Windows systems.
-
-
-SamePath
-~~~~~~~~
-
-Matches if two paths actually refer to the same thing.  The paths don't have
-to exist, but if they do exist, ``SamePath`` will resolve any symlinks::
-
-  self.assertThat('somefile', SamePath('childdir/../somefile'))
-
-
-TarballContains
-~~~~~~~~~~~~~~~
-
-Matches the contents of a tarball.  Much like ``DirContains``, but instead of
-matching the output of ``os.listdir`` it matches the output of
-``TarFile.getnames``.
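-
-For example, assuming a tarball ``project.tar.gz`` whose members are named
-``setup.py`` and ``src``::
-
-  self.assertThat('project.tar.gz', TarballContains(['setup.py', 'src']))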
-
-
-Combining matchers
-------------------
-
-One great thing about matchers is that you can readily combine existing
-matchers to get variations on their behaviour or to quickly build more complex
-assertions.
-
-Below are a few of the combining matchers that come with testtools.
-
-
-Not
-~~~
-
-Negates another matcher.  For example::
-
-  def test_not_example(self):
-      self.assertThat([42], Not(Equals("potato")))
-      self.assertThat([42], Not(Is([42])))
-
-If you find yourself using ``Not`` frequently, you may wish to create a custom
-matcher for it.  For example::
-
-  IsNot = lambda x: Not(Is(x))
-
-  def test_not_example_2(self):
-      self.assertThat([42], IsNot([42]))
-
-
-Annotate
-~~~~~~~~
-
-Used to add custom notes to a matcher.  For example::
-
-  def test_annotate_example(self):
-      result = 43
-      self.assertThat(
-          result, Annotate("Not the answer to the Question!", Equals(42)))
-
-Since the annotation is only ever displayed when there is a mismatch
-(e.g. when ``result`` does not equal 42), it's a good idea to phrase the note
-negatively, so that it describes what a mismatch actually means.
-
-As with Not_, you may wish to create a custom matcher that describes a
-common operation.  For example::
-
-  PoliticallyEquals = lambda x: Annotate("Death to the aristos!", Equals(x))
-
-  def test_annotate_example_2(self):
-      self.assertThat("orange", PoliticallyEquals("yellow"))
-
-You can have assertThat perform the annotation for you as a convenience::
-
-  def test_annotate_example_3(self):
-      self.assertThat("orange", Equals("yellow"), "Death to the aristos!")
-
-
-AfterPreprocessing
-~~~~~~~~~~~~~~~~~~
-
-Used to make a matcher that applies a function to the matched object before
-matching. This can be used to aid in creating trivial matchers as functions, for
-example::
-
-  def test_after_preprocessing_example(self):
-      def PathHasFileContent(content):
-          def _read(path):
-              return open(path).read()
-          return AfterPreprocessing(_read, Equals(content))
-      self.assertThat('/tmp/foo.txt', PathHasFileContent("Hello world!"))
-
-
-MatchesAll
-~~~~~~~~~~
-
-Combines many matchers to make a new matcher.  The new matcher will only match
-things that match every single one of the component matchers.
-
-It's much easier to understand in Python than in English::
-
-  def test_matches_all_example(self):
-      has_und_at_both_ends = MatchesAll(StartsWith("und"), EndsWith("und"))
-      # This will succeed.
-      self.assertThat("underground", has_und_at_both_ends)
-      # This will fail.
-      self.assertThat("found", has_und_at_both_ends)
-      # So will this.
-      self.assertThat("undead", has_und_at_both_ends)
-
-At this point some people ask themselves, "why bother doing this at all? why
-not just have two separate assertions?".  It's a good question.
-
-The first reason is that when a ``MatchesAll`` gets a mismatch, the error will
-include information about all of the bits that mismatched.  When you have two
-separate assertions, as below::
-
-  def test_two_separate_assertions(self):
-       self.assertThat("foo", StartsWith("und"))
-       self.assertThat("foo", EndsWith("und"))
-
-then you get absolutely no information from the second assertion if the first
-assertion fails.  Tests are largely there to help you debug code, so having
-more information in error messages is a big help.
-
-The second reason is that it is sometimes useful to give a name to a set of
-matchers. ``has_und_at_both_ends`` is a bit contrived, of course, but it is
-clear.  The ``FileExists`` and ``DirExists`` matchers included in testtools
-are perhaps better real examples.
-
-If you want only the first mismatch to be reported, pass ``first_only=True``
-as a keyword parameter to ``MatchesAll``.
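-
-For example::
-
-  has_und_at_both_ends = MatchesAll(
-      StartsWith("und"), EndsWith("und"), first_only=True)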
-
-
-MatchesAny
-~~~~~~~~~~
-
-Like MatchesAll_, ``MatchesAny`` combines many matchers to make a new
-matcher.  The difference is that the new matcher will match a thing if it
-matches *any* of the component matchers.
-
-For example::
-
-  def test_matches_any_example(self):
-      self.assertThat(42, MatchesAny(Equals(5), Not(Equals(6))))
-
-
-AllMatch
-~~~~~~~~
-
-Matches many values against a single matcher.  Can be used to make sure that
-many things all meet the same condition::
-
-  def test_all_match_example(self):
-      self.assertThat([2, 3, 5, 7], AllMatch(LessThan(10)))
-
-If the match fails, then all of the values that fail to match will be included
-in the error message.
-
-In some ways, this is the converse of MatchesAll_.
-
-
-MatchesListwise
-~~~~~~~~~~~~~~~
-
-Where ``MatchesAny`` and ``MatchesAll`` combine many matchers to match a
-single value, ``MatchesListwise`` combines many matchers to match many values.
-
-For example::
-
-  def test_matches_listwise_example(self):
-      self.assertThat(
-          [1, 2, 3], MatchesListwise(list(map(Equals, [1, 2, 3]))))
-
-This is useful for writing custom, domain-specific matchers.
-
-If you want only the first mismatch to be reported, pass ``first_only=True``
-to ``MatchesListwise``.
-
-
-MatchesSetwise
-~~~~~~~~~~~~~~
-
-Combines many matchers to match many values, without regard to their order.
-
-Here's an example::
-
-  def test_matches_setwise_example(self):
-      self.assertThat(
-          [1, 2, 3], MatchesSetwise(Equals(2), Equals(3), Equals(1)))
-
-Much like ``MatchesListwise``, best used for writing custom, domain-specific
-matchers.
-
-
-MatchesStructure
-~~~~~~~~~~~~~~~~
-
-Creates a matcher that matches certain attributes of an object against a
-pre-defined set of matchers.
-
-It's much easier to understand in Python than in English::
-
-  def test_matches_structure_example(self):
-      foo = Foo()
-      foo.a = 1
-      foo.b = 2
-      matcher = MatchesStructure(a=Equals(1), b=Equals(2))
-      self.assertThat(foo, matcher)
-
-Since all of the matchers used were ``Equals``, we could also write this using
-the ``byEquality`` helper::
-
-  def test_matches_structure_example(self):
-      foo = Foo()
-      foo.a = 1
-      foo.b = 2
-      matcher = MatchesStructure.byEquality(a=1, b=2)
-      self.assertThat(foo, matcher)
-
-``MatchesStructure.fromExample`` takes an object and a list of attributes and
-creates a ``MatchesStructure`` matcher where each attribute of the matched
-object must equal each attribute of the example object.  For example::
-
-      matcher = MatchesStructure.fromExample(foo, 'a', 'b')
-
-is exactly equivalent to ``matcher`` in the previous example.
-
-
-MatchesPredicate
-~~~~~~~~~~~~~~~~
-
-Sometimes, all you want to do is create a matcher that matches if a given
-function returns True, and mismatches if it returns False.
-
-For example, you might have an ``is_prime`` function and want to make a
-matcher based on it::
-
-  def test_prime_numbers(self):
-      IsPrime = MatchesPredicate(is_prime, '%s is not prime.')
-      self.assertThat(7, IsPrime)
-      self.assertThat(1983, IsPrime)
-      # This will fail.
-      self.assertThat(42, IsPrime)
-
-Which will produce the error message::
-
-  Traceback (most recent call last):
-    File "...", line ..., in test_prime_numbers
-      self.assertThat(42, IsPrime)
-  MismatchError: 42 is not prime.
-
-
-MatchesPredicateWithParams
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Sometimes you can't use a trivial predicate and instead need to pass in some
-parameters each time.  In that case, ``MatchesPredicateWithParams`` is your
-go-to tool for creating ad hoc matchers.  ``MatchesPredicateWithParams`` takes
-a predicate function and a message and returns a factory that produces
-matchers from them.  The predicate needs to return a boolean (or any truthy
-object), and accept the object to match plus whatever was passed into the
-factory.
-
-For example, you might have a ``divisible`` function and want to make a
-matcher based on it::
-
-  def test_divisible_numbers(self):
-      IsDivisibleBy = MatchesPredicateWithParams(
-          divisible, '{0} is not divisible by {1}')
-      self.assertThat(7, IsDivisibleBy(1))
-      self.assertThat(7, IsDivisibleBy(7))
-      # This will fail.
-      self.assertThat(7, IsDivisibleBy(2))
-
-Which will produce the error message::
-
-  Traceback (most recent call last):
-    File "...", line ..., in test_divisible_numbers
-      self.assertThat(7, IsDivisibleBy(2))
-  MismatchError: 7 is not divisible by 2.
-
-
-Raises
-~~~~~~
-
-Takes whatever the callable raises as an exc_info tuple and matches it against
-whatever matcher it was given.  For example, if you want to assert that a
-callable raises an exception of a given type::
-
-  def test_raises_example(self):
-      self.assertThat(
-          lambda: 1/0, Raises(MatchesException(ZeroDivisionError)))
-
-Although note that this could also be written as::
-
-  def test_raises_example_convenient(self):
-      self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
-
-See also MatchesException_ and `the raises helper`_
-
-
-Writing your own matchers
--------------------------
-
-Combining matchers is fun and can get you a very long way indeed, but
-sometimes you will have to write your own.  Here's how.
-
-You need to make two closely-linked objects: a ``Matcher`` and a
-``Mismatch``.  The ``Matcher`` knows how to actually make the comparison, and
-the ``Mismatch`` knows how to describe a failure to match.
-
-Here's an example matcher::
-
-  class IsDivisibleBy(object):
-      """Match if a number is divisible by another number."""
-      def __init__(self, divider):
-          self.divider = divider
-      def __str__(self):
-          return 'IsDivisibleBy(%s)' % (self.divider,)
-      def match(self, actual):
-          remainder = actual % self.divider
-          if remainder != 0:
-              return IsDivisibleByMismatch(actual, self.divider, remainder)
-          else:
-              return None
-
-The matcher has a constructor that takes parameters that describe what you
-actually *expect*, in this case a number that other numbers ought to be
-divisible by.  It has a ``__str__`` method, the result of which is displayed
-on failure by ``assertThat``, and a ``match`` method that does the actual
-matching.
-
-``match`` takes something to match against, here ``actual``, and decides
-whether or not it matches.  If it does match, then ``match`` must return
-``None``.  If it does *not* match, then ``match`` must return a ``Mismatch``
-object. ``assertThat`` will call ``match`` and then fail the test if it
-returns a non-None value.  For example::
-
-  def test_is_divisible_by_example(self):
-      # This succeeds, since IsDivisibleBy(5).match(10) returns None.
-      self.assertThat(10, IsDivisibleBy(5))
-      # This fails, since IsDivisibleBy(7).match(10) returns a mismatch.
-      self.assertThat(10, IsDivisibleBy(7))
-
-The mismatch is responsible for what sort of error message the failing test
-generates.  Here's an example mismatch::
-
-  class IsDivisibleByMismatch(object):
-      def __init__(self, number, divider, remainder):
-          self.number = number
-          self.divider = divider
-          self.remainder = remainder
-
-      def describe(self):
-          return "%r is not divisible by %r, %r remains" % (
-              self.number, self.divider, self.remainder)
-
-      def get_details(self):
-          return {}
-
-The mismatch takes information about the mismatch, and provides a ``describe``
-method that assembles all of that into a nice error message for end users.
-You can use the ``get_details`` method to provide extra, arbitrary data with
-the mismatch (e.g. the contents of a log file).  Most of the time it's fine to
-just return an empty dict.  You can read more about Details_ elsewhere in this
-document.
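-
-For example, a variant of the mismatch above could attach the remainder as a
-detail (a sketch)::
-
-  from testtools.content import text_content
-
-  def get_details(self):
-      # Expose the remainder as named content; runners that understand
-      # details can then display or store it alongside the failure.
-      return {'remainder': text_content(str(self.remainder))}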
-
-Sometimes you don't need to create a custom mismatch class.  In particular, if
-you don't care *when* the description is calculated, then you can just do that
-in the Matcher itself like this::
-
-  def match(self, actual):
-      remainder = actual % self.divider
-      if remainder != 0:
-          return Mismatch(
-              "%r is not divisible by %r, %r remains" % (
-                  actual, self.divider, remainder))
-      else:
-          return None
-
-When writing a ``describe`` method or constructing a ``Mismatch`` object the
-code should ensure it only emits printable unicode.  As this output must be
-combined with other text and forwarded for presentation, letting through
-non-ascii bytes of ambiguous encoding or control characters could throw an
-exception or mangle the display.  In most cases simply avoiding the ``%s``
-format specifier and using ``%r`` instead will be enough.  For examples of
-more complex formatting, see the ``testtools.matchers`` implementations.
-
-
-Details
-=======
-
-As we may have mentioned once or twice already, one of the great benefits of
-automated tests is that they help find, isolate and debug errors in your
-system.
-
-Frequently however, the information provided by a mere assertion failure is
-not enough.  It's often useful to have other information: the contents of log
-files; what queries were run; benchmark timing information; what state certain
-subsystem components are in and so forth.
-
-testtools calls all of these things "details" and provides a single, powerful
-mechanism for including this information in your test run.
-
-Here's an example of how to add them::
-
-  from testtools import TestCase
-  from testtools.content import text_content
-
-  class TestSomething(TestCase):
-
-      def test_thingy(self):
-          self.addDetail('arbitrary-color-name', text_content("blue"))
-          1 / 0 # Gratuitous error!
-
-A detail is an arbitrary piece of content given a name that's unique within
-test.  Here the name is ``arbitrary-color-name`` and the content is
-``text_content("blue")``.  The name can be any text string, and the content
-can be any ``testtools.content.Content`` object.
-
-When the test runs, testtools will show you something like this::
-
-  ======================================================================
-  ERROR: exampletest.TestSomething.test_thingy
-  ----------------------------------------------------------------------
-  arbitrary-color-name: {{{blue}}}
-
-  Traceback (most recent call last):
-    File "exampletest.py", line 8, in test_thingy
-      1 / 0 # Gratuitous error!
-  ZeroDivisionError: integer division or modulo by zero
-  ------------
-  Ran 1 test in 0.030s
-
-As you can see, the detail is included as an attachment, here saying
-that our arbitrary-color-name is "blue".
-
-
-Content
--------
-
-For the actual content of details, testtools uses its own MIME-based Content
-object.  This allows you to attach any information that you could possibly
-conceive of to a test, and allows testtools to use or serialize that
-information.
-
-The basic ``testtools.content.Content`` object is constructed from a
-``testtools.content.ContentType`` and a nullary callable that must return an
-iterator of chunks of bytes that the content is made from.
-
-So, to make a Content object that is just a simple string of text, you can
-do::
-
-  from testtools.content import Content
-  from testtools.content_type import ContentType
-
-  text = Content(ContentType('text', 'plain'), lambda: ["some text"])
-
-Because adding small bits of text content is very common, there's also a
-convenience function::
-
-  text = text_content("some text")
-
-To make content out of an image stored on disk, you could do something like::
-
-  image = Content(ContentType('image', 'png'),
-                  lambda: [open('foo.png', 'rb').read()])
-
-Or you could use the convenience function::
-
-  image = content_from_file('foo.png', ContentType('image', 'png'))
-
-The ``lambda`` helps make sure that the file is opened and the actual bytes
-read only when they are needed – by default, when the test is finished.  This
-means that tests can construct and add Content objects freely without worrying
-too much about how they affect run time.
-
-
-A realistic example
--------------------
-
-A very common use of details is to add a log file to failing tests.  Say your
-project has a server represented by a class ``SomeServer`` that you can start
-up and shut down in tests, but runs in another process.  You want to test
-interaction with that server, and whenever the interaction fails, you want to
-see the client-side error *and* the logs from the server-side.  Here's how you
-might do it::
-
-  from testtools import TestCase
-  from testtools.content import attach_file
-
-  from myproject import SomeServer
-
-  class SomeTestCase(TestCase):
-
-      def setUp(self):
-          super(SomeTestCase, self).setUp()
-          self.server = SomeServer()
-          self.server.start_up()
-          self.addCleanup(self.server.shut_down)
-          self.addCleanup(attach_file, self.server.logfile, self)
-
-      def test_a_thing(self):
-          self.assertEqual("cool", self.server.temperature)
-
-This test will attach the log file of ``SomeServer`` to each test that is
-run.  testtools will only display the log file for failing tests, so it's not
-such a big deal.
-
-If the act of adding a detail is expensive, you might want to use
-addOnException_ so that you only do it when a test actually raises an
-exception.
-
-
-Controlling test execution
-==========================
-
-.. _addCleanup:
-
-addCleanup
-----------
-
-``TestCase.addCleanup`` is a robust way to arrange for a clean up function to
-be called before ``tearDown``.  This is a powerful and simple alternative to
-putting clean up logic in a try/finally block or ``tearDown`` method.  For
-example::
-
-  def test_foo(self):
-      foo.lock()
-      self.addCleanup(foo.unlock)
-      ...
-
-This is particularly useful if you have some sort of factory in your test::
-
-  def make_locked_foo(self):
-      foo = Foo()
-      foo.lock()
-      self.addCleanup(foo.unlock)
-      return foo
-
-  def test_frotz_a_foo(self):
-      foo = self.make_locked_foo()
-      foo.frotz()
-      self.assertEqual(foo.frotz_count, 1)
-
-Any extra arguments or keyword arguments passed to ``addCleanup`` are passed
-to the callable at cleanup time.
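-
-For example (``force`` here is a hypothetical parameter of ``unlock``)::
-
-  def test_force_unlock_foo(self):
-      foo.lock()
-      # Calls foo.unlock(force=True) when the test finishes.
-      self.addCleanup(foo.unlock, force=True)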
-
-Cleanups can also report multiple errors, if appropriate, by wrapping them in
-a ``testtools.MultipleExceptions`` object::
-
-  raise MultipleExceptions(exc_info1, exc_info2)
-
-
-Fixtures
---------
-
-Tests often depend on a system being set up in a certain way, or having
-certain resources available to them.  Perhaps a test needs a connection to the
-database or access to a running external server.
-
-One common way of doing this is to do::
-
-  class SomeTest(TestCase):
-      def setUp(self):
-          super(SomeTest, self).setUp()
-          self.server = Server()
-          self.server.setUp()
-          self.addCleanup(self.server.tearDown)
-
-testtools provides a more convenient, declarative way to do the same thing::
-
-  class SomeTest(TestCase):
-      def setUp(self):
-          super(SomeTest, self).setUp()
-          self.server = self.useFixture(Server())
-
-``useFixture(fixture)`` calls ``setUp`` on the fixture, schedules a clean up
-to clean it up, and schedules a clean up to attach all details_ held by the
-fixture to the test case.  The fixture object must meet the
-``fixtures.Fixture`` protocol (version 0.3.4 or newer, see fixtures_).
-
-If you have anything beyond the most simple test set up, we recommend that
-you put this set up into a ``Fixture`` class.  Once there, the fixture can be
-easily re-used by other tests and can be combined with other fixtures to make
-more complex resources.
-
-
-Skipping tests
---------------
-
-Many reasons exist to skip a test: a dependency might be missing; a test might
-be too expensive and thus should not be run while on battery power; or perhaps
-the test is testing an incomplete feature.
-
-``TestCase.skipTest`` is a simple way to have a test stop running and be
-reported as a skipped test, rather than a success, error or failure.  For
-example::
-
-  def test_make_symlink(self):
-      symlink = getattr(os, 'symlink', None)
-      if symlink is None:
-          self.skipTest("No symlink support")
-      symlink(whatever, something_else)
-
-Using ``skipTest`` means that you can make decisions about what tests to run
-as late as possible, and close to the actual tests.  Without it, you might be
-forced to use convoluted logic during test loading, which is a bit of a mess.
-
-
-Legacy skip support
-~~~~~~~~~~~~~~~~~~~
-
-If you are using this feature when running your test suite with a legacy
-``TestResult`` object that is missing the ``addSkip`` method, then the
-``addError`` method will be invoked instead.  If you are using a test result
-from testtools, you do not have to worry about this.
-
-In older versions of testtools, ``skipTest`` was known as ``skip``. Since
-Python 2.7 added ``skipTest`` support, the ``skip`` name is now deprecated.
-No warning is emitted yet – some time in the future we may do so.
-
-
-addOnException
---------------
-
-Sometimes, you might wish to do something only when a test fails.  Perhaps you
-need to run expensive diagnostic routines or some such.
-``TestCase.addOnException`` allows you to easily do just this.  For example::
-
-  class SomeTest(TestCase):
-      def setUp(self):
-          super(SomeTest, self).setUp()
-          self.server = self.useFixture(SomeServer())
-          self.addOnException(self.attach_server_diagnostics)
-
-      def attach_server_diagnostics(self, exc_info):
-          self.server.prep_for_diagnostics() # Expensive!
-          self.addDetail('server-diagnostics', self.server.get_diagnostics())
-
-      def test_a_thing(self):
-          self.assertEqual('cheese', 'chalk')
-
-In this example, ``attach_server_diagnostics`` will only be called when a test
-fails.  It is given the exc_info tuple of the error raised by the test, just
-in case it is needed.
-
-
-Twisted support
----------------
-
-testtools provides *highly experimental* support for running Twisted tests –
-tests that return a Deferred_ and rely on the Twisted reactor.  You should not
-use this feature right now.  We reserve the right to change the API and
-behaviour without telling you first.
-
-However, if you are going to, here's how you do it::
-
-  from testtools import TestCase
-  from testtools.deferredruntest import AsynchronousDeferredRunTest
-
-  class MyTwistedTests(TestCase):
-
-      run_tests_with = AsynchronousDeferredRunTest
-
-      def test_foo(self):
-          # ...
-          return d
-
-In particular, note that you do *not* have to use a special base ``TestCase``
-in order to run Twisted tests.
-
-You can also run individual tests within a test case class using the Twisted
-test runner::
-
-   class MyTestsSomeOfWhichAreTwisted(TestCase):
-
-       def test_normal(self):
-           pass
-
-       @run_test_with(AsynchronousDeferredRunTest)
-       def test_twisted(self):
-           # ...
-           return d
-
-Here are some tips for converting your Trial tests into testtools tests.
-
-* Use the ``AsynchronousDeferredRunTest`` runner
-* Make sure to upcall to ``setUp`` and ``tearDown``
-* Don't use ``setUpClass`` or ``tearDownClass``
-* Don't expect setting .todo, .timeout or .skip attributes to do anything
-* ``flushLoggedErrors`` is ``testtools.deferredruntest.flush_logged_errors``
-* ``assertFailure`` is ``testtools.deferredruntest.assert_fails_with``
-* Trial spins the reactor a couple of times before cleaning it up,
-  ``AsynchronousDeferredRunTest`` does not.  If you rely on this behavior, use
-  ``AsynchronousDeferredRunTestForBrokenTwisted``.
-
-force_failure
--------------
-
-Setting the ``testtools.TestCase.force_failure`` instance variable to ``True``
-will cause the test to be marked as a failure, but won't stop the test code
-from running (see :ref:`force_failure`).
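-
-A sketch (``check_thing`` is a hypothetical helper)::
-
-  def test_gather_all_breakage(self):
-      if not check_thing('one'):
-          # Mark the test as failed, but keep going so that the
-          # remaining checks still run and their details are gathered.
-          self.force_failure = True
-      if not check_thing('two'):
-          self.force_failure = True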
-
-
-Test helpers
-============
-
-testtools comes with a few little things that make it a little bit easier to
-write tests.
-
-
-TestCase.patch
---------------
-
-``patch`` is a convenient way to monkey-patch a Python object for the duration
-of your test.  It's especially useful for testing legacy code.  e.g.::
-
-  def test_foo(self):
-      my_stream = StringIO()
-      self.patch(sys, 'stderr', my_stream)
-      run_some_code_that_prints_to_stderr()
-      self.assertEqual('', my_stream.getvalue())
-
-The call to ``patch`` above masks ``sys.stderr`` with ``my_stream`` so that
-anything printed to stderr will be captured in a StringIO buffer that can
-then be inspected.  Once the test is done, the real ``sys.stderr`` is
-restored to its rightful place.
-
-
-Creation methods
-----------------
-
-Often when writing unit tests, you want to create an object that is a
-completely normal instance of its type.  You don't want there to be anything
-special about its properties, because you are testing generic behaviour rather
-than specific conditions.
-
-A lot of the time, test authors do this by making up silly strings and numbers
-and passing them to constructors (e.g. 42, 'foo', "bar", etc.), and that's
-fine.  However, sometimes it's useful to be able to create arbitrary objects
-at will, without having to make up silly sample data.
-
-To help with this, ``testtools.TestCase`` implements creation methods called
-``getUniqueString`` and ``getUniqueInteger``.  They return strings and
-integers that are unique within the context of the test that can be used to
-assemble more complex objects.  Here's a basic example where
-``getUniqueString`` is used instead of saying "foo" or "bar" or whatever::
-
-  class SomeTest(TestCase):
-
-      def test_full_name(self):
-          first_name = self.getUniqueString()
-          last_name = self.getUniqueString()
-          p = Person(first_name, last_name)
-          self.assertEqual(p.full_name, "%s %s" % (first_name, last_name))
-
-
-And here's how it could be used to make a complicated test::
-
-  class TestCoupleLogic(TestCase):
-
-      def make_arbitrary_person(self):
-          return Person(self.getUniqueString(), self.getUniqueString())
-
-      def test_get_invitation(self):
-          a = self.make_arbitrary_person()
-          b = self.make_arbitrary_person()
-          couple = Couple(a, b)
-          event_name = self.getUniqueString()
-          invitation = couple.get_invitation(event_name)
-          self.assertEqual(
-              invitation,
-              "We invite %s and %s to %s" % (
-                  a.full_name, b.full_name, event_name))
-
-Essentially, creation methods like these are a way of reducing the number of
-assumptions in your tests and communicating to test readers that the exact
-details of certain variables don't actually matter.
-
-See pages 419-423 of `xUnit Test Patterns`_ by Gerard Meszaros for a detailed
-discussion of creation methods.
-
-Test attributes
----------------
-
-Inspired by the ``nosetests`` ``attr`` plugin, testtools provides support for
-marking up test methods with attributes, which are then exposed in the test
-id and can be used when filtering tests by id (e.g. via ``--load-list``)::
-
-  from testtools.testcase import attr, WithAttributes
-
-  class AnnotatedTests(WithAttributes, TestCase):
-
-      @attr('simple')
-      def test_one(self):
-          pass
-
-      @attr('more', 'than', 'one')
-      def test_two(self):
-          pass
-
-      @attr('or')
-      @attr('stacked')
-      def test_three(self):
-          pass
-
-General helpers
-===============
-
-Conditional imports
--------------------
-
-Lots of the time we would like to conditionally import modules.  testtools
-uses the small ``extras`` library to do this; the functionality used to be
-part of testtools itself.
-
-Instead of::
-
-  try:
-      from twisted.internet import defer
-  except ImportError:
-      defer = None
-
-You can do::
-
-  defer = try_import('twisted.internet.defer')
-
-
-Instead of::
-
-  try:
-      from StringIO import StringIO
-  except ImportError:
-      from io import StringIO
-
-You can do::
-
-  StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
-
-
-Safe attribute testing
-----------------------
-
-``hasattr`` is broken_ on many versions of Python. The helper ``safe_hasattr``
-can be used to safely test whether an object has a particular attribute.  Like
-``try_import``, this used to be in testtools but is now in ``extras``.
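-
-For example, the ``skipTest`` example from earlier could be written as
-(``whatever`` and ``something_else`` are placeholders, as before)::
-
-  import os
-
-  from extras import safe_hasattr
-
-  def test_make_symlink(self):
-      # Only a genuinely missing attribute counts as absent; other
-      # exceptions raised during attribute access propagate instead
-      # of being silently swallowed.
-      if not safe_hasattr(os, 'symlink'):
-          self.skipTest("No symlink support")
-      os.symlink(whatever, something_else)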
-
-
-Nullary callables
------------------
-
-Sometimes you want to be able to pass around a function with the arguments
-already specified.  The normal way of doing this in Python is::
-
-  nullary = lambda: f(*args, **kwargs)
-  nullary()
-
-Which is mostly good enough, but loses a bit of debugging information.  If you
-take the ``repr()`` of ``nullary``, you're only told that it's a lambda, and
-you get none of the juicy meaning that you'd get from the ``repr()`` of ``f``.
-
-The solution is to use ``Nullary`` instead::
-
-  nullary = Nullary(f, *args, **kwargs)
-  nullary()
-
-Here, ``repr(nullary)`` will be the same as ``repr(f)``.
-
-
-.. _testrepository: https://launchpad.net/testrepository
-.. _Trial: http://twistedmatrix.com/documents/current/core/howto/testing.html
-.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
-.. _unittest2: http://pypi.python.org/pypi/unittest2
-.. _zope.testrunner: http://pypi.python.org/pypi/zope.testrunner/
-.. _xUnit test patterns: http://xunitpatterns.com/
-.. _fixtures: http://pypi.python.org/pypi/fixtures
-.. _unittest: http://docs.python.org/library/unittest.html
-.. _doctest: http://docs.python.org/library/doctest.html
-.. _Deferred: http://twistedmatrix.com/documents/current/core/howto/defer.html
-.. _discover: http://pypi.python.org/pypi/discover
-.. _Distutils: http://docs.python.org/library/distutils.html
-.. _`setup configuration`: http://docs.python.org/distutils/configfile.html
-.. _broken: http://chipaca.com/post/3210673069/hasattr-17-less-harmful
diff --git a/lib/testtools/doc/hacking.rst b/lib/testtools/doc/hacking.rst
deleted file mode 100644
index b25461f..0000000
--- a/lib/testtools/doc/hacking.rst
+++ /dev/null
@@ -1,194 +0,0 @@
-=========================
-Contributing to testtools
-=========================
-
-Bugs and patches
-----------------
-
-`File bugs <https://bugs.launchpad.net/testtools/+filebug>`_ on Launchpad, and
-`send patches <https://github.com/testing-cabal/testtools/>`_ on Github.
-
-
-Coding style
-------------
-
-In general, follow `PEP 8`_ except where consistency with the standard
-library's unittest_ module would suggest otherwise.
-
-testtools currently supports Python 2.6 and later, including Python 3.
-
-Copyright assignment
---------------------
-
-Part of testtools' raison d'être is to provide Python with improvements to the
-testing code it ships. For that reason we require all contributions (that are
-non-trivial) to meet one of the following rules:
-
-* be inapplicable for inclusion in Python.
-* be able to be included in Python without further contact with the contributor.
-* be copyright assigned to Jonathan M. Lange.
-
-Please pick one of these and specify it when contributing code to testtools.
-
-
-Licensing
----------
-
-All code that is not copyright assigned to Jonathan M. Lange (see Copyright
-Assignment above) needs to be licensed under the `MIT license`_ that testtools
-uses, so that testtools can ship it.
-
-
-Testing
--------
-
-Please write tests for every feature.  This project ought to be a model
-example of well-tested Python code!
-
-Take particular care to make sure the *intent* of each test is clear.
-
-You can run tests with ``make check``.
-
-By default, testtools hides many levels of its own stack when running tests.
-This is for the convenience of users, who do not care about how, say, assert
-methods are implemented. However, when writing tests for testtools itself, it
-is often useful to see all levels of the stack. To do this, add
-``run_tests_with = FullStackRunTest`` to the top of a test's class definition.
-
-
-Discussion
-----------
-
-When submitting a patch, it will help the review process a lot if there's a
-clear explanation of what the change does and why you think the change is a
-good idea.  For crasher bugs, this is generally a no-brainer, but for UI bugs
-& API tweaks, the reason something is an improvement might not be obvious, so
-it's worth spelling out.
-
-If you are thinking of implementing a new feature, you might want to have that
-discussion on the mailing list (testtools-dev@lists.launchpad.net) before the
-patch goes up for review.  This is not at all mandatory, but getting feedback
-early can help avoid dead ends.
-
-
-Documentation
--------------
-
-Documents are written using the Sphinx_ variant of reStructuredText_.  All
-public methods, functions, classes and modules must have API documentation.
-When changing code, be sure to check the API documentation to see if it could
-be improved.  Before submitting changes to trunk, look over them and see if
-the manuals ought to be updated.
-
-
-Source layout
--------------
-
-The top-level directory contains the ``testtools/`` package directory, and
-miscellaneous files like ``README.rst`` and ``setup.py``.
-
-The ``testtools/`` directory is the Python package itself.  It is separated
-into submodules for internal clarity, but all public APIs should be “promoted”
-into the top-level package by importing them in ``testtools/__init__.py``.
-Users of testtools should never import a submodule in order to use a stable
-API.  Unstable APIs like ``testtools.matchers`` and
-``testtools.deferredruntest`` should be exported as submodules.
-
-Tests belong in ``testtools/tests/``.
-
-
-Committing to trunk
--------------------
-
-Testtools is maintained using git, with its master repo at
-https://github.com/testing-cabal/testtools. This gives every contributor the
-ability to commit their work to their own branches. However, permission must be
-granted to allow contributors to commit to the trunk branch.
-
-Commit access to trunk is obtained by joining the `testing-cabal`_, either as an
-Owner or a Committer. Commit access is contingent on obeying the testtools
-contribution policy, see `Copyright Assignment`_ above.
-
-
-Code Review
------------
-
-All code must be reviewed before landing on trunk. The process is to create a
-branch on Github, and make a pull request into trunk. It will then be reviewed
-before it can be merged to trunk. It will be reviewed by someone who is:
-
-* not the author
-* a committer
-
-As a special exception, since there are few testtools committers and thus
-reviews are prone to blocking, a pull request from a committer that has not been
-reviewed after 24 hours may be merged by that committer. When the team is larger
-this policy will be revisited.
-
-Code reviewers should look for the quality of what is being submitted,
-including conformance with this HACKING file.
-
-Changes which all users should be made aware of should be documented in NEWS.
-
-We are now in full backwards compatibility mode - no more releases < 1.0.0, and
-breaking compatibility will require consensus on the testtools-dev mailing list.
-Exactly what constitutes a backwards incompatible change is vague, but coarsely:
-
-* adding required arguments or required calls to something that used to work
-* removing keyword or positional arguments, removing methods, functions or modules
-* changing behaviour someone may have reasonably depended on
-
-Some things are not compatibility issues:
-
-* changes to _ prefixed methods, functions, modules, packages.
-
-
-NEWS management
----------------
-
-The file NEWS is structured as a sorted list of releases. Each release can have
-a free form description and one or more sections with bullet point items.
-Sections in use today are 'Improvements' and 'Changes'. To ease merging between
-branches, the bullet points are kept alphabetically sorted. The release NEXT is
-permanently present at the top of the list.
-
-
-Releasing
----------
-
-Prerequisites
-+++++++++++++
-
-Membership in the testing-cabal org on github as committer.
-
-Membership in the pypi testtools project as maintainer.
-
-Membership in the https://launchpad.net/~testtools-committers team.
-
-Tasks
-+++++
-
-#. Choose a version number, say X.Y.Z
-#. In trunk, ensure __init__ has version ``(X, Y, Z, 'final', 0)``
-#. Under NEXT in NEWS add a heading with the version number X.Y.Z.
-#. Possibly write a blurb into NEWS.
-#. Commit the changes.
-#. Tag the release, ``git tag -s testtools-X.Y.Z``
-#. Run ``make release``, which:
-   #. Creates a source distribution and uploads to PyPI
-   #. Ensures all Fix Committed bugs are in the release milestone
-   #. Makes a release on Launchpad and uploads the tarball
-   #. Marks all the Fix Committed bugs as Fix Released
-   #. Creates a new milestone
-#. Change __version__ in __init__.py to the probable next version.
-   e.g. to ``(X, Y, Z+1, 'dev', 0)``.
-#. Commit 'Opening X.Y.Z+1 for development.'
-#. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
-#. Push trunk to Github, ``git push --tags origin master``
-
-.. _PEP 8: http://www.python.org/dev/peps/pep-0008/
-.. _unittest: http://docs.python.org/library/unittest.html
-.. _MIT license: http://www.opensource.org/licenses/mit-license.php
-.. _Sphinx: http://sphinx.pocoo.org/
-.. _restructuredtext: http://docutils.sourceforge.net/rst.html
-.. _testing-cabal: https://github.com/organizations/testing-cabal/
diff --git a/lib/testtools/doc/index.rst b/lib/testtools/doc/index.rst
deleted file mode 100644
index a6c05a9..0000000
--- a/lib/testtools/doc/index.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-.. testtools documentation master file, created by
-   sphinx-quickstart on Sun Nov 28 13:45:40 2010.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-testtools: tasteful testing for Python
-======================================
-
-testtools is a set of extensions to the Python standard library's unit testing
-framework. These extensions have been derived from many years of experience
-with unit testing in Python and come from many different sources. testtools
-also ports recent unittest changes all the way back to Python 2.4.  The next
-release of testtools will change that to support versions that are maintained
-by the Python community instead, to allow the use of modern language features
-within testtools.
-
-
-Contents:
-
-.. toctree::
-   :maxdepth: 1
-
-   overview
-   for-test-authors
-   for-framework-folk
-   hacking
-   Changes to testtools <news>
-   API reference documentation <api>
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
diff --git a/lib/testtools/doc/make.bat b/lib/testtools/doc/make.bat
deleted file mode 100644
index f8c1fd5..0000000
--- a/lib/testtools/doc/make.bat
+++ /dev/null
@@ -1,113 +0,0 @@
-@ECHO OFF
-
-REM Command file for Sphinx documentation
-
-set SPHINXBUILD=sphinx-build
-set BUILDDIR=_build
-set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
-if NOT "%PAPER%" == "" (
-	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
-)
-
-if "%1" == "" goto help
-
-if "%1" == "help" (
-	:help
-	echo.Please use `make ^<target^>` where ^<target^> is one of
-	echo.  html      to make standalone HTML files
-	echo.  dirhtml   to make HTML files named index.html in directories
-	echo.  pickle    to make pickle files
-	echo.  json      to make JSON files
-	echo.  htmlhelp  to make HTML files and a HTML help project
-	echo.  qthelp    to make HTML files and a qthelp project
-	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
-	echo.  changes   to make an overview over all changed/added/deprecated items
-	echo.  linkcheck to check all external links for integrity
-	echo.  doctest   to run all doctests embedded in the documentation if enabled
-	goto end
-)
-
-if "%1" == "clean" (
-	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
-	del /q /s %BUILDDIR%\*
-	goto end
-)
-
-if "%1" == "html" (
-	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
-	goto end
-)
-
-if "%1" == "dirhtml" (
-	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
-	goto end
-)
-
-if "%1" == "pickle" (
-	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
-	echo.
-	echo.Build finished; now you can process the pickle files.
-	goto end
-)
-
-if "%1" == "json" (
-	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
-	echo.
-	echo.Build finished; now you can process the JSON files.
-	goto end
-)
-
-if "%1" == "htmlhelp" (
-	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
-	echo.
-	echo.Build finished; now you can run HTML Help Workshop with the ^
-.hhp project file in %BUILDDIR%/htmlhelp.
-	goto end
-)
-
-if "%1" == "qthelp" (
-	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
-	echo.
-	echo.Build finished; now you can run "qcollectiongenerator" with the ^
-.qhcp project file in %BUILDDIR%/qthelp, like this:
-	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\testtools.qhcp
-	echo.To view the help file:
-	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\testtools.qhc
-	goto end
-)
-
-if "%1" == "latex" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	echo.
-	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "changes" (
-	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
-	echo.
-	echo.The overview file is in %BUILDDIR%/changes.
-	goto end
-)
-
-if "%1" == "linkcheck" (
-	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
-	echo.
-	echo.Link check complete; look for any errors in the above output ^
-or in %BUILDDIR%/linkcheck/output.txt.
-	goto end
-)
-
-if "%1" == "doctest" (
-	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
-	echo.
-	echo.Testing of doctests in the sources finished, look at the ^
-results in %BUILDDIR%/doctest/output.txt.
-	goto end
-)
-
-:end
diff --git a/lib/testtools/doc/overview.rst b/lib/testtools/doc/overview.rst
deleted file mode 100644
index a01dc3d..0000000
--- a/lib/testtools/doc/overview.rst
+++ /dev/null
@@ -1,101 +0,0 @@
-======================================
-testtools: tasteful testing for Python
-======================================
-
-testtools is a set of extensions to the Python standard library's unit testing
-framework. These extensions have been derived from many years of experience
-with unit testing in Python and come from many different sources. testtools
-supports Python versions all the way back to Python 2.6.
-
-What better way to start than with a contrived code snippet?::
-
-  from testtools import TestCase
-  from testtools.content import Content
-  from testtools.content_type import UTF8_TEXT
-  from testtools.matchers import Equals
-
-  from myproject import SillySquareServer
-
-  class TestSillySquareServer(TestCase):
-
-      def setUp(self):
-          super(TestSillySquareServer, self).setUp()
-          self.server = self.useFixture(SillySquareServer())
-          self.addCleanup(self.attach_log_file)
-
-      def attach_log_file(self):
-          self.addDetail(
-              'log-file',
-              Content(UTF8_TEXT,
-                      lambda: open(self.server.logfile, 'rb').readlines()))
-
-      def test_server_is_cool(self):
-          self.assertThat(self.server.temperature, Equals("cool"))
-
-      def test_square(self):
-          self.assertThat(self.server.silly_square_of(7), Equals(49))
-
-
-Why use testtools?
-==================
-
-Better assertion methods
-------------------------
-
-The standard assertion methods that come with unittest aren't as helpful as
-they could be, and there aren't quite enough of them.  testtools adds
-``assertIn``, ``assertIs``, ``assertIsInstance`` and their negatives.
-
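-For illustration, a minimal sketch of these assertions in use (the test class
-here is hypothetical)::
-
-  from testtools import TestCase
-
-  class TestAssertions(TestCase):
-
-      def test_membership_and_type(self):
-          self.assertIn('a', 'abc')
-          self.assertIsInstance(42, int)
-          self.assertNotIn('z', 'abc')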
-
-Matchers: better than assertion methods
----------------------------------------
-
-Of course, in any serious project you want to be able to have assertions that
-are specific to that project and the particular problem that it is addressing.
-Rather than forcing you to define your own assertion methods and maintain your
-own inheritance hierarchy of ``TestCase`` classes, testtools lets you write
-your own "matchers", custom predicates that can be plugged into a unit test::
-
-  def test_response_has_bold(self):
-     # The response has bold text.
-     response = self.server.getResponse()
-     self.assertThat(response, HTMLContains(Tag('bold', 'b')))
-
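-A matcher is simply an object with a ``match`` method that returns ``None``
-on success and a mismatch object on failure.  As a minimal sketch (this
-``IsDivisibleBy`` matcher is illustrative, not part of testtools)::
-
-  from testtools.matchers import Matcher, Mismatch
-
-  class IsDivisibleBy(Matcher):
-      """Match if the candidate is divisible by a divisor."""
-
-      def __init__(self, divisor):
-          self.divisor = divisor
-
-      def __str__(self):
-          return 'IsDivisibleBy(%r)' % (self.divisor,)
-
-      def match(self, actual):
-          remainder = actual % self.divisor
-          if remainder:
-              return Mismatch('%r is not divisible by %r' % (
-                  actual, self.divisor))
-          return None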
-
-More debugging info, when you need it
---------------------------------------
-
-testtools makes it easy to add arbitrary data to your test result.  If you
-want to know what's in a log file when a test fails, or what the load was on
-the computer when a test started, or what files were open, you can add that
-information with ``TestCase.addDetail``, and it will appear in the test
-results if that test fails.
-
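-As a minimal sketch, ``text_content`` is a convenience that wraps a string as
-UTF-8 text ``Content``::
-
-  from testtools import TestCase
-  from testtools.content import text_content
-
-  class TestWithDetails(TestCase):
-
-      def test_with_log(self):
-          self.addDetail('arbitrary-name', text_content('some text'))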
-
-Extend unittest, but stay compatible and re-usable
---------------------------------------------------
-
-testtools goes to great lengths to allow serious test authors and test
-*framework* authors to do whatever they like with their tests and their
-extensions while staying compatible with the standard library's unittest.
-
-testtools has completely parametrized how exceptions raised in tests are
-mapped to ``TestResult`` methods and how tests are actually executed (ever
-wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?).
-
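-For instance, a test can choose its own execution strategy with the
-``run_test_with`` decorator; ``MyRunTest`` below is a hypothetical ``RunTest``
-subclass::
-
-  from testtools import TestCase, run_test_with
-  from testtools.runtest import RunTest
-
-  class MyRunTest(RunTest):
-      """Customise how tests are executed here."""
-
-  class SomeTests(TestCase):
-
-      @run_test_with(MyRunTest)
-      def test_something(self):
-          pass
-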
-It also provides many simple but handy utilities, like the ability to clone a
-test, a ``MultiTestResult`` object that lets many result objects get the
-results from one test suite, and adapters to bring legacy ``TestResult``
-objects into our new golden age.
-
-
-Cross-Python compatibility
---------------------------
-
-testtools gives you the very latest in unit testing technology in a way that
-will work with Python 2.6, 2.7, 3.1 and 3.2.
-
-If you wish to use testtools with Python 2.4 or 2.5, then please use testtools
-0.9.15. Up to that release we supported Python 2.4 and 2.5, but we found the
-constraints of avoiding newer language features onerous as we added more
-support for Python 3 versions.
diff --git a/lib/testtools/scripts/README b/lib/testtools/scripts/README
deleted file mode 100644
index 648f105..0000000
--- a/lib/testtools/scripts/README
+++ /dev/null
@@ -1,3 +0,0 @@
-These are scripts to help with building, maintaining and releasing testtools.
-
-There is little here for anyone except a testtools contributor.
diff --git a/lib/testtools/scripts/_lp_release.py b/lib/testtools/scripts/_lp_release.py
deleted file mode 100644
index ac27e47..0000000
--- a/lib/testtools/scripts/_lp_release.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/python
-
-"""Release testtools on Launchpad.
-
-Steps:
- 1. Make sure all "Fix committed" bugs are assigned to 'next'
- 2. Rename 'next' to the new version
- 3. Release the milestone
- 4. Upload the tarball
- 5. Create a new 'next' milestone
- 6. Mark all "Fix committed" bugs in the milestone as "Fix released"
-
-Assumes that NEWS is in the parent directory, that the release sections are
-underlined with '~' and the subsections are underlined with '-'.
-
-Assumes that this file is in the 'scripts' directory of a testtools tree that
-has already had a tarball built and uploaded with 'python setup.py sdist
-upload --sign'.
-"""
-
-from datetime import datetime, timedelta, tzinfo
-import logging
-import os
-import sys
-
-from launchpadlib.launchpad import Launchpad
-from launchpadlib import uris
-
-
-APP_NAME = 'testtools-lp-release'
-CACHE_DIR = os.path.expanduser('~/.launchpadlib/cache')
-SERVICE_ROOT = uris.LPNET_SERVICE_ROOT
-
-FIX_COMMITTED = u"Fix Committed"
-FIX_RELEASED = u"Fix Released"
-
-# Launchpad file type for a tarball upload.
-CODE_RELEASE_TARBALL = 'Code Release Tarball'
-
-PROJECT_NAME = 'testtools'
-NEXT_MILESTONE_NAME = 'next'
-
-
-class _UTC(tzinfo):
-    """UTC"""
-
-    def utcoffset(self, dt):
-        return timedelta(0)
-
-    def tzname(self, dt):
-        return "UTC"
-
-    def dst(self, dt):
-        return timedelta(0)
-
-UTC = _UTC()
-
-
-def configure_logging():
-    level = logging.INFO
-    log = logging.getLogger(APP_NAME)
-    log.setLevel(level)
-    handler = logging.StreamHandler()
-    handler.setLevel(level)
-    formatter = logging.Formatter("%(levelname)s: %(message)s")
-    handler.setFormatter(formatter)
-    log.addHandler(handler)
-    return log
-LOG = configure_logging()
-
-
-def get_path(relpath):
-    """Get the absolute path for something relative to this file."""
-    return os.path.abspath(
-        os.path.join(
-            os.path.dirname(os.path.dirname(__file__)), relpath))
-
-
-def assign_fix_committed_to_next(testtools, next_milestone):
-    """Find all 'Fix Committed' and make sure they are in 'next'."""
-    fixed_bugs = list(testtools.searchTasks(status=FIX_COMMITTED))
-    for task in fixed_bugs:
-        LOG.debug("%s" % (task.title,))
-        if task.milestone != next_milestone:
-            task.milestone = next_milestone
-            LOG.info("Re-assigning %s" % (task.title,))
-            task.lp_save()
-
-
-def rename_milestone(next_milestone, new_name):
-    """Rename 'next_milestone' to 'new_name'."""
-    LOG.info("Renaming %s to %s" % (next_milestone.name, new_name))
-    next_milestone.name = new_name
-    next_milestone.lp_save()
-
-
-def get_release_notes_and_changelog(news_path):
-    release_notes = []
-    changelog = []
-    state = None
-    last_line = None
-
-    def is_heading_marker(line, marker_char):
-        return line and line == marker_char * len(line)
-
-    LOG.debug("Loading NEWS from %s" % (news_path,))
-    with open(news_path, 'r') as news:
-        for line in news:
-            line = line.strip()
-            if state is None:
-                if (is_heading_marker(line, '~') and
-                    not last_line.startswith('NEXT')):
-                    milestone_name = last_line
-                    state = 'release-notes'
-                else:
-                    last_line = line
-            elif state == 'title':
-                # The line after the title is a heading marker line, so we
-                # ignore it and change state. That which follows are the
-                # release notes.
-                state = 'release-notes'
-            elif state == 'release-notes':
-                if is_heading_marker(line, '-'):
-                    state = 'changelog'
-                    # Last line in the release notes is actually the first
-                    # line of the changelog.
-                    changelog = [release_notes.pop(), line]
-                else:
-                    release_notes.append(line)
-            elif state == 'changelog':
-                if is_heading_marker(line, '~'):
-                    # Last line in changelog is actually the first line of the
-                    # next section.
-                    changelog.pop()
-                    break
-                else:
-                    changelog.append(line)
-            else:
-                raise ValueError("Couldn't parse NEWS")
-
-    release_notes = '\n'.join(release_notes).strip() + '\n'
-    changelog = '\n'.join(changelog).strip() + '\n'
-    return milestone_name, release_notes, changelog
-
-
-def release_milestone(milestone, release_notes, changelog):
-    date_released = datetime.now(tz=UTC)
-    LOG.info(
-        "Releasing milestone: %s, date %s" % (milestone.name, date_released))
-    release = milestone.createProductRelease(
-        date_released=date_released,
-        changelog=changelog,
-        release_notes=release_notes,
-        )
-    milestone.is_active = False
-    milestone.lp_save()
-    return release
-
-
-def create_milestone(series, name):
-    """Create a new milestone in the same series as 'release_milestone'."""
-    LOG.info("Creating milestone %s in series %s" % (name, series.name))
-    return series.newMilestone(name=name)
-
-
-def close_fixed_bugs(milestone):
-    tasks = list(milestone.searchTasks())
-    for task in tasks:
-        LOG.debug("Found %s" % (task.title,))
-        if task.status == FIX_COMMITTED:
-            LOG.info("Closing %s" % (task.title,))
-            task.status = FIX_RELEASED
-        else:
-            LOG.warning(
-                "Bug not fixed, removing from milestone: %s" % (task.title,))
-            task.milestone = None
-        task.lp_save()
-
-
-def upload_tarball(release, tarball_path):
-    with open(tarball_path) as tarball:
-        tarball_content = tarball.read()
-    sig_path = tarball_path + '.asc'
-    with open(sig_path) as sig:
-        sig_content = sig.read()
-    tarball_name = os.path.basename(tarball_path)
-    LOG.info("Uploading tarball: %s" % (tarball_path,))
-    release.add_file(
-        file_type=CODE_RELEASE_TARBALL,
-        file_content=tarball_content, filename=tarball_name,
-        signature_content=sig_content,
-        signature_filename=sig_path,
-        content_type="application/x-gzip; charset=binary")
-
-
-def release_project(launchpad, project_name, next_milestone_name):
-    testtools = launchpad.projects[project_name]
-    next_milestone = testtools.getMilestone(name=next_milestone_name)
-    release_name, release_notes, changelog = get_release_notes_and_changelog(
-        get_path('NEWS'))
-    LOG.info("Releasing %s %s" % (project_name, release_name))
-    # Since reversing these operations is hard, and inspecting errors from
-    # Launchpad is also difficult, do some looking before leaping.
-    errors = []
-    tarball_path = get_path('dist/%s-%s.tar.gz' % (project_name, release_name,))
-    if not os.path.isfile(tarball_path):
-        errors.append("%s does not exist" % (tarball_path,))
-    if not os.path.isfile(tarball_path + '.asc'):
-        errors.append("%s does not exist" % (tarball_path + '.asc',))
-    if testtools.getMilestone(name=release_name):
-        errors.append("Milestone %s exists on %s" % (release_name, project_name))
-    if errors:
-        for error in errors:
-            LOG.error(error)
-        return 1
-    assign_fix_committed_to_next(testtools, next_milestone)
-    rename_milestone(next_milestone, release_name)
-    release = release_milestone(next_milestone, release_notes, changelog)
-    upload_tarball(release, tarball_path)
-    create_milestone(next_milestone.series_target, next_milestone_name)
-    close_fixed_bugs(next_milestone)
-    return 0
-
-
-def main(args):
-    launchpad = Launchpad.login_with(
-        APP_NAME, SERVICE_ROOT, CACHE_DIR, credentials_file='.lp_creds')
-    return release_project(launchpad, PROJECT_NAME, NEXT_MILESTONE_NAME)
-
-
-if __name__ == '__main__':
-    sys.exit(main(sys.argv))
diff --git a/lib/testtools/scripts/all-pythons b/lib/testtools/scripts/all-pythons
deleted file mode 100755
index 10fd6de..0000000
--- a/lib/testtools/scripts/all-pythons
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/python
-
-"""Run the testtools test suite for all supported Pythons.
-
-Prints output as a subunit test suite. If anything goes to stderr, that is
-treated as a test error. If a Python is not available, then it is skipped.
-"""
-
-from datetime import datetime
-import os
-import subprocess
-import sys
-
-import subunit
-from subunit import (
-    iso8601,
-    _make_stream_binary,
-    TestProtocolClient,
-    TestProtocolServer,
-    )
-from testtools import (
-    PlaceHolder,
-    TestCase,
-    )
-from testtools.compat import BytesIO
-from testtools.content import text_content
-
-
-ROOT = os.path.dirname(os.path.dirname(__file__))
-
-
-def run_for_python(version, result, tests):
-    if not tests:
-        tests = ['testtools.tests.test_suite']
-    # XXX: This could probably be broken up and put into subunit.
-    python = 'python%s' % (version,)
-    # XXX: Correct API, but subunit doesn't support it. :(
-    # result.tags(set(python), set())
-    result.time(now())
-    test = PlaceHolder(''.join(c for c in python if c != '.'))
-    process = subprocess.Popen(
-        '%s -c pass' % (python,), shell=True,
-        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    process.communicate()
-
-    if process.returncode:
-        result.startTest(test)
-        result.addSkip(test, reason='%s not available' % (python,))
-        result.stopTest(test)
-        return
-
-    env = os.environ.copy()
-    if env.get('PYTHONPATH', None):
-        env['PYTHONPATH'] = os.pathsep.join([ROOT, env['PYTHONPATH']])
-    else:
-        env['PYTHONPATH'] = ROOT
-    result.time(now())
-    protocol = TestProtocolServer(result)
-    subunit_path = os.path.join(os.path.dirname(subunit.__file__), 'run.py')
-    cmd = [
-        python,
-        '-W', 'ignore:Module testtools was already imported',
-        subunit_path]
-    cmd.extend(tests)
-    process = subprocess.Popen(
-        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
-    _make_stream_binary(process.stdout)
-    _make_stream_binary(process.stderr)
-    # XXX: This buffers everything. Bad for memory, bad for getting progress
-    # on jenkins.
-    output, error = process.communicate()
-    protocol.readFrom(BytesIO(output))
-    if error:
-        result.startTest(test)
-        result.addError(test, details={
-            'stderr': text_content(error),
-           })
-        result.stopTest(test)
-    result.time(now())
-    # XXX: Correct API, but subunit doesn't support it. :(
-    #result.tags(set(), set(python))
-
-
-def now():
-    return datetime.utcnow().replace(tzinfo=iso8601.Utc())
-
-
-if __name__ == '__main__':
-    sys.path.append(ROOT)
-    result = TestProtocolClient(sys.stdout)
-    for version in '2.6 2.7 3.0 3.1 3.2'.split():
-        run_for_python(version, result, sys.argv[1:])
diff --git a/lib/testtools/scripts/update-rtfd b/lib/testtools/scripts/update-rtfd
deleted file mode 100755
index 92a19da..0000000
--- a/lib/testtools/scripts/update-rtfd
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/python
-
-from urllib2 import urlopen
-
-
-WEB_HOOK = 'http://readthedocs.org/build/588'
-
-
-if __name__ == '__main__':
-    urlopen(WEB_HOOK, data='  ')
diff --git a/lib/testtools/setup.cfg b/lib/testtools/setup.cfg
deleted file mode 100644
index 9f95add..0000000
--- a/lib/testtools/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[test]
-test_module = testtools.tests
-buffer=1
-catch=1
diff --git a/lib/testtools/setup.py b/lib/testtools/setup.py
deleted file mode 100755
index dacbf91..0000000
--- a/lib/testtools/setup.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-"""Distutils installer for testtools."""
-
-from setuptools import setup
-from distutils.command.build_py import build_py
-import email
-import os
-import sys
-
-import testtools
-cmd_class = {}
-if getattr(testtools, 'TestCommand', None) is not None:
-    cmd_class['test'] = testtools.TestCommand
-
-
-class testtools_build_py(build_py):
-    def build_module(self, module, module_file, package):
-        if sys.version_info >= (3,) and module == '_compat2x':
-            return
-        return build_py.build_module(self, module, module_file, package)
-cmd_class['build_py'] = testtools_build_py
-
-
-def get_version_from_pkg_info():
-    """Get the version from PKG-INFO file if we can."""
-    pkg_info_path = os.path.join(os.path.dirname(__file__), 'PKG-INFO')
-    try:
-        pkg_info_file = open(pkg_info_path, 'r')
-    except (IOError, OSError):
-        return None
-    try:
-        pkg_info = email.message_from_file(pkg_info_file)
-    except email.MessageError:
-        return None
-    return pkg_info.get('Version', None)
-
-
-def get_version():
-    """Return the version of testtools that we are building."""
-    version = '.'.join(
-        str(component) for component in testtools.__version__[0:3])
-    phase = testtools.__version__[3]
-    if phase == 'final':
-        return version
-    pkg_info_version = get_version_from_pkg_info()
-    if pkg_info_version:
-        return pkg_info_version
-    # Apparently if we just say "snapshot" then distribute won't accept it
-    # as satisfying versioned dependencies. This is a problem for the
-    # daily build version.
-    return "snapshot-%s" % (version,)
-
-
-def get_long_description():
-    manual_path = os.path.join(
-        os.path.dirname(__file__), 'doc/overview.rst')
-    return open(manual_path).read()
-
-
-setup(name='testtools',
-      author='Jonathan M. Lange',
-      author_email='jml+testtools at mumak.net',
-      url='https://github.com/testing-cabal/testtools',
-      description=('Extensions to the Python standard library unit testing '
-                   'framework'),
-      long_description=get_long_description(),
-      version=get_version(),
-      classifiers=["License :: OSI Approved :: MIT License",
-        "Programming Language :: Python :: 3",
-        ],
-      packages=[
-        'testtools',
-        'testtools.matchers',
-        'testtools.testresult',
-        'testtools.tests',
-        'testtools.tests.matchers',
-        ],
-      cmdclass=cmd_class,
-      zip_safe=False,
-      install_requires=[
-        'extras',
-        # 'mimeparse' has not been uploaded with Python 3 compat by the
-        # maintainer, but someone kindly uploaded a fix as 'python-mimeparse'.
-        'python-mimeparse',
-        ],
-      )
diff --git a/lib/testtools/testtools/__init__.py b/lib/testtools/testtools/__init__.py
deleted file mode 100644
index 973083a..0000000
--- a/lib/testtools/testtools/__init__.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""Extensions to the standard Python unittest library."""
-
-__all__ = [
-    'clone_test_with_new_id',
-    'CopyStreamResult',
-    'ConcurrentTestSuite',
-    'ConcurrentStreamTestSuite',
-    'DecorateTestCaseResult',
-    'ErrorHolder',
-    'ExpectedException',
-    'ExtendedToOriginalDecorator',
-    'ExtendedToStreamDecorator',
-    'FixtureSuite',
-    'iterate_tests',
-    'MultipleExceptions',
-    'MultiTestResult',
-    'PlaceHolder',
-    'run_test_with',
-    'Tagger',
-    'TestCase',
-    'TestCommand',
-    'TestByTestResult',
-    'TestResult',
-    'TestResultDecorator',
-    'TextTestResult',
-    'RunTest',
-    'skip',
-    'skipIf',
-    'skipUnless',
-    'StreamFailFast',
-    'StreamResult',
-    'StreamResultRouter',
-    'StreamSummary',
-    'StreamTagger',
-    'StreamToDict',
-    'StreamToExtendedDecorator',
-    'StreamToQueue',
-    'TestControl',
-    'ThreadsafeForwardingResult',
-    'TimestampingStreamResult',
-    'try_import',
-    'try_imports',
-    ]
-
-# Compat - removal announced in 0.9.25.
-try:
-    from extras import (
-        try_import,
-        try_imports,
-        )
-except ImportError:
-    # Support reading __init__ for __version__ without extras, because pip does
-    # not support setup_requires.
-    pass
-else:
-
-    from testtools.matchers._impl import (
-        Matcher,
-        )
-    # Shut up, pyflakes: importing for documentation, not for namespacing.
-    Matcher
-
-    from testtools.runtest import (
-        MultipleExceptions,
-        RunTest,
-        )
-    from testtools.testcase import (
-        DecorateTestCaseResult,
-        ErrorHolder,
-        ExpectedException,
-        PlaceHolder,
-        TestCase,
-        clone_test_with_new_id,
-        run_test_with,
-        skip,
-        skipIf,
-        skipUnless,
-        )
-    from testtools.testresult import (
-        CopyStreamResult,
-        ExtendedToOriginalDecorator,
-        ExtendedToStreamDecorator,
-        MultiTestResult,
-        StreamFailFast,
-        StreamResult,
-        StreamResultRouter,
-        StreamSummary,
-        StreamTagger,
-        StreamToDict,
-        StreamToExtendedDecorator,
-        StreamToQueue,
-        Tagger,
-        TestByTestResult,
-        TestControl,
-        TestResult,
-        TestResultDecorator,
-        TextTestResult,
-        ThreadsafeForwardingResult,
-        TimestampingStreamResult,
-        )
-    from testtools.testsuite import (
-        ConcurrentTestSuite,
-        ConcurrentStreamTestSuite,
-        FixtureSuite,
-        iterate_tests,
-        )
-    from testtools.distutilscmd import (
-        TestCommand,
-        )
-
-# same format as sys.version_info: "A tuple containing the five components of
-# the version number: major, minor, micro, releaselevel, and serial. All
-# values except releaselevel are integers; the release level is 'alpha',
-# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
-# Python version 2.0 is (2, 0, 0, 'final', 0)."  Additionally we use a
-# releaselevel of 'dev' for unreleased under-development code.
-#
-# If the releaselevel is 'alpha' then the major/minor/micro components are not
-# established at this point, and setup.py will use a version of next-$(revno).
-# If the releaselevel is 'final', then the tarball will be major.minor.micro.
-# Otherwise it is major.minor.micro~$(revno).
-
-__version__ = (1, 1, 0, 'final', 0)
diff --git a/lib/testtools/testtools/_compat2x.py b/lib/testtools/testtools/_compat2x.py
deleted file mode 100644
index 2b25c13..0000000
--- a/lib/testtools/testtools/_compat2x.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2011 testtools developers. See LICENSE for details.
-
-"""Compatibility helpers that are valid syntax in Python 2.x.
-
-Only add things here if they *only* work in Python 2.x or are Python 2
-alternatives to things that *only* work in Python 3.x.
-"""
-
-__all__ = [
-    'reraise',
-    ]
-
-
-def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
-    """Re-raise an exception received from sys.exc_info() or similar."""
-    raise exc_class, exc_obj, exc_tb
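-
-# A usage sketch (illustrative only): re-raise after intermediate handling
-# while preserving the original traceback.
-#
-#   import sys
-#   try:
-#       do_something()  # hypothetical call that may raise
-#   except Exception:
-#       exc_class, exc_obj, exc_tb = sys.exc_info()
-#       cleanup()       # hypothetical cleanup work
-#       reraise(exc_class, exc_obj, exc_tb)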
-
diff --git a/lib/testtools/testtools/_compat3x.py b/lib/testtools/testtools/_compat3x.py
deleted file mode 100644
index 7a482c1..0000000
--- a/lib/testtools/testtools/_compat3x.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2011 testtools developers. See LICENSE for details.
-
-"""Compatibility helpers that are valid syntax in Python 3.x.
-
-Only add things here if they *only* work in Python 3.x or are Python 3
-alternatives to things that *only* work in Python 2.x.
-"""
-
-__all__ = [
-    'reraise',
-    ]
-
-
-def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
-    """Re-raise an exception received from sys.exc_info() or similar."""
-    raise exc_obj.with_traceback(exc_tb)
-
diff --git a/lib/testtools/testtools/_spinner.py b/lib/testtools/testtools/_spinner.py
deleted file mode 100644
index baf455a..0000000
--- a/lib/testtools/testtools/_spinner.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Evil reactor-spinning logic for running Twisted tests.
-
-This code is highly experimental, liable to change and not to be trusted.  If
-you couldn't write this yourself, you should not be using it.
-"""
-
-__all__ = [
-    'DeferredNotFired',
-    'extract_result',
-    'NoResultError',
-    'not_reentrant',
-    'ReentryError',
-    'Spinner',
-    'StaleJunkError',
-    'TimeoutError',
-    'trap_unhandled_errors',
-    ]
-
-import signal
-
-from testtools.monkey import MonkeyPatcher
-
-from twisted.internet import defer
-from twisted.internet.base import DelayedCall
-from twisted.internet.interfaces import IReactorThreads
-from twisted.python.failure import Failure
-from twisted.python.util import mergeFunctionMetadata
-
-
-class ReentryError(Exception):
-    """Raised when we try to re-enter a function that forbids it."""
-
-    def __init__(self, function):
-        Exception.__init__(self,
-            "%r in not re-entrant but was called within a call to itself."
-            % (function,))
-
-
-def not_reentrant(function, _calls={}):
-    """Decorates a function as not being re-entrant.
-
-    The decorated function will raise an error if called from within itself.
-    """
-    def decorated(*args, **kwargs):
-        if _calls.get(function, False):
-            raise ReentryError(function)
-        _calls[function] = True
-        try:
-            return function(*args, **kwargs)
-        finally:
-            _calls[function] = False
-    return mergeFunctionMetadata(function, decorated)
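-
-
-# A usage sketch (illustrative only): guard a function against accidental
-# recursion.
-#
-#   @not_reentrant
-#   def pump():
-#       pump()  # hypothetical re-entry; raises ReentryError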
-
-
-class DeferredNotFired(Exception):
-    """Raised when we extract a result from a Deferred that's not fired yet."""
-
-
-def extract_result(deferred):
-    """Extract the result from a fired deferred.
-
-    It can happen that you have an API that returns Deferreds for
-    compatibility with Twisted code, but is in fact synchronous, i.e. the
-    Deferreds it returns have always fired by the time it returns.  In this
-    case, you can use this function to convert the result back into the usual
-    form for a synchronous API, i.e. the result itself or a raised exception.
-
-    It would be very bad form to use this as some way of checking if a
-    Deferred has fired.
-    """
-    failures = []
-    successes = []
-    deferred.addCallbacks(successes.append, failures.append)
-    if len(failures) == 1:
-        failures[0].raiseException()
-    elif len(successes) == 1:
-        return successes[0]
-    else:
-        raise DeferredNotFired("%r has not fired yet." % (deferred,))
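-
-
-# A usage sketch (illustrative only): unwrap an already-fired Deferred.
-#
-#   from twisted.internet import defer
-#   extract_result(defer.succeed(42))   # => 42
-#   extract_result(defer.Deferred())    # raises DeferredNotFired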
-
-
-def trap_unhandled_errors(function, *args, **kwargs):
-    """Run a function, trapping any unhandled errors in Deferreds.
-
-    Assumes that 'function' will have handled any errors in Deferreds by the
-    time it is complete.  This is almost never true of any Twisted code, since
-    you can never tell when someone has added an errback to a Deferred.
-
-    If 'function' raises, then don't bother doing any unhandled error
-    jiggery-pokery, since something horrible has probably happened anyway.
-
-    :return: A tuple of '(result, error)', where 'result' is the value
-        returned by 'function' and 'error' is a list of 'defer.DebugInfo'
-        objects that have unhandled errors in Deferreds.
-    """
-    real_DebugInfo = defer.DebugInfo
-    debug_infos = []
-    def DebugInfo():
-        info = real_DebugInfo()
-        debug_infos.append(info)
-        return info
-    defer.DebugInfo = DebugInfo
-    try:
-        result = function(*args, **kwargs)
-    finally:
-        defer.DebugInfo = real_DebugInfo
-    errors = []
-    for info in debug_infos:
-        if info.failResult is not None:
-            errors.append(info)
-            # Disable the destructor that logs to error. We are already
-            # catching the error here.
-            info.__del__ = lambda: None
-    return result, errors
-
-
-class TimeoutError(Exception):
-    """Raised when run_in_reactor takes too long to run a function."""
-
-    def __init__(self, function, timeout):
-        Exception.__init__(self,
-            "%r took longer than %s seconds" % (function, timeout))
-
-
-class NoResultError(Exception):
-    """Raised when the reactor has stopped but we don't have any result."""
-
-    def __init__(self):
-        Exception.__init__(self,
-            "Tried to get test's result from Deferred when no result is "
-            "available.  Probably means we received SIGINT or similar.")
-
-
-class StaleJunkError(Exception):
-    """Raised when there's junk in the spinner from a previous run."""
-
-    def __init__(self, junk):
-        Exception.__init__(self,
-            "There was junk in the spinner from a previous run. "
-            "Use clear_junk() to clear it out: %r" % (junk,))
-
-
-class Spinner(object):
-    """Spin the reactor until a function is done.
-
-    This class emulates the behaviour of twisted.trial in that it grotesquely
-    and horribly spins the Twisted reactor while a function is running, and
-    then kills the reactor when that function is complete and all the
-    callbacks in its chains are done.
-    """
-
-    _UNSET = object()
-
-    # Signals that we save and restore for each spin.
-    _PRESERVED_SIGNALS = [
-        'SIGINT',
-        'SIGTERM',
-        'SIGCHLD',
-        ]
-
-    # There are many APIs within Twisted itself where a Deferred fires but
-    # leaves cleanup work scheduled for the reactor to do.  Arguably, many of
-    # these are bugs.  As such, we provide a facility to iterate the reactor
-    # event loop a number of times after every call, in order to shake out
-    # these buggy-but-commonplace events.  The default is 0, because that is
-    # the ideal, and it actually works for many cases.
-    _OBLIGATORY_REACTOR_ITERATIONS = 0
-
-    def __init__(self, reactor, debug=False):
-        """Construct a Spinner.
-
-        :param reactor: A Twisted reactor.
-        :param debug: Whether or not to enable Twisted's debugging.  Defaults
-            to False.
-        """
-        self._reactor = reactor
-        self._timeout_call = None
-        self._success = self._UNSET
-        self._failure = self._UNSET
-        self._saved_signals = []
-        self._junk = []
-        self._debug = debug
-
-    def _cancel_timeout(self):
-        if self._timeout_call:
-            self._timeout_call.cancel()
-
-    def _get_result(self):
-        if self._failure is not self._UNSET:
-            self._failure.raiseException()
-        if self._success is not self._UNSET:
-            return self._success
-        raise NoResultError()
-
-    def _got_failure(self, result):
-        self._cancel_timeout()
-        self._failure = result
-
-    def _got_success(self, result):
-        self._cancel_timeout()
-        self._success = result
-
-    def _stop_reactor(self, ignored=None):
-        """Stop the reactor!"""
-        self._reactor.crash()
-
-    def _timed_out(self, function, timeout):
-        e = TimeoutError(function, timeout)
-        self._failure = Failure(e)
-        self._stop_reactor()
-
-    def _clean(self):
-        """Clean up any junk in the reactor.
-
-        Will always iterate the reactor a number of times equal to
-        ``Spinner._OBLIGATORY_REACTOR_ITERATIONS``.  This is to work around
-        bugs in various Twisted APIs where a Deferred fires but still leaves
-        work (e.g. cancelling a call, actually closing a connection) for the
-        reactor to do.
-        """
-        for i in range(self._OBLIGATORY_REACTOR_ITERATIONS):
-            self._reactor.iterate(0)
-        junk = []
-        for delayed_call in self._reactor.getDelayedCalls():
-            delayed_call.cancel()
-            junk.append(delayed_call)
-        for selectable in self._reactor.removeAll():
-            # Twisted sends a 'KILL' signal to selectables that provide
-            # IProcessTransport.  Since only _dumbwin32proc processes do this,
-            # we aren't going to bother.
-            junk.append(selectable)
-        if IReactorThreads.providedBy(self._reactor):
-            if self._reactor.threadpool is not None:
-                self._reactor._stopThreadPool()
-        self._junk.extend(junk)
-        return junk
-
-    def clear_junk(self):
-        """Clear out our recorded junk.
-
-        :return: Whatever junk was there before.
-        """
-        junk = self._junk
-        self._junk = []
-        return junk
-
-    def get_junk(self):
-        """Return any junk that has been found on the reactor."""
-        return self._junk
-
-    def _save_signals(self):
-        available_signals = [
-            getattr(signal, name, None) for name in self._PRESERVED_SIGNALS]
-        self._saved_signals = [
-            (sig, signal.getsignal(sig)) for sig in available_signals if sig]
-
-    def _restore_signals(self):
-        for sig, hdlr in self._saved_signals:
-            signal.signal(sig, hdlr)
-        self._saved_signals = []
-
-    @not_reentrant
-    def run(self, timeout, function, *args, **kwargs):
-        """Run 'function' in a reactor.
-
-        If 'function' returns a Deferred, the reactor will keep spinning until
-        the Deferred fires and its chain completes or until the timeout is
-        reached -- whichever comes first.
-
-        :raise TimeoutError: If 'timeout' is reached before the Deferred
-            returned by 'function' has completed its callback chain.
-        :raise NoResultError: If the reactor is somehow interrupted before
-            the Deferred returned by 'function' has completed its callback
-            chain.
-        :raise StaleJunkError: If there's junk in the spinner from a previous
-            run.
-        :return: Whatever is at the end of the function's callback chain.  If
-            it's an error, then raise that.
-        """
-        debug = MonkeyPatcher()
-        if self._debug:
-            debug.add_patch(defer.Deferred, 'debug', True)
-            debug.add_patch(DelayedCall, 'debug', True)
-        debug.patch()
-        try:
-            junk = self.get_junk()
-            if junk:
-                raise StaleJunkError(junk)
-            self._save_signals()
-            self._timeout_call = self._reactor.callLater(
-                timeout, self._timed_out, function, timeout)
-            # Calling 'stop' on the reactor will make it impossible to
-            # re-start the reactor.  Since the default signal handlers for
-            # TERM, BREAK and INT all call reactor.stop(), we'll patch it over
-            # with crash.  XXX: It might be a better idea to either install
-            # custom signal handlers or to override the methods that are
-            # Twisted's signal handlers.
-            stop, self._reactor.stop = self._reactor.stop, self._reactor.crash
-            def run_function():
-                d = defer.maybeDeferred(function, *args, **kwargs)
-                d.addCallbacks(self._got_success, self._got_failure)
-                d.addBoth(self._stop_reactor)
-            try:
-                self._reactor.callWhenRunning(run_function)
-                self._reactor.run()
-            finally:
-                self._reactor.stop = stop
-                self._restore_signals()
-            try:
-                return self._get_result()
-            finally:
-                self._clean()
-        finally:
-            debug.restore()
diff --git a/lib/testtools/testtools/assertions.py b/lib/testtools/testtools/assertions.py
deleted file mode 100644
index 87fa74b..0000000
--- a/lib/testtools/testtools/assertions.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from testtools.matchers import (
-    Annotate,
-    MismatchError,
-    )
-
-
-def assert_that(matchee, matcher, message='', verbose=False):
-    """Assert that matchee is matched by matcher.
-
-    This should only be used when you need a function-based assertion;
-    ``assertThat`` in ``testtools.TestCase`` is preferred and has more
-    features.
-
-    :param matchee: An object to match with matcher.
-    :param matcher: An object meeting the testtools.Matcher protocol.
-    :raises MismatchError: When matcher does not match matchee.
-    """
-    matcher = Annotate.if_message(message, matcher)
-    mismatch = matcher.match(matchee)
-    if not mismatch:
-        return
-    raise MismatchError(matchee, matcher, mismatch, verbose)
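-
-
-# A usage sketch (illustrative only):
-#
-#   from testtools.matchers import Equals
-#   assert_that(4, Equals(4))    # returns silently
-#   assert_that(4, Equals(5))    # raises MismatchError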
diff --git a/lib/testtools/testtools/compat.py b/lib/testtools/testtools/compat.py
deleted file mode 100644
index d0a00d1..0000000
--- a/lib/testtools/testtools/compat.py
+++ /dev/null
@@ -1,386 +0,0 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
-
-"""Compatibility support for python 2 and 3."""
-
-__metaclass__ = type
-__all__ = [
-    '_b',
-    '_u',
-    'advance_iterator',
-    'all',
-    'BytesIO',
-    'classtypes',
-    'isbaseexception',
-    'istext',
-    'str_is_unicode',
-    'StringIO',
-    'reraise',
-    'unicode_output_stream',
-    ]
-
-import codecs
-import io
-import linecache
-import locale
-import os
-import re
-import sys
-import traceback
-import unicodedata
-
-from extras import try_imports
-
-BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO'])
-StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
-
-try:
-    from testtools import _compat2x as _compat
-except (SyntaxError, ImportError):
-    from testtools import _compat3x as _compat
-
-reraise = _compat.reraise
-
-
-__u_doc = """A function version of the 'u' prefix.
-
-This is needed because the u prefix is not usable in Python 3 but is required
-in Python 2 to get a unicode object.
-
-To migrate code that was written as u'\u1234' in Python 2 so that it runs on
-both 2 and 3, change it to _u('\u1234'). The Python 3 interpreter will decode
-it appropriately and the no-op _u for Python 3 lets it through; in Python 2
-we then decode it with unicode-escape inside the _u function.
-"""
-
-if sys.version_info > (3, 0):
-    import builtins
-    def _u(s):
-        return s
-    _r = ascii
-    def _b(s):
-        """A byte literal."""
-        return s.encode("latin-1")
-    advance_iterator = next
-    # GZ 2011-08-24: Seems istext() is easy to misuse and makes for bad code.
-    def istext(x):
-        return isinstance(x, str)
-    def classtypes():
-        return (type,)
-    str_is_unicode = True
-else:
-    import __builtin__ as builtins
-    def _u(s):
-        # The double replace mangling going on prepares the string for
-        # unicode-escape - \foo is preserved, \u and \U are decoded.
-        return (s.replace("\\", "\\\\").replace("\\\\u", "\\u")
-            .replace("\\\\U", "\\U").decode("unicode-escape"))
-    _r = repr
-    def _b(s):
-        return s
-    advance_iterator = lambda it: it.next()
-    def istext(x):
-        return isinstance(x, basestring)
-    def classtypes():
-        import types
-        return (type, types.ClassType)
-    str_is_unicode = sys.platform == "cli"
-
-_u.__doc__ = __u_doc
-
-
-# GZ 2011-08-24: Using isinstance checks like this encourages bad interfaces,
-#                there should be better ways to write code needing this.
-if not issubclass(getattr(builtins, "bytes", str), str):
-    def _isbytes(x):
-        return isinstance(x, bytes)
-else:
-    # Never return True on Pythons that provide the name but not the real type
-    def _isbytes(x):
-        return False
-
-
-def _slow_escape(text):
-    """Escape unicode ``text`` leaving printable characters unmodified
-
-    The behaviour emulates the Python 3 implementation of repr, see
-    unicode_repr in unicodeobject.c and isprintable definition.
-
-    Because this iterates over the input a codepoint at a time, it's slow, and
-    does not handle astral characters correctly on Python builds with 16 bit
-    rather than 32 bit unicode type.
-    """
-    output = []
-    for c in text:
-        o = ord(c)
-        if o < 256:
-            if o < 32 or 126 < o < 161:
-                output.append(c.encode("unicode-escape"))
-            elif o == 92:
-                # Separate due to bug in unicode-escape codec in Python 2.4
-                output.append("\\\\")
-            else:
-                output.append(c)
-        else:
-            # To get correct behaviour would need to pair up surrogates here
-            if unicodedata.category(c)[0] in "CZ":
-                output.append(c.encode("unicode-escape"))
-            else:
-                output.append(c)
-    return "".join(output)
-
-
-def text_repr(text, multiline=None):
-    """Rich repr for ``text`` returning unicode, triple quoted if ``multiline``.
-    """
-    is_py3k = sys.version_info > (3, 0)
-    nl = _isbytes(text) and bytes((0xA,)) or "\n"
-    if multiline is None:
-        multiline = nl in text
-    if not multiline and (is_py3k or not str_is_unicode and type(text) is str):
-        # Use normal repr for single line of unicode on Python 3 or bytes
-        return repr(text)
-    prefix = repr(text[:0])[:-2]
-    if multiline:
-        # To escape multiline strings, split and process each line in turn,
-        # making sure that quotes are not escaped.
-        if is_py3k:
-            offset = len(prefix) + 1
-            lines = []
-            for l in text.split(nl):
-                r = repr(l)
-                q = r[-1]
-                lines.append(r[offset:-1].replace("\\" + q, q))
-        elif not str_is_unicode and isinstance(text, str):
-            lines = [l.encode("string-escape").replace("\\'", "'")
-                for l in text.split("\n")]
-        else:
-            lines = [_slow_escape(l) for l in text.split("\n")]
-        # Combine the escaped lines and append two of the closing quotes,
-        # then iterate over the result to escape triple quotes correctly.
-        _semi_done = "\n".join(lines) + "''"
-        p = 0
-        while True:
-            p = _semi_done.find("'''", p)
-            if p == -1:
-                break
-            _semi_done = "\\".join([_semi_done[:p], _semi_done[p:]])
-            p += 2
-        return "".join([prefix, "'''\\\n", _semi_done, "'"])
-    escaped_text = _slow_escape(text)
-    # Determine which quote character to use and if one gets prefixed with a
-    # backslash following the same logic Python uses for repr() on strings
-    quote = "'"
-    if "'" in text:
-        if '"' in text:
-            escaped_text = escaped_text.replace("'", "\\'")
-        else:
-            quote = '"'
-    return "".join([prefix, quote, escaped_text, quote])
-
-
-def unicode_output_stream(stream):
-    """Get wrapper for given stream that writes any unicode without exception
-
-    Characters that can't be coerced to the encoding of the stream, or 'ascii'
-    if valid encoding is not found, will be replaced. The original stream may
-    be returned in situations where a wrapper is determined unneeded.
-
-    The wrapper only allows unicode to be written, not non-ascii bytestrings,
-    which is a good thing to ensure sanity and sanitation.
-    """
-    if (sys.platform == "cli" or
-        isinstance(stream, (io.TextIOWrapper, io.StringIO))):
-        # Best to never encode before writing in IronPython, or if it is
-        # already a TextIO (which in the io library has no encoding
-        # attribute).
-        return stream
-    try:
-        writer = codecs.getwriter(stream.encoding or "")
-    except (AttributeError, LookupError):
-        return codecs.getwriter("ascii")(stream, "replace")
-    if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
-        # The current stream has a unicode encoding so no error handler is needed
-        if sys.version_info > (3, 0):
-            return stream
-        return writer(stream)
-    if sys.version_info > (3, 0):
-        # Python 3 doesn't seem to make this easy, handle a common case
-        try:
-            return stream.__class__(stream.buffer, stream.encoding, "replace",
-                stream.newlines, stream.line_buffering)
-        except AttributeError:
-            pass
-    return writer(stream, "replace")
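-
-
-# A usage sketch (illustrative only): wrap sys.stdout so arbitrary unicode
-# can be written without UnicodeEncodeError; unencodable characters are
-# replaced.
-#
-#   import sys
-#   out = unicode_output_stream(sys.stdout)
-#   out.write(_u('\u1234'))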
-
-
-# The default source encoding is actually "iso-8859-1" until Python 2.5 but
-# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to
-# treat all versions the same way
-_default_source_encoding = "ascii"
-
-# Pattern specified in <http://www.python.org/dev/peps/pep-0263/>
-_cookie_search = re.compile(r"coding[:=]\s*([-\w.]+)").search
-
-def _detect_encoding(lines):
-    """Get the encoding of a Python source file from a list of lines as bytes
-
-This function does less than tokenize.detect_encoding added in Python 3 as
-it does not attempt to raise a SyntaxError when the interpreter would; it
-just wants the encoding of a source file that Python has already compiled
-and determined is valid.
-    """
-    if not lines:
-        return _default_source_encoding
-    if lines[0].startswith("\xef\xbb\xbf"):
-        # Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError
-        return "utf-8"
-    # Only the first two lines of the source file are examined
-    magic = _cookie_search("".join(lines[:2]))
-    if magic is None:
-        return _default_source_encoding
-    encoding = magic.group(1)
-    try:
-        codecs.lookup(encoding)
-    except LookupError:
-        # Some codecs raise something other than LookupError if they don't
-        # support the given error handler, but not the text ones that could
-        # actually be used for Python source code
-        return _default_source_encoding
-    return encoding
-
-
-class _EncodingTuple(tuple):
-    """A tuple type that can have an encoding attribute smuggled on"""
-
-
-def _get_source_encoding(filename):
-    """Detect, cache and return the encoding of Python source at filename"""
-    try:
-        return linecache.cache[filename].encoding
-    except (AttributeError, KeyError):
-        encoding = _detect_encoding(linecache.getlines(filename))
-        if filename in linecache.cache:
-            newtuple = _EncodingTuple(linecache.cache[filename])
-            newtuple.encoding = encoding
-            linecache.cache[filename] = newtuple
-        return encoding
-
-
-def _get_exception_encoding():
-    """Return the encoding we expect messages from the OS to be encoded in"""
-    if os.name == "nt":
-        # GZ 2010-05-24: Really want the codepage number instead, the error
-        #                handling of standard codecs is more deterministic
-        return "mbcs"
-    # GZ 2010-05-23: We need this call to be after initialisation, but there's
-    #                no benefit in asking more than once as it's a global
-    #                setting that can change after the message is formatted.
-    return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii"
-
-
-def _exception_to_text(evalue):
-    """Try hard to get a sensible text value out of an exception instance"""
-    try:
-        return unicode(evalue)
-    except KeyboardInterrupt:
-        raise
-    except:
-        # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
-        pass
-    try:
-        return str(evalue).decode(_get_exception_encoding(), "replace")
-    except KeyboardInterrupt:
-        raise
-    except:
-        # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
-        pass
-    # Okay, out of ideas, let higher level handle it
-    return None
-
-
-def _format_stack_list(stack_lines):
-    """Format 'stack_lines' and return a list of unicode strings.
-
-    :param stack_lines: A list of filename, lineno, name, and line variables,
-        probably obtained by calling traceback.extract_tb or
-        traceback.extract_stack.
-    """
-    fs_enc = sys.getfilesystemencoding()
-    extracted_list = []
-    for filename, lineno, name, line in stack_lines:
-        extracted_list.append((
-            filename.decode(fs_enc, "replace"),
-            lineno,
-            name.decode("ascii", "replace"),
-            line and line.decode(
-                _get_source_encoding(filename), "replace")))
-    return traceback.format_list(extracted_list)
-
-
-def _format_exception_only(eclass, evalue):
-    """Format the excption part of a traceback.
-
-    :param eclass: The type of the exception being formatted.
-    :param evalue: The exception instance.
-    :returns: A list of unicode strings.
-    """
-    list = []
-    if evalue is None:
-        # Is a (deprecated) string exception
-        list.append((eclass + "\n").decode("ascii", "replace"))
-        return list
-    if isinstance(evalue, SyntaxError):
-        # Avoid duplicating the special formatting for SyntaxError here,
-        # instead create a new instance with unicode filename and line
-        # Potentially gives duff spacing, but that's a pre-existing issue
-        try:
-            msg, (filename, lineno, offset, line) = evalue
-        except (TypeError, ValueError):
-            pass # Strange exception instance, fall through to generic code
-        else:
-            # Errors during parsing give the line from buffer encoded as
-            # latin-1 or utf-8 or the encoding of the file depending on the
-            # coding and whether the patch for issue #1031213 is applied, so
-            # give up on trying to decode it and just read the file again
-            if line:
-                bytestr = linecache.getline(filename, lineno)
-                if bytestr:
-                    if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"):
-                        bytestr = bytestr[3:]
-                    line = bytestr.decode(
-                        _get_source_encoding(filename), "replace")
-                    del linecache.cache[filename]
-                else:
-                    line = line.decode("ascii", "replace")
-            if filename:
-                fs_enc = sys.getfilesystemencoding()
-                filename = filename.decode(fs_enc, "replace")
-            evalue = eclass(msg, (filename, lineno, offset, line))
-            list.extend(traceback.format_exception_only(eclass, evalue))
-            return list
-    sclass = eclass.__name__
-    svalue = _exception_to_text(evalue)
-    if svalue:
-        list.append("%s: %s\n" % (sclass, svalue))
-    elif svalue is None:
-        # GZ 2010-05-24: Not a great fallback message, but keep for the moment
-        list.append(_u("%s: <unprintable %s object>\n" % (sclass, sclass)))
-    else:
-        list.append(_u("%s\n" % sclass))
-    return list
-
-
-_TB_HEADER = _u('Traceback (most recent call last):\n')
-
-
-def _format_exc_info(eclass, evalue, tb, limit=None):
-    """Format a stack trace and the exception information as unicode
-
-    Compatibility function for Python 2 which ensures each component of a
-    traceback is correctly decoded according to its origins.
-
-    Based on traceback.format_exception and related functions.
-    """
-    return [_TB_HEADER] \
-        + _format_stack_list(traceback.extract_tb(tb, limit)) \
-        + _format_exception_only(eclass, evalue)
diff --git a/lib/testtools/testtools/content.py b/lib/testtools/testtools/content.py
deleted file mode 100644
index 101b631..0000000
--- a/lib/testtools/testtools/content.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-"""Content - a MIME-like Content object."""
-
-__all__ = [
-    'attach_file',
-    'Content',
-    'content_from_file',
-    'content_from_stream',
-    'json_content',
-    'text_content',
-    'TracebackContent',
-    ]
-
-import codecs
-import inspect
-import json
-import os
-import sys
-import traceback
-
-from extras import try_import
-
-from testtools.compat import (
-    _b,
-    _format_exception_only,
-    _format_stack_list,
-    _TB_HEADER,
-    _u,
-    istext,
-    str_is_unicode,
-)
-from testtools.content_type import ContentType, JSON, UTF8_TEXT
-
-
-functools = try_import('functools')
-
-_join_b = _b("").join
-
-
-DEFAULT_CHUNK_SIZE = 4096
-
-STDOUT_LINE = '\nStdout:\n%s'
-STDERR_LINE = '\nStderr:\n%s'
-
-
-def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
-    """Read 'stream' in chunks of 'chunk_size'.
-
-    :param stream: A file-like object to read from.
-    :param chunk_size: The size of each read from 'stream'.
-    :param seek_offset: If non-None, seek before iterating.
-    :param seek_whence: Pass through to the seek call, if seeking.
-    """
-    if seek_offset is not None:
-        stream.seek(seek_offset, seek_whence)
-    chunk = stream.read(chunk_size)
-    while chunk:
-        yield chunk
-        chunk = stream.read(chunk_size)
-
-
-class Content(object):
-    """A MIME-like Content object.
-
-    'Content' objects can be serialised to bytes using the iter_bytes method.
-    If the 'Content-Type' is recognised by other code, they are welcome to
-    look for richer contents that mere byte serialisation - for example in
-    memory object graphs etc. However, such code MUST be prepared to receive
-    a generic 'Content' object that has been reconstructed from a byte stream.
-
-    :ivar content_type: The content type of this Content.
-    """
-
-    def __init__(self, content_type, get_bytes):
-        """Create a ContentType."""
-        if None in (content_type, get_bytes):
-            raise ValueError("None not permitted in %r, %r" % (
-                content_type, get_bytes))
-        self.content_type = content_type
-        self._get_bytes = get_bytes
-
-    def __eq__(self, other):
-        return (self.content_type == other.content_type and
-            _join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
-
-    def as_text(self):
-        """Return all of the content as text.
-
-        This is only valid where ``iter_text`` is.  It will load all of the
-        content into memory.  Where this is a concern, use ``iter_text``
-        instead.
-        """
-        return _u('').join(self.iter_text())
-
-    def iter_bytes(self):
-        """Iterate over bytestrings of the serialised content."""
-        return self._get_bytes()
-
-    def iter_text(self):
-        """Iterate over the text of the serialised content.
-
-        This is only valid for text MIME types, and will use ISO-8859-1 if
-        no charset parameter is present in the MIME type. (This is somewhat
-        arbitrary, but consistent with RFC 2616 3.7.1).
-
-        :raises ValueError: If the content type is not text/\*.
-        """
-        if self.content_type.type != "text":
-            raise ValueError("Not a text type %r" % self.content_type)
-        return self._iter_text()
-
-    def _iter_text(self):
-        """Worker for iter_text - does the decoding."""
-        encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
-        decoder = codecs.getincrementaldecoder(encoding)()
-        for bytes in self.iter_bytes():
-            yield decoder.decode(bytes)
-        final = decoder.decode(_b(''), True)
-        if final:
-            yield final
-
-    def __repr__(self):
-        return "<Content type=%r, value=%r>" % (
-            self.content_type, _join_b(self.iter_bytes()))
-
-
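-# Editor's sketch (not part of the original module): a Content built from a
-# list of byte chunks, viewed both as bytes and as text.
-def _example_content():  # pragma: no cover
-    content = Content(UTF8_TEXT, lambda: [b'hello ', b'world'])
-    assert list(content.iter_bytes()) == [b'hello ', b'world']
-    # as_text() decodes using the charset parameter ('utf8' here).
-    assert content.as_text() == 'hello world'
-
-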
-class StackLinesContent(Content):
-    """Content object for stack lines.
-
-    This adapts a list of "preprocessed" stack lines into a 'Content' object.
-    The stack lines are most likely produced from ``traceback.extract_stack``
-    or ``traceback.extract_tb``.
-
-    text/x-traceback;language=python is used for the mime type, in order to
-    provide room for other languages to format their tracebacks differently.
-    """
-
-    # Whether or not to hide layers of the stack trace that are
-    # unittest/testtools internal code.  Defaults to True since the
-    # system-under-test is rarely unittest or testtools.
-    HIDE_INTERNAL_STACK = True
-
-    def __init__(self, stack_lines, prefix_content="", postfix_content=""):
-        """Create a StackLinesContent for ``stack_lines``.
-
-        :param stack_lines: A list of preprocessed stack lines, probably
-            obtained by calling ``traceback.extract_stack`` or
-            ``traceback.extract_tb``.
-        :param prefix_content: If specified, a unicode string to prepend to the
-            text content.
-        :param postfix_content: If specified, a unicode string to append to the
-            text content.
-        """
-        content_type = ContentType('text', 'x-traceback',
-            {"language": "python", "charset": "utf8"})
-        value = prefix_content + \
-            self._stack_lines_to_unicode(stack_lines) + \
-            postfix_content
-        super(StackLinesContent, self).__init__(
-            content_type, lambda: [value.encode("utf8")])
-
-    def _stack_lines_to_unicode(self, stack_lines):
-        """Converts a list of pre-processed stack lines into a unicode string.
-        """
-
-        # testtools customization. When str is unicode (e.g. IronPython,
-        # Python 3), traceback.format_exception returns unicode. For Python 2,
-        # it returns bytes. We need to guarantee unicode.
-        if str_is_unicode:
-            format_stack_lines = traceback.format_list
-        else:
-            format_stack_lines = _format_stack_list
-
-        msg_lines = format_stack_lines(stack_lines)
-
-        return ''.join(msg_lines)
-
-
-def TracebackContent(err, test):
-    """Content object for tracebacks.
-
-    This adapts an exc_info tuple to the 'Content' interface.
-    'text/x-traceback;language=python' is used for the mime type, in order to
-    provide room for other languages to format their tracebacks differently.
-    """
-    if err is None:
-        raise ValueError("err may not be None")
-
-    exctype, value, tb = err
-    # Skip test runner traceback levels
-    if StackLinesContent.HIDE_INTERNAL_STACK:
-        while tb and '__unittest' in tb.tb_frame.f_globals:
-            tb = tb.tb_next
-
-    # testtools customization. When str is unicode (e.g. IronPython,
-    # Python 3), traceback.format_exception_only returns unicode. For Python 2,
-    # it returns bytes. We need to guarantee unicode.
-    if str_is_unicode:
-        format_exception_only = traceback.format_exception_only
-    else:
-        format_exception_only = _format_exception_only
-
-    limit = None
-    # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
-    # Note: the branch below is dead code and still references 'self' from
-    # its original home in a method; it needs repair before re-enabling.
-    if (False
-        and StackLinesContent.HIDE_INTERNAL_STACK
-        and test.failureException
-        and isinstance(value, test.failureException)):
-        # Skip assert*() traceback levels
-        limit = 0
-        while tb and not self._is_relevant_tb_level(tb):
-            limit += 1
-            tb = tb.tb_next
-
-    prefix = _TB_HEADER
-    stack_lines = traceback.extract_tb(tb, limit)
-    postfix = ''.join(format_exception_only(exctype, value))
-
-    return StackLinesContent(stack_lines, prefix, postfix)
-
-
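-# Editor's sketch (not part of the original module): capturing a caught
-# exception as a detail.  'FakeTest' is a hypothetical stand-in; any object
-# can be passed as 'test' while the assertion-skipping branch stays disabled.
-def _example_traceback_content():  # pragma: no cover
-    class FakeTest(object):
-        failureException = AssertionError
-
-    try:
-        raise RuntimeError('boom')
-    except RuntimeError:
-        detail = TracebackContent(sys.exc_info(), FakeTest())
-    # The rendered text starts with the standard traceback header.
-    assert detail.as_text().startswith('Traceback')
-
-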
-def StacktraceContent(prefix_content="", postfix_content=""):
-    """Content object for stack traces.
-
-    This function will create and return a 'Content' object that contains a
-    stack trace.
-
-    The mime type is set to 'text/x-traceback;language=python', so other
-    languages can format their stack traces differently.
-
-    :param prefix_content: A unicode string to add before the stack lines.
-    :param postfix_content: A unicode string to add after the stack lines.
-    """
-    stack = inspect.stack()[1:]
-
-    if StackLinesContent.HIDE_INTERNAL_STACK:
-        limit = 1
-        while limit < len(stack) and '__unittest' not in stack[limit][0].f_globals:
-            limit += 1
-    else:
-        limit = -1
-
-    frames_only = [line[0] for line in stack[:limit]]
-    processed_stack = []
-    for frame in reversed(frames_only):
-        filename, line, function, context, _ = inspect.getframeinfo(frame)
-        context = ''.join(context)
-        processed_stack.append((filename, line, function, context))
-    return StackLinesContent(processed_stack, prefix_content, postfix_content)
-
-
-def json_content(json_data):
-    """Create a JSON Content object from JSON-encodeable data."""
-    data = json.dumps(json_data)
-    if str_is_unicode:
-        # The json module perversely returns native str not bytes
-        data = data.encode('utf8')
-    return Content(JSON, lambda: [data])
-
-
-def text_content(text):
-    """Create a Content object from some text.
-
-    This is useful for adding details which are short strings.
-    """
-    if not istext(text):
-        raise TypeError(
-            "text_content must be given text, not '%s'." % type(text).__name__
-        )
-    return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
-
-
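-# Editor's sketch (not part of the original module): the two convenience
-# constructors.  Note json_content is application/json, so use iter_bytes()
-# rather than the text-only as_text().
-def _example_json_and_text_content():  # pragma: no cover
-    detail = json_content({'failures': 0})
-    assert _join_b(detail.iter_bytes()) == _b('{"failures": 0}')
-    note = text_content('all good')
-    assert note.as_text() == 'all good'
-
-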
-def maybe_wrap(wrapper, func):
-    """Merge metadata for func into wrapper if functools is present."""
-    if functools is not None:
-        wrapper = functools.update_wrapper(wrapper, func)
-    return wrapper
-
-
-def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
-                      buffer_now=False, seek_offset=None, seek_whence=0):
-    """Create a Content object from a file on disk.
-
-    Note that unless ``buffer_now`` is explicitly passed in as True, the file
-    will only be read from when ``iter_bytes`` is called.
-
-    :param path: The path to the file to be used as content.
-    :param content_type: The type of content.  If not specified, defaults
-        to UTF8-encoded text/plain.
-    :param chunk_size: The size of chunks to read from the file.
-        Defaults to ``DEFAULT_CHUNK_SIZE``.
-    :param buffer_now: If True, read the file from disk now and keep it in
-        memory. Otherwise, only read when the content is serialized.
-    :param seek_offset: If non-None, seek within the stream before reading it.
-    :param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
-    """
-    if content_type is None:
-        content_type = UTF8_TEXT
-    def reader():
-        with open(path, 'rb') as stream:
-            for chunk in _iter_chunks(stream,
-                                      chunk_size,
-                                      seek_offset,
-                                      seek_whence):
-                yield chunk
-    return content_from_reader(reader, content_type, buffer_now)
-
-
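-# Editor's sketch (not part of the original module): buffering a temporary
-# file's bytes into a Content object, using only the stdlib.
-def _example_content_from_file():  # pragma: no cover
-    import tempfile
-    with tempfile.NamedTemporaryFile(delete=False) as f:
-        f.write(_b('log line\n'))
-        path = f.name
-    try:
-        # buffer_now=True reads immediately, so deleting the file is safe.
-        detail = content_from_file(path, buffer_now=True)
-        assert _join_b(detail.iter_bytes()) == _b('log line\n')
-    finally:
-        os.remove(path)
-
-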
-def content_from_stream(stream, content_type=None,
-                        chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
-                        seek_offset=None, seek_whence=0):
-    """Create a Content object from a file-like stream.
-
-    Note that unless ``buffer_now`` is explicitly passed in as True, the stream
-    will only be read from when ``iter_bytes`` is called.
-
-    :param stream: A file-like object to read the content from. The stream
-        is not closed by this function or the 'Content' object it returns.
-    :param content_type: The type of content. If not specified, defaults
-        to UTF8-encoded text/plain.
-    :param chunk_size: The size of chunks to read from the file.
-        Defaults to ``DEFAULT_CHUNK_SIZE``.
-    :param buffer_now: If True, reads from the stream right now. Otherwise,
-        only reads when the content is serialized. Defaults to False.
-    :param seek_offset: If non-None, seek within the stream before reading it.
-    :param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
-    """
-    if content_type is None:
-        content_type = UTF8_TEXT
-    reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
-    return content_from_reader(reader, content_type, buffer_now)
-
-
-def content_from_reader(reader, content_type, buffer_now):
-    """Create a Content object that will obtain the content from reader.
-
-    :param reader: A callback to read the content. Should return an iterable of
-        bytestrings.
-    :param content_type: The content type to create.
-    :param buffer_now: If True the reader is evaluated immediately and
-        buffered.
-    """
-    if content_type is None:
-        content_type = UTF8_TEXT
-    if buffer_now:
-        contents = list(reader())
-        reader = lambda: contents
-    return Content(content_type, reader)
-
-
-def attach_file(detailed, path, name=None, content_type=None,
-                chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
-    """Attach a file to this test as a detail.
-
-    This is a convenience method wrapping around ``addDetail``.
-
-    Note that by default the contents of the file will be read immediately. If
-    ``buffer_now`` is False, then the file *must* exist when the test result is
-    called with the results of this test, after the test has been torn down.
-
-    :param detailed: The object to attach the detail to; anything with an
-        ``addDetail`` method, such as a TestCase.
-    :param path: The path to the file to attach.
-    :param name: The name to give to the detail for the attached file.
-    :param content_type: The content type of the file.  If not provided,
-        defaults to UTF8-encoded text/plain.
-    :param chunk_size: The size of chunks to read from the file.  Defaults
-        to ``DEFAULT_CHUNK_SIZE``.
-    :param buffer_now: If False the file content is read when the content
-        object is evaluated rather than when attach_file is called.
-        Note that this may be after any cleanups that 'detailed' has, so
-        if the file is a temporary file disabling buffer_now may cause the file
-        to be read after it is deleted. To handle those cases, using
-        attach_file as a cleanup is recommended because it guarantees a
-        sequence for when the attach_file call is made::
-
-            detailed.addCleanup(attach_file, detailed, 'foo.txt')
-    """
-    if name is None:
-        name = os.path.basename(path)
-    content_object = content_from_file(
-        path, content_type, chunk_size, buffer_now)
-    detailed.addDetail(name, content_object)
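-
-
-# Editor's sketch (not part of the original module): 'test.log' is a
-# hypothetical file written during the test; 'case' is any object with
-# addDetail/addCleanup, such as a TestCase.
-def _example_attach_file(case):  # pragma: no cover
-    # Read the file immediately and attach it under an explicit name.
-    attach_file(case, 'test.log', name='server-log')
-    # Or defer the read to clean-up time, after the test body has run.
-    case.addCleanup(attach_file, case, 'test.log')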
diff --git a/lib/testtools/testtools/content_type.py b/lib/testtools/testtools/content_type.py
deleted file mode 100644
index bbf314b..0000000
--- a/lib/testtools/testtools/content_type.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-"""ContentType - a MIME Content Type."""
-
-
-class ContentType(object):
-    """A content type from http://www.iana.org/assignments/media-types/
-
-    :ivar type: The primary type, e.g. "text" or "application"
-    :ivar subtype: The subtype, e.g. "plain" or "octet-stream"
-    :ivar parameters: A dict of additional parameters specific to the
-        content type.
-    """
-
-    def __init__(self, primary_type, sub_type, parameters=None):
-        """Create a ContentType."""
-        if None in (primary_type, sub_type):
-            raise ValueError("None not permitted in %r, %r" % (
-                primary_type, sub_type))
-        self.type = primary_type
-        self.subtype = sub_type
-        self.parameters = parameters or {}
-
-    def __eq__(self, other):
-        if type(other) != ContentType:
-            return False
-        return self.__dict__ == other.__dict__
-
-    def __repr__(self):
-        if self.parameters:
-            params = '; '
-            params += '; '.join(
-                sorted('%s="%s"' % (k, v) for k, v in self.parameters.items()))
-        else:
-            params = ''
-        return "%s/%s%s" % (self.type, self.subtype, params)
-
-
-JSON = ContentType('application', 'json')
-
-UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})
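-
-
-# Editor's sketch (not part of the original module): parameters are rendered
-# into the MIME string, and equality is structural.
-def _example_content_type():  # pragma: no cover
-    ct = ContentType('text', 'plain', {'charset': 'utf8'})
-    assert repr(ct) == 'text/plain; charset="utf8"'
-    assert ct == UTF8_TEXT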
diff --git a/lib/testtools/testtools/deferredruntest.py b/lib/testtools/testtools/deferredruntest.py
deleted file mode 100644
index d22c79f..0000000
--- a/lib/testtools/testtools/deferredruntest.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Individual test case execution for tests that return Deferreds.
-
-This module is highly experimental and is liable to change in ways that cause
-subtle failures in tests.  Use at your own peril.
-"""
-
-__all__ = [
-    'assert_fails_with',
-    'AsynchronousDeferredRunTest',
-    'AsynchronousDeferredRunTestForBrokenTwisted',
-    'SynchronousDeferredRunTest',
-    ]
-
-import sys
-
-from testtools.compat import StringIO
-from testtools.content import (
-    Content,
-    text_content,
-    )
-from testtools.content_type import UTF8_TEXT
-from testtools.runtest import RunTest
-from testtools._spinner import (
-    extract_result,
-    NoResultError,
-    Spinner,
-    TimeoutError,
-    trap_unhandled_errors,
-    )
-
-from twisted.internet import defer
-from twisted.python import log
-from twisted.trial.unittest import _LogObserver
-
-
-class _DeferredRunTest(RunTest):
-    """Base for tests that return Deferreds."""
-
-    def _got_user_failure(self, failure, tb_label='traceback'):
-        """We got a failure from user code."""
-        return self._got_user_exception(
-            (failure.type, failure.value, failure.getTracebackObject()),
-            tb_label=tb_label)
-
-
-class SynchronousDeferredRunTest(_DeferredRunTest):
-    """Runner for tests that return synchronous Deferreds."""
-
-    def _run_user(self, function, *args):
-        d = defer.maybeDeferred(function, *args)
-        d.addErrback(self._got_user_failure)
-        result = extract_result(d)
-        return result
-
-
-def run_with_log_observers(observers, function, *args, **kwargs):
-    """Run 'function' with the given Twisted log observers."""
-    real_observers = list(log.theLogPublisher.observers)
-    for observer in real_observers:
-        log.theLogPublisher.removeObserver(observer)
-    for observer in observers:
-        log.theLogPublisher.addObserver(observer)
-    try:
-        return function(*args, **kwargs)
-    finally:
-        for observer in observers:
-            log.theLogPublisher.removeObserver(observer)
-        for observer in real_observers:
-            log.theLogPublisher.addObserver(observer)
-
-
-# Observer of the Twisted log that we install during tests.
-_log_observer = _LogObserver()
-
-
-class AsynchronousDeferredRunTest(_DeferredRunTest):
-    """Runner for tests that return Deferreds that fire asynchronously.
-
-    That is, this test runner assumes that the Deferreds will only fire if the
-    reactor is left to spin for a while.
-
-    Do not rely too heavily on the nuances of the behaviour of this class.
-    What it does to the reactor is black magic, and if we can find nicer ways
-    of doing it we will gladly break backwards compatibility.
-
-    This is highly experimental code.  Use at your own risk.
-    """
-
-    def __init__(self, case, handlers=None, last_resort=None, reactor=None,
-                 timeout=0.005, debug=False):
-        """Construct an `AsynchronousDeferredRunTest`.
-
-        Please be sure to always use keyword syntax, not positional: the base
-        class may add arguments in future, and for compatibility with core
-        code they would have to be inserted before the local parameters here.
-
-        :param case: The `TestCase` to run.
-        :param handlers: A list of exception handlers (ExceptionType, handler)
-            where 'handler' is a callable that takes a `TestCase`, a
-            ``testtools.TestResult`` and the exception raised.
-        :param last_resort: Handler to call before re-raising uncatchable
-            exceptions (those for which there is no handler).
-        :param reactor: The Twisted reactor to use.  If not given, we use the
-            default reactor.
-        :param timeout: The maximum time allowed for running a test.  The
-            default is 0.005s.
-        :param debug: Whether or not to enable Twisted's debugging.  Use this
-            to get information about unhandled Deferreds and left-over
-            DelayedCalls.  Defaults to False.
-        """
-        super(AsynchronousDeferredRunTest, self).__init__(
-            case, handlers, last_resort)
-        if reactor is None:
-            from twisted.internet import reactor
-        self._reactor = reactor
-        self._timeout = timeout
-        self._debug = debug
-
-    @classmethod
-    def make_factory(cls, reactor=None, timeout=0.005, debug=False):
-        """Make a factory that conforms to the RunTest factory interface."""
-        # This is horrible, but it means that the return value of the method
-        # will be able to be assigned to a class variable *and* also be
-        # invoked directly.
-        class AsynchronousDeferredRunTestFactory:
-            def __call__(self, case, handlers=None, last_resort=None):
-                return cls(case, handlers, last_resort, reactor, timeout, debug)
-        return AsynchronousDeferredRunTestFactory()
-
-    @defer.deferredGenerator
-    def _run_cleanups(self):
-        """Run the cleanups on the test case.
-
-        We expect that the cleanups on the test case can also return
-        asynchronous Deferreds.  As such, we take the responsibility for
-        running the cleanups, rather than letting TestCase do it.
-        """
-        last_exception = None
-        while self.case._cleanups:
-            f, args, kwargs = self.case._cleanups.pop()
-            d = defer.maybeDeferred(f, *args, **kwargs)
-            thing = defer.waitForDeferred(d)
-            yield thing
-            try:
-                thing.getResult()
-            except Exception:
-                exc_info = sys.exc_info()
-                self.case._report_traceback(exc_info)
-                last_exception = exc_info[1]
-        yield last_exception
-
-    def _make_spinner(self):
-        """Make the `Spinner` to be used to run the tests."""
-        return Spinner(self._reactor, debug=self._debug)
-
-    def _run_deferred(self):
-        """Run the test, assuming everything in it is Deferred-returning.
-
-        This should return a Deferred that fires with True if the test was
-        successful and False if the test was not successful.  It should *not*
-        call addSuccess on the result, because there's reactor clean-up that
-        needs to be done afterwards.
-        """
-        fails = []
-
-        def fail_if_exception_caught(exception_caught):
-            if self.exception_caught == exception_caught:
-                fails.append(None)
-
-        def clean_up(ignored=None):
-            """Run the cleanups."""
-            d = self._run_cleanups()
-            def clean_up_done(result):
-                if result is not None:
-                    self._exceptions.append(result)
-                    fails.append(None)
-            return d.addCallback(clean_up_done)
-
-        def set_up_done(exception_caught):
-            """Set up is done, either clean up or run the test."""
-            if self.exception_caught == exception_caught:
-                fails.append(None)
-                return clean_up()
-            else:
-                d = self._run_user(self.case._run_test_method, self.result)
-                d.addCallback(fail_if_exception_caught)
-                d.addBoth(tear_down)
-                return d
-
-        def tear_down(ignored):
-            d = self._run_user(self.case._run_teardown, self.result)
-            d.addCallback(fail_if_exception_caught)
-            d.addBoth(clean_up)
-            return d
-
-        d = self._run_user(self.case._run_setup, self.result)
-        d.addCallback(set_up_done)
-        d.addBoth(lambda ignored: len(fails) == 0)
-        return d
-
-    def _log_user_exception(self, e):
-        """Raise 'e' and report it as a user exception."""
-        try:
-            raise e
-        except e.__class__:
-            self._got_user_exception(sys.exc_info())
-
-    def _blocking_run_deferred(self, spinner):
-        try:
-            return trap_unhandled_errors(
-                spinner.run, self._timeout, self._run_deferred)
-        except NoResultError:
-            # We didn't get a result at all!  This could be for any number of
-            # reasons, but most likely someone hit Ctrl-C during the test.
-            raise KeyboardInterrupt
-        except TimeoutError:
-            # The function took too long to run.
-            self._log_user_exception(TimeoutError(self.case, self._timeout))
-            return False, []
-
-    def _run_core(self):
-        # Add an observer to trap all logged errors.
-        self.case.reactor = self._reactor
-        error_observer = _log_observer
-        full_log = StringIO()
-        full_observer = log.FileLogObserver(full_log)
-        spinner = self._make_spinner()
-        successful, unhandled = run_with_log_observers(
-            [error_observer.gotEvent, full_observer.emit],
-            self._blocking_run_deferred, spinner)
-
-        self.case.addDetail(
-            'twisted-log', Content(UTF8_TEXT, full_log.readlines))
-
-        logged_errors = error_observer.flushErrors()
-        for logged_error in logged_errors:
-            successful = False
-            self._got_user_failure(logged_error, tb_label='logged-error')
-
-        if unhandled:
-            successful = False
-            for debug_info in unhandled:
-                f = debug_info.failResult
-                info = debug_info._getDebugTracebacks()
-                if info:
-                    self.case.addDetail(
-                        'unhandled-error-in-deferred-debug',
-                        text_content(info))
-                self._got_user_failure(f, 'unhandled-error-in-deferred')
-
-        junk = spinner.clear_junk()
-        if junk:
-            successful = False
-            self._log_user_exception(UncleanReactorError(junk))
-
-        if successful:
-            self.result.addSuccess(self.case, details=self.case.getDetails())
-
-    def _run_user(self, function, *args):
-        """Run a user-supplied function.
-
-        This just makes sure that it returns a Deferred, regardless of how the
-        user wrote it.
-        """
-        d = defer.maybeDeferred(function, *args)
-        return d.addErrback(self._got_user_failure)
-
-
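-# Editor's sketch (not part of the original module): a test case wired to
-# this runner via 'run_tests_with'; assumes the default reactor is usable.
-def _example_async_runner():  # pragma: no cover
-    from testtools import TestCase
-
-    class ExampleTest(TestCase):
-        # Allow each test two seconds of reactor time.
-        run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=2.0)
-
-        def test_fires_later(self):
-            from twisted.internet import reactor, task
-            return task.deferLater(reactor, 0.001, lambda: None)
-
-    return ExampleTest
-
-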
-class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest):
-    """Test runner that works around Twisted brokenness re reactor junk.
-
-    There are many APIs within Twisted itself where a Deferred fires but
-    leaves cleanup work scheduled for the reactor to do.  Arguably, many of
-    these are bugs.  This runner iterates the reactor event loop a number of
-    times after every test, in order to shake out these buggy-but-commonplace
-    events.
-    """
-
-    def _make_spinner(self):
-        spinner = super(
-            AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner()
-        spinner._OBLIGATORY_REACTOR_ITERATIONS = 2
-        return spinner
-
-
-def assert_fails_with(d, *exc_types, **kwargs):
-    """Assert that 'd' will fail with one of 'exc_types'.
-
-    The normal way to use this is to return the result of 'assert_fails_with'
-    from your unit test.
-
-    Note that this function is experimental and unstable.  Use at your own
-    peril; expect the API to change.
-
-    :param d: A Deferred that is expected to fail.
-    :param exc_types: The exception types that the Deferred is expected to
-        fail with.
-    :param failureException: An optional keyword argument.  If provided, will
-        raise that exception instead of
-        ``testtools.TestCase.failureException``.
-    :return: A Deferred that will fail with an ``AssertionError`` if 'd' does
-        not fail with one of the exception types.
-    """
-    failureException = kwargs.pop('failureException', None)
-    if failureException is None:
-        # Avoid circular imports.
-        from testtools import TestCase
-        failureException = TestCase.failureException
-    expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)
-    def got_success(result):
-        raise failureException(
-            "%s not raised (%r returned)" % (expected_names, result))
-    def got_failure(failure):
-        if failure.check(*exc_types):
-            return failure.value
-        raise failureException("%s raised instead of %s:\n %s" % (
-            failure.type.__name__, expected_names, failure.getTraceback()))
-    return d.addCallbacks(got_success, got_failure)
-
-
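-# Editor's sketch (not part of the original module): a test would normally
-# 'return' this from its test method so the runner waits on the Deferred.
-def _example_assert_fails_with():  # pragma: no cover
-    d = defer.fail(RuntimeError('boom'))
-    return assert_fails_with(d, RuntimeError)
-
-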
-def flush_logged_errors(*error_types):
-    return _log_observer.flushErrors(*error_types)
-
-
-class UncleanReactorError(Exception):
-    """Raised when the reactor has junk in it."""
-
-    def __init__(self, junk):
-        Exception.__init__(self,
-            "The reactor still thinks it needs to do things. Close all "
-            "connections, kill all processes and make sure all delayed "
-            "calls have either fired or been cancelled:\n%s"
-            % ''.join(map(self._get_junk_info, junk)))
-
-    def _get_junk_info(self, junk):
-        from twisted.internet.base import DelayedCall
-        if isinstance(junk, DelayedCall):
-            ret = str(junk)
-        else:
-            ret = repr(junk)
-        return '  %s\n' % (ret,)
diff --git a/lib/testtools/testtools/distutilscmd.py b/lib/testtools/testtools/distutilscmd.py
deleted file mode 100644
index a4d79dc..0000000
--- a/lib/testtools/testtools/distutilscmd.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright (c) 2010-2011 testtools developers . See LICENSE for details.
-
-"""Extensions to the standard Python unittest library."""
-
-import sys
-
-from distutils.core import Command
-from distutils.errors import DistutilsOptionError
-
-from testtools.run import TestProgram, TestToolsTestRunner
-
-
-class TestCommand(Command):
-    """Command to run unit tests with testtools"""
-
-    description = "run unit tests with testtools"
-
-    user_options = [
-        ('catch', 'c', "Catch ctrl-C and display results so far"),
-        ('buffer', 'b', "Buffer stdout and stderr during tests"),
-        ('failfast', 'f', "Stop on first fail or error"),
-        ('test-module=', 'm', "Run 'test_suite' in specified module"),
-        ('test-suite=', 's',
-         "Test suite to run (e.g. 'some_module.test_suite')")
-    ]
-
-    def __init__(self, dist):
-        Command.__init__(self, dist)
-        self.runner = TestToolsTestRunner(stdout=sys.stdout)
-
-    def initialize_options(self):
-        self.test_suite = None
-        self.test_module = None
-        self.catch = None
-        self.buffer = None
-        self.failfast = None
-
-    def finalize_options(self):
-        if self.test_suite is None:
-            if self.test_module is None:
-                raise DistutilsOptionError(
-                    "You must specify a module or a suite to run tests from")
-            else:
-                self.test_suite = self.test_module+".test_suite"
-        elif self.test_module:
-            raise DistutilsOptionError(
-                "You may specify a module or a suite, but not both")
-        self.test_args = [self.test_suite]
-        if self.verbose:
-            self.test_args.insert(0, '--verbose')
-        if self.buffer:
-            self.test_args.insert(0, '--buffer')
-        if self.catch:
-            self.test_args.insert(0, '--catch')
-        if self.failfast:
-            self.test_args.insert(0, '--failfast')
-
-    def run(self):
-        self.program = TestProgram(
-            argv=self.test_args, testRunner=self.runner, stdout=sys.stdout,
-            exit=False)
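-
-
-# Editor's usage sketch (not part of the original module): wiring the command
-# into a setup.py so 'python setup.py test' runs the named suite:
-#
-#     from distutils.core import setup
-#     from testtools.distutilscmd import TestCommand
-#
-#     setup(name='example', cmdclass={'test': TestCommand})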
diff --git a/lib/testtools/testtools/helpers.py b/lib/testtools/testtools/helpers.py
deleted file mode 100644
index 401d2cc..0000000
--- a/lib/testtools/testtools/helpers.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
-
-__all__ = [
-    'safe_hasattr',
-    'try_import',
-    'try_imports',
-    ]
-
-import sys
-
-# Compat - removal announced in 0.9.25.
-from extras import (
-    safe_hasattr,
-    try_import,
-    try_imports,
-    )
-
-
-def map_values(function, dictionary):
-    """Map ``function`` across the values of ``dictionary``.
-
-    :return: A dict with the same keys as ``dictionary``, where the value
-        of each key ``k`` is ``function(dictionary[k])``.
-    """
-    return dict((k, function(dictionary[k])) for k in dictionary)
-
-
-def filter_values(function, dictionary):
-    """Filter ``dictionary`` by its values using ``function``."""
-    return dict((k, v) for k, v in dictionary.items() if function(v))
-
-
-def dict_subtract(a, b):
-    """Return the part of ``a`` that's not in ``b``."""
-    return dict((k, a[k]) for k in set(a) - set(b))
-
-
-def list_subtract(a, b):
-    """Return a list ``a`` without the elements of ``b``.
-
-    If a particular value is in ``a`` twice and in ``b`` once, then that
-    value will appear once in the returned list.
-    """
-    a_only = list(a)
-    for x in b:
-        if x in a_only:
-            a_only.remove(x)
-    return a_only
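-
-
-# Editor's sketch (not part of the original module): the four helpers in one
-# place.
-def _example_helpers():  # pragma: no cover
-    assert map_values(str, {1: 2}) == {1: '2'}
-    assert filter_values(bool, {'a': 0, 'b': 1}) == {'b': 1}
-    assert dict_subtract({'a': 1, 'b': 2}, {'a': 1}) == {'b': 2}
-    # Only one of the two 1s is removed.
-    assert list_subtract([1, 1, 2], [1]) == [1, 2]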
diff --git a/lib/testtools/testtools/matchers/__init__.py b/lib/testtools/testtools/matchers/__init__.py
deleted file mode 100644
index 771d814..0000000
--- a/lib/testtools/testtools/matchers/__init__.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""All the matchers.
-
-Matchers, a way to express complex assertions outside the testcase.
-
-Inspired by 'hamcrest'.
-
-Matcher provides the abstract API that all matchers need to implement.
-
-Bundled matchers are listed in __all__: a list can be obtained by running
-$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
-"""
-
-__all__ = [
-    'AfterPreprocessing',
-    'AllMatch',
-    'Annotate',
-    'AnyMatch',
-    'Contains',
-    'ContainsAll',
-    'ContainedByDict',
-    'ContainsDict',
-    'DirContains',
-    'DirExists',
-    'DocTestMatches',
-    'EndsWith',
-    'Equals',
-    'FileContains',
-    'FileExists',
-    'GreaterThan',
-    'HasLength',
-    'HasPermissions',
-    'Is',
-    'IsInstance',
-    'KeysEqual',
-    'LessThan',
-    'MatchesAll',
-    'MatchesAny',
-    'MatchesDict',
-    'MatchesException',
-    'MatchesListwise',
-    'MatchesPredicate',
-    'MatchesPredicateWithParams',
-    'MatchesRegex',
-    'MatchesSetwise',
-    'MatchesStructure',
-    'NotEquals',
-    'Not',
-    'PathExists',
-    'Raises',
-    'raises',
-    'SamePath',
-    'StartsWith',
-    'TarballContains',
-    ]
-
-from ._basic import (
-    Contains,
-    EndsWith,
-    Equals,
-    GreaterThan,
-    HasLength,
-    Is,
-    IsInstance,
-    LessThan,
-    MatchesRegex,
-    NotEquals,
-    StartsWith,
-    )
-from ._datastructures import (
-    ContainsAll,
-    MatchesListwise,
-    MatchesSetwise,
-    MatchesStructure,
-    )
-from ._dict import (
-    ContainedByDict,
-    ContainsDict,
-    KeysEqual,
-    MatchesDict,
-    )
-from ._doctest import (
-    DocTestMatches,
-    )
-from ._exception import (
-    MatchesException,
-    Raises,
-    raises,
-    )
-from ._filesystem import (
-    DirContains,
-    DirExists,
-    FileContains,
-    FileExists,
-    HasPermissions,
-    PathExists,
-    SamePath,
-    TarballContains,
-    )
-from ._higherorder import (
-    AfterPreprocessing,
-    AllMatch,
-    Annotate,
-    AnyMatch,
-    MatchesAll,
-    MatchesAny,
-    MatchesPredicate,
-    MatchesPredicateWithParams,
-    Not,
-    )
-
-# XXX: These are not explicitly included in __all__.  It's unclear how much of
-# the public interface they really are.
-from ._impl import (
-    Matcher,
-    Mismatch,
-    MismatchError,
-    )
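-
-
-# Editor's sketch (not part of the original module): a matcher returns None
-# on success and a Mismatch on failure; in a TestCase the same objects are
-# normally used via self.assertThat(value, matcher).
-def _example_matcher_protocol():  # pragma: no cover
-    assert Equals(1).match(1) is None
-    mismatch = Equals(1).match(2)
-    assert mismatch.describe() == '1 != 2'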
diff --git a/lib/testtools/testtools/matchers/_basic.py b/lib/testtools/testtools/matchers/_basic.py
deleted file mode 100644
index 2d9f143..0000000
--- a/lib/testtools/testtools/matchers/_basic.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-__all__ = [
-    'Contains',
-    'EndsWith',
-    'Equals',
-    'GreaterThan',
-    'HasLength',
-    'Is',
-    'IsInstance',
-    'LessThan',
-    'MatchesRegex',
-    'NotEquals',
-    'StartsWith',
-    ]
-
-import operator
-from pprint import pformat
-import re
-
-from ..compat import (
-    _isbytes,
-    istext,
-    str_is_unicode,
-    text_repr,
-    )
-from ..helpers import list_subtract
-from ._higherorder import (
-    MatchesPredicateWithParams,
-    PostfixedMismatch,
-    )
-from ._impl import (
-    Matcher,
-    Mismatch,
-    )
-
-
-def _format(thing):
-    """
-    Blocks of text with newlines are formatted as triple-quote
-    strings. Everything else is pretty-printed.
-    """
-    if istext(thing) or _isbytes(thing):
-        return text_repr(thing)
-    return pformat(thing)
-
-
-class _BinaryComparison(object):
-    """Matcher that compares an object to another object."""
-
-    def __init__(self, expected):
-        self.expected = expected
-
-    def __str__(self):
-        return "%s(%r)" % (self.__class__.__name__, self.expected)
-
-    def match(self, other):
-        if self.comparator(other, self.expected):
-            return None
-        return _BinaryMismatch(self.expected, self.mismatch_string, other)
-
-    def comparator(self, expected, other):
-        raise NotImplementedError(self.comparator)
-
-
-class _BinaryMismatch(Mismatch):
-    """Two things did not match."""
-
-    def __init__(self, expected, mismatch_string, other):
-        self.expected = expected
-        self._mismatch_string = mismatch_string
-        self.other = other
-
-    def describe(self):
-        left = repr(self.expected)
-        right = repr(self.other)
-        if len(left) + len(right) > 70:
-            return "%s:\nreference = %s\nactual    = %s\n" % (
-                self._mismatch_string, _format(self.expected),
-                _format(self.other))
-        else:
-            return "%s %s %s" % (left, self._mismatch_string, right)
-
-
-class Equals(_BinaryComparison):
-    """Matches if the items are equal."""
-
-    comparator = operator.eq
-    mismatch_string = '!='
-
-
-class NotEquals(_BinaryComparison):
-    """Matches if the items are not equal.
-
-    In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
-    only matters when testing ``__ne__`` implementations.
-    """
-
-    comparator = operator.ne
-    mismatch_string = '=='
-
-
-class Is(_BinaryComparison):
-    """Matches if the items are identical."""
-
-    comparator = operator.is_
-    mismatch_string = 'is not'
-
-
-class LessThan(_BinaryComparison):
-    """Matches if the item is less than the matchers reference object."""
-
-    comparator = operator.__lt__
-    mismatch_string = 'is not >'
-
-
-class GreaterThan(_BinaryComparison):
-    """Matches if the item is greater than the matchers reference object."""
-
-    comparator = operator.__gt__
-    mismatch_string = 'is not <'
-
-
-class SameMembers(Matcher):
-    """Matches if two iterators have the same members.
-
-    This is not the same as set equivalence.  The two iterators must be of the
-    same length and have the same repetitions.
-    """
-
-    def __init__(self, expected):
-        super(SameMembers, self).__init__()
-        self.expected = expected
-
-    def __str__(self):
-        return '%s(%r)' % (self.__class__.__name__, self.expected)
-
-    def match(self, observed):
-        expected_only = list_subtract(self.expected, observed)
-        observed_only = list_subtract(observed, self.expected)
-        if expected_only == observed_only == []:
-            return
-        return PostfixedMismatch(
-            "\nmissing:    %s\nextra:      %s" % (
-                _format(expected_only), _format(observed_only)),
-            _BinaryMismatch(self.expected, 'elements differ', observed))
-
-
-class DoesNotStartWith(Mismatch):
-
-    def __init__(self, matchee, expected):
-        """Create a DoesNotStartWith Mismatch.
-
-        :param matchee: the string that did not match.
-        :param expected: the string that 'matchee' was expected to start with.
-        """
-        self.matchee = matchee
-        self.expected = expected
-
-    def describe(self):
-        return "%s does not start with %s." % (
-            text_repr(self.matchee), text_repr(self.expected))
-
-
-class StartsWith(Matcher):
-    """Checks whether one string starts with another."""
-
-    def __init__(self, expected):
-        """Create a StartsWith Matcher.
-
-        :param expected: the string that matchees should start with.
-        """
-        self.expected = expected
-
-    def __str__(self):
-        return "StartsWith(%r)" % (self.expected,)
-
-    def match(self, matchee):
-        if not matchee.startswith(self.expected):
-            return DoesNotStartWith(matchee, self.expected)
-        return None
-
-
-class DoesNotEndWith(Mismatch):
-
-    def __init__(self, matchee, expected):
-        """Create a DoesNotEndWith Mismatch.
-
-        :param matchee: the string that did not match.
-        :param expected: the string that 'matchee' was expected to end with.
-        """
-        self.matchee = matchee
-        self.expected = expected
-
-    def describe(self):
-        return "%s does not end with %s." % (
-            text_repr(self.matchee), text_repr(self.expected))
-
-
-class EndsWith(Matcher):
-    """Checks whether one string ends with another."""
-
-    def __init__(self, expected):
-        """Create a EndsWith Matcher.
-
-        :param expected: the string that matchees should end with.
-        """
-        self.expected = expected
-
-    def __str__(self):
-        return "EndsWith(%r)" % (self.expected,)
-
-    def match(self, matchee):
-        if not matchee.endswith(self.expected):
-            return DoesNotEndWith(matchee, self.expected)
-        return None
-
-
-class IsInstance(object):
-    """Matcher that wraps isinstance."""
-
-    def __init__(self, *types):
-        self.types = tuple(types)
-
-    def __str__(self):
-        return "%s(%s)" % (self.__class__.__name__,
-                ', '.join(type.__name__ for type in self.types))
-
-    def match(self, other):
-        if isinstance(other, self.types):
-            return None
-        return NotAnInstance(other, self.types)
-
-
-class NotAnInstance(Mismatch):
-
-    def __init__(self, matchee, types):
-        """Create a NotAnInstance Mismatch.
-
-        :param matchee: the thing which is not an instance of any of types.
-        :param types: A tuple of the types which were expected.
-        """
-        self.matchee = matchee
-        self.types = types
-
-    def describe(self):
-        if len(self.types) == 1:
-            typestr = self.types[0].__name__
-        else:
-            typestr = 'any of (%s)' % ', '.join(type.__name__ for type in
-                    self.types)
-        return "'%s' is not an instance of %s" % (self.matchee, typestr)
-
-
-class DoesNotContain(Mismatch):
-
-    def __init__(self, matchee, needle):
-        """Create a DoesNotContain Mismatch.
-
-        :param matchee: the object that did not contain needle.
-        :param needle: the needle that 'matchee' was expected to contain.
-        """
-        self.matchee = matchee
-        self.needle = needle
-
-    def describe(self):
-        return "%r not in %r" % (self.needle, self.matchee)
-
-
-class Contains(Matcher):
-    """Checks whether something is contained in another thing."""
-
-    def __init__(self, needle):
-        """Create a Contains Matcher.
-
-        :param needle: the thing that needs to be contained by matchees.
-        """
-        self.needle = needle
-
-    def __str__(self):
-        return "Contains(%r)" % (self.needle,)
-
-    def match(self, matchee):
-        try:
-            if self.needle not in matchee:
-                return DoesNotContain(matchee, self.needle)
-        except TypeError:
-            # e.g. 1 in 2 will raise TypeError
-            return DoesNotContain(matchee, self.needle)
-        return None
-
-
-class MatchesRegex(object):
-    """Matches if the matchee is matched by a regular expression."""
-
-    def __init__(self, pattern, flags=0):
-        self.pattern = pattern
-        self.flags = flags
-
-    def __str__(self):
-        args = ['%r' % self.pattern]
-        flag_arg = []
-        # dir() sorts the attributes for us, so we don't need to do it again.
-        for flag in dir(re):
-            if len(flag) == 1:
-                if self.flags & getattr(re, flag):
-                    flag_arg.append('re.%s' % flag)
-        if flag_arg:
-            args.append('|'.join(flag_arg))
-        return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
-
-    def match(self, value):
-        if not re.match(self.pattern, value, self.flags):
-            pattern = self.pattern
-            if not isinstance(pattern, str_is_unicode and str or unicode):
-                pattern = pattern.decode("latin1")
-            pattern = pattern.encode("unicode_escape").decode("ascii")
-            return Mismatch("%r does not match /%s/" % (
-                    value, pattern.replace("\\\\", "\\")))
-
-
-def has_len(x, y):
-    return len(x) == y
-
-
-HasLength = MatchesPredicateWithParams(has_len, "len({0}) != {1}", "HasLength")
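-
-
-# Editor's sketch (not part of the original module): a few of the basic
-# matchers exercised directly.
-def _example_basic_matchers():  # pragma: no cover
-    assert HasLength(3).match('abc') is None
-    assert MatchesRegex(r'\d+').match('123') is None
-    assert Contains(2).match([1, 2, 3]) is None
-    # A failed match yields a Mismatch describing the problem.
-    assert StartsWith('ab').match('xyz') is not None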
diff --git a/lib/testtools/testtools/matchers/_datastructures.py b/lib/testtools/testtools/matchers/_datastructures.py
deleted file mode 100644
index 70de790..0000000
--- a/lib/testtools/testtools/matchers/_datastructures.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-"""Matchers that operate with knowledge of Python data structures."""
-
-__all__ = [
-    'ContainsAll',
-    'MatchesListwise',
-    'MatchesSetwise',
-    'MatchesStructure',
-    ]
-
-from ..helpers import map_values
-from ._higherorder import (
-    Annotate,
-    MatchesAll,
-    MismatchesAll,
-    )
-from ._impl import Mismatch
-
-
-def ContainsAll(items):
-    """Make a matcher that checks whether a list of things is contained
-    in another thing.
-
-    The matcher effectively checks that the provided sequence is a subset of
-    the matchee.
-    """
-    from ._basic import Contains
-    return MatchesAll(*map(Contains, items), first_only=False)
-
-
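-# Editor's sketch (not part of the original module): ContainsAll is a subset
-# check on the matchee.
-def _example_contains_all():  # pragma: no cover
-    assert ContainsAll([1, 2]).match([1, 2, 3]) is None
-    assert ContainsAll([1, 4]).match([1, 2, 3]) is not None
-
-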
-class MatchesListwise(object):
-    """Matches if each matcher matches the corresponding value.
-
-    More easily explained by example than in words:
-
-    >>> from ._basic import Equals
-    >>> MatchesListwise([Equals(1)]).match([1])
-    >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
-    >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
-    Differences: [
-    1 != 2
-    2 != 1
-    ]
-    >>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True)
-    >>> print (matcher.match([3, 4]).describe())
-    1 != 3
-    """
-
-    def __init__(self, matchers, first_only=False):
-        """Construct a MatchesListwise matcher.
-
-        :param matchers: A list of matchers that the matched values must match.
-        :param first_only: If True, then only report the first mismatch,
-            otherwise report all of them. Defaults to False.
-        """
-        self.matchers = matchers
-        self.first_only = first_only
-
-    def match(self, values):
-        from ._basic import Equals
-        mismatches = []
-        length_mismatch = Annotate(
-            "Length mismatch", Equals(len(self.matchers))).match(len(values))
-        if length_mismatch:
-            mismatches.append(length_mismatch)
-        for matcher, value in zip(self.matchers, values):
-            mismatch = matcher.match(value)
-            if mismatch:
-                if self.first_only:
-                    return mismatch
-                mismatches.append(mismatch)
-        if mismatches:
-            return MismatchesAll(mismatches)
-
-
-class MatchesStructure(object):
-    """Matcher that matches an object structurally.
-
-    'Structurally' here means that attributes of the object being matched are
-    compared against given matchers.
-
-    `fromExample` allows the creation of a matcher from a prototype object and
-    then modified versions can be created with `update`.
-
-    `byEquality` creates a matcher in much the same way as the constructor,
-    except that the matcher for each of the attributes is assumed to be
-    `Equals`.
-
-    `byMatcher` creates a similar matcher to `byEquality`, but you get to pick
-    the matcher, rather than just using `Equals`.
-    """
-
-    def __init__(self, **kwargs):
-        """Construct a `MatchesStructure`.
-
-        :param kwargs: A mapping of attributes to matchers.
-        """
-        self.kws = kwargs
-
-    @classmethod
-    def byEquality(cls, **kwargs):
-        """Matches an object where the attributes equal the keyword values.
-
-        Similar to the constructor, except that the matcher is assumed to be
-        Equals.
-        """
-        from ._basic import Equals
-        return cls.byMatcher(Equals, **kwargs)
-
-    @classmethod
-    def byMatcher(cls, matcher, **kwargs):
-        """Matches an object where the attributes match the keyword values.
-
-        Similar to the constructor, except that the provided matcher is used
-        to match all of the values.
-        """
-        return cls(**map_values(matcher, kwargs))
-
-    @classmethod
-    def fromExample(cls, example, *attributes):
-        from ._basic import Equals
-        kwargs = {}
-        for attr in attributes:
-            kwargs[attr] = Equals(getattr(example, attr))
-        return cls(**kwargs)
-
-    def update(self, **kws):
-        new_kws = self.kws.copy()
-        for attr, matcher in kws.items():
-            if matcher is None:
-                new_kws.pop(attr, None)
-            else:
-                new_kws[attr] = matcher
-        return type(self)(**new_kws)
-
-    def __str__(self):
-        kws = []
-        for attr, matcher in sorted(self.kws.items()):
-            kws.append("%s=%s" % (attr, matcher))
-        return "%s(%s)" % (self.__class__.__name__, ', '.join(kws))
-
-    def match(self, value):
-        matchers = []
-        values = []
-        for attr, matcher in sorted(self.kws.items()):
-            matchers.append(Annotate(attr, matcher))
-            values.append(getattr(value, attr))
-        return MatchesListwise(matchers).match(values)
-
-
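-# Editor's sketch (not part of the original module): matching an object's
-# attributes by equality.  'Point' is a hypothetical example class.
-def _example_matches_structure():  # pragma: no cover
-    class Point(object):
-        def __init__(self, x, y):
-            self.x, self.y = x, y
-
-    assert MatchesStructure.byEquality(x=1, y=2).match(Point(1, 2)) is None
-    assert MatchesStructure.byEquality(x=9).match(Point(1, 2)) is not None
-
-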
-class MatchesSetwise(object):
-    """Matches if all the matchers match elements of the value being matched.
-
-    That is, each element in the 'observed' set must match exactly one matcher
-    from the set of matchers, with no matchers left over.
-
-    The difference compared to `MatchesListwise` is that the order of the
-    matchings does not matter.
-    """
-
-    def __init__(self, *matchers):
-        self.matchers = matchers
-
-    def match(self, observed):
-        remaining_matchers = set(self.matchers)
-        not_matched = []
-        for value in observed:
-            for matcher in remaining_matchers:
-                if matcher.match(value) is None:
-                    remaining_matchers.remove(matcher)
-                    break
-            else:
-                not_matched.append(value)
-        if not_matched or remaining_matchers:
-            remaining_matchers = list(remaining_matchers)
-            # There are various cases that all should be reported somewhat
-            # differently.
-
-            # There are two trivial cases:
-            # 1) There are just some matchers left over.
-            # 2) There are just some values left over.
-
-            # Then there are three more interesting cases:
-            # 3) There are the same number of matchers and values left over.
-            # 4) There are more matchers left over than values.
-            # 5) There are more values left over than matchers.
-
-            if len(not_matched) == 0:
-                if len(remaining_matchers) > 1:
-                    msg = "There were %s matchers left over: " % (
-                        len(remaining_matchers),)
-                else:
-                    msg = "There was 1 matcher left over: "
-                msg += ', '.join(map(str, remaining_matchers))
-                return Mismatch(msg)
-            elif len(remaining_matchers) == 0:
-                if len(not_matched) > 1:
-                    return Mismatch(
-                        "There were %s values left over: %s" % (
-                            len(not_matched), not_matched))
-                else:
-                    return Mismatch(
-                        "There was 1 value left over: %s" % (
-                            not_matched, ))
-            else:
-                common_length = min(len(remaining_matchers), len(not_matched))
-                if common_length == 0:
-                    raise AssertionError("common_length can't be 0 here")
-                if common_length > 1:
-                    msg = "There were %s mismatches" % (common_length,)
-                else:
-                    msg = "There was 1 mismatch"
-                if len(remaining_matchers) > len(not_matched):
-                    extra_matchers = remaining_matchers[common_length:]
-                    msg += " and %s extra matcher" % (len(extra_matchers), )
-                    if len(extra_matchers) > 1:
-                        msg += "s"
-                    msg += ': ' + ', '.join(map(str, extra_matchers))
-                elif len(not_matched) > len(remaining_matchers):
-                    extra_values = not_matched[common_length:]
-                    msg += " and %s extra value" % (len(extra_values), )
-                    if len(extra_values) > 1:
-                        msg += "s"
-                    msg += ': ' + str(extra_values)
-                return Annotate(
-                    msg, MatchesListwise(remaining_matchers[:common_length])
-                    ).match(not_matched[:common_length])
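-
-
-# Editor's sketch (not part of the original module): matching is
-# order-insensitive, but every matcher and every value must pair up.
-def _example_matches_setwise():  # pragma: no cover
-    from ._basic import Equals
-    assert MatchesSetwise(Equals(2), Equals(1)).match([1, 2]) is None
-    # A left-over value (or matcher) is a mismatch.
-    assert MatchesSetwise(Equals(1)).match([1, 2]) is not None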
diff --git a/lib/testtools/testtools/matchers/_dict.py b/lib/testtools/testtools/matchers/_dict.py
deleted file mode 100644
index b1ec915..0000000
--- a/lib/testtools/testtools/matchers/_dict.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-__all__ = [
-    'KeysEqual',
-    ]
-
-from ..helpers import (
-    dict_subtract,
-    filter_values,
-    map_values,
-    )
-from ._higherorder import (
-    AnnotatedMismatch,
-    PrefixedMismatch,
-    MismatchesAll,
-    )
-from ._impl import Matcher, Mismatch
-
-
-def LabelledMismatches(mismatches, details=None):
-    """A collection of mismatches, each labelled."""
-    return MismatchesAll(
-        (PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())),
-        wrap=False)
-
-
-class MatchesAllDict(Matcher):
-    """Matches if all of the matchers it is created with match.
-
-    A lot like ``MatchesAll``, but takes a dict of Matchers and labels any
-    mismatches with the key of the dictionary.
-    """
-
-    def __init__(self, matchers):
-        super(MatchesAllDict, self).__init__()
-        self.matchers = matchers
-
-    def __str__(self):
-        return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),)
-
-    def match(self, observed):
-        mismatches = {}
-        for label in self.matchers:
-            mismatches[label] = self.matchers[label].match(observed)
-        return _dict_to_mismatch(
-            mismatches, result_mismatch=LabelledMismatches)
-
-
-class DictMismatches(Mismatch):
-    """A mismatch with a dict of child mismatches."""
-
-    def __init__(self, mismatches, details=None):
-        super(DictMismatches, self).__init__(None, details=details)
-        self.mismatches = mismatches
-
-    def describe(self):
-        lines = ['{']
-        lines.extend(
-            ['  %r: %s,' % (key, mismatch.describe())
-             for (key, mismatch) in sorted(self.mismatches.items())])
-        lines.append('}')
-        return '\n'.join(lines)
-
-
-def _dict_to_mismatch(data, to_mismatch=None,
-                      result_mismatch=DictMismatches):
-    if to_mismatch:
-        data = map_values(to_mismatch, data)
-    mismatches = filter_values(bool, data)
-    if mismatches:
-        return result_mismatch(mismatches)
-
-
-class _MatchCommonKeys(Matcher):
-    """Match on keys in a dictionary.
-
-    Given a dictionary where the values are matchers, this will look for
-    common keys in the matched dictionary and match if and only if all common
-    keys match the given matchers.
-
-    Thus::
-
-      >>> structure = {'a': Equals('x'), 'b': Equals('y')}
-      >>> _MatchCommonKeys(structure).match({'a': 'x', 'c': 'z'})
-      None
-    """
-
-    def __init__(self, dict_of_matchers):
-        super(_MatchCommonKeys, self).__init__()
-        self._matchers = dict_of_matchers
-
-    def _compare_dicts(self, expected, observed):
-        common_keys = set(expected.keys()) & set(observed.keys())
-        mismatches = {}
-        for key in common_keys:
-            mismatch = expected[key].match(observed[key])
-            if mismatch:
-                mismatches[key] = mismatch
-        return mismatches
-
-    def match(self, observed):
-        mismatches = self._compare_dicts(self._matchers, observed)
-        if mismatches:
-            return DictMismatches(mismatches)
-
-
-class _SubDictOf(Matcher):
-    """Matches if the matched dict only has keys that are in given dict."""
-
-    def __init__(self, super_dict, format_value=repr):
-        super(_SubDictOf, self).__init__()
-        self.super_dict = super_dict
-        self.format_value = format_value
-
-    def match(self, observed):
-        excess = dict_subtract(observed, self.super_dict)
-        return _dict_to_mismatch(
-            excess, lambda v: Mismatch(self.format_value(v)))
-
-
-class _SuperDictOf(Matcher):
-    """Matches if all of the keys in the given dict are in the matched dict.
-    """
-
-    def __init__(self, sub_dict, format_value=repr):
-        super(_SuperDictOf, self).__init__()
-        self.sub_dict = sub_dict
-        self.format_value = format_value
-
-    def match(self, super_dict):
-        return _SubDictOf(super_dict, self.format_value).match(self.sub_dict)
-
-
-def _format_matcher_dict(matchers):
-    return '{%s}' % (
-        ', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items())))
-
-
-class _CombinedMatcher(Matcher):
-    """Many matchers labelled and combined into one uber-matcher.
-
-    Subclass this and then specify a dict of matcher factories that take a
-    single 'expected' value and return a matcher.  The subclass will match
-    only if all of the matchers made from factories match.
-
-    Not **entirely** dissimilar from ``MatchesAll``.
-    """
-
-    matcher_factories = {}
-
-    def __init__(self, expected):
-        super(_CombinedMatcher, self).__init__()
-        self._expected = expected
-
-    def format_expected(self, expected):
-        return repr(expected)
-
-    def __str__(self):
-        return '%s(%s)' % (
-            self.__class__.__name__, self.format_expected(self._expected))
-
-    def match(self, observed):
-        matchers = dict(
-            (k, v(self._expected)) for k, v in self.matcher_factories.items())
-        return MatchesAllDict(matchers).match(observed)
-
-
-class MatchesDict(_CombinedMatcher):
-    """Match a dictionary exactly, by its keys.
-
-    Specify a dictionary mapping keys (often strings) to matchers.  This is
-    the 'expected' dict.  Any dictionary that matches this must have exactly
-    the same keys, and the values must match the corresponding matchers in the
-    expected dict.
-    """
-
-    matcher_factories = {
-        'Extra': _SubDictOf,
-        'Missing': lambda m: _SuperDictOf(m, format_value=str),
-        'Differences': _MatchCommonKeys,
-        }
-
-    format_expected = lambda self, expected: _format_matcher_dict(expected)
-
-
-class ContainsDict(_CombinedMatcher):
-    """Match a dictionary for that contains a specified sub-dictionary.
-
-    Specify a dictionary mapping keys (often strings) to matchers.  This is
-    the 'expected' dict.  Any dictionary that matches this must have **at
-    least** these keys, and the values must match the corresponding matchers
-    in the expected dict.  Dictionaries that have more keys will also match.
-
-    In other words, any matching dictionary must contain the dictionary given
-    to the constructor.
-
-    Does not check for strict sub-dictionary.  That is, equal dictionaries
-    match.
-    """
-
-    matcher_factories = {
-        'Missing': lambda m: _SuperDictOf(m, format_value=str),
-        'Differences': _MatchCommonKeys,
-        }
-
-    format_expected = lambda self, expected: _format_matcher_dict(expected)
-
-
-class ContainedByDict(_CombinedMatcher):
-    """Match a dictionary for which this is a super-dictionary.
-
-    Specify a dictionary mapping keys (often strings) to matchers.  This is
-    the 'expected' dict.  Any dictionary that matches this must have **only**
-    these keys, and the values must match the corresponding matchers in the
-    expected dict.  Dictionaries that have fewer keys can also match.
-
-    In other words, any matching dictionary must be contained by the
-    dictionary given to the constructor.
-
-    Does not check for strict super-dictionary.  That is, equal dictionaries
-    match.
-    """
-
-    matcher_factories = {
-        'Extra': _SubDictOf,
-        'Differences': _MatchCommonKeys,
-        }
-
-    format_expected = lambda self, expected: _format_matcher_dict(expected)
-
-
-class KeysEqual(Matcher):
-    """Checks whether a dict has particular keys."""
-
-    def __init__(self, *expected):
-        """Create a `KeysEqual` Matcher.
-
-        :param expected: The keys the dict is expected to have.  If a dict is
-            given, we use its keys; if any other collection is given, we
-            treat it as a collection of expected keys.
-        """
-        super(KeysEqual, self).__init__()
-        try:
-            self.expected = expected[0].keys()
-        except AttributeError:
-            self.expected = list(expected)
-
-    def __str__(self):
-        return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
-
-    def match(self, matchee):
-        from ._basic import _BinaryMismatch, Equals
-        expected = sorted(self.expected)
-        matched = Equals(expected).match(sorted(matchee.keys()))
-        if matched:
-            return AnnotatedMismatch(
-                'Keys not equal',
-                _BinaryMismatch(expected, 'does not match', matchee))
-        return None
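
For reference, the dict matchers deleted above read naturally with
assertThat; a minimal sketch, assuming the bundled copy remains importable
as ``testtools`` from its new third_party/ location::

    from testtools import TestCase
    from testtools.matchers import (
        ContainsDict, Equals, KeysEqual, MatchesDict)

    class TestDictMatchers(TestCase):

        def test_matches_dict(self):
            # Exactly these keys; each value checked by its matcher.
            self.assertThat(
                {'a': 1, 'b': 2},
                MatchesDict({'a': Equals(1), 'b': Equals(2)}))

        def test_contains_dict(self):
            # At least these keys; extra keys are allowed.
            self.assertThat({'a': 1, 'b': 2}, ContainsDict({'a': Equals(1)}))

        def test_keys_equal(self):
            # Only the key set is compared, not the values.
            self.assertThat({'a': 1, 'b': 2}, KeysEqual('a', 'b'))
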
diff --git a/lib/testtools/testtools/matchers/_doctest.py b/lib/testtools/testtools/matchers/_doctest.py
deleted file mode 100644
index 41f3c00..0000000
--- a/lib/testtools/testtools/matchers/_doctest.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-__all__ = [
-    'DocTestMatches',
-    ]
-
-import doctest
-import re
-
-from ..compat import str_is_unicode
-from ._impl import Mismatch
-
-
-class _NonManglingOutputChecker(doctest.OutputChecker):
-    """Doctest checker that works with unicode rather than mangling strings
-
-    This is needed because current Python versions have tried to fix string
-    encoding related problems, but regressed the default behaviour with
-    unicode inputs in the process.
-
-    In Python 2.6 and 2.7 ``OutputChecker.output_difference`` was changed
-    to return a bytestring encoded as per ``sys.stdout.encoding``, or utf-8 if
-    that can't be determined. Worse, that encoding process happens in the
-    innocent looking `_indent` global function. Because the
-    `DocTestMismatch.describe` result may well not be destined for printing to
-    stdout, this is no good for us. To get a unicode return as before, the
-    method is monkey patched if ``doctest._encoding`` exists.
-
-    Python 3 has a different problem. For some reason both inputs are encoded
-    to ascii with 'backslashreplace', making an escaped string match its
-    unescaped form. Overriding the offending ``OutputChecker._toAscii`` method
-    is sufficient to revert this.
-    """
-
-    def _toAscii(self, s):
-        """Return ``s`` unchanged rather than mangling it to ascii"""
-        return s
-
-    # Only do this overriding hackery if doctest has the broken encoding
-    # behaviour (signalled by the presence of doctest._encoding).
-    if getattr(doctest, "_encoding", None) is not None:
-        from types import FunctionType as __F
-        __f = doctest.OutputChecker.output_difference.im_func
-        __g = dict(__f.func_globals)
-        def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)):
-            """Prepend non-empty lines in ``s`` with ``indent`` number of spaces"""
-            return _pattern.sub(indent*" ", s)
-        __g["_indent"] = _indent
-        output_difference = __F(__f.func_code, __g, "output_difference")
-        del __F, __f, __g, _indent
-
-
-class DocTestMatches(object):
-    """See if a string matches a doctest example."""
-
-    def __init__(self, example, flags=0):
-        """Create a DocTestMatches to match example.
-
-        :param example: The example to match e.g. 'foo bar baz'
-        :param flags: doctest comparison flags to match on. e.g.
-            doctest.ELLIPSIS.
-        """
-        if not example.endswith('\n'):
-            example += '\n'
-        self.want = example  # 'want' is the attribute name doctest requires.
-        self.flags = flags
-        self._checker = _NonManglingOutputChecker()
-
-    def __str__(self):
-        if self.flags:
-            flagstr = ", flags=%d" % self.flags
-        else:
-            flagstr = ""
-        return 'DocTestMatches(%r%s)' % (self.want, flagstr)
-
-    def _with_nl(self, actual):
-        result = self.want.__class__(actual)
-        if not result.endswith('\n'):
-            result += '\n'
-        return result
-
-    def match(self, actual):
-        with_nl = self._with_nl(actual)
-        if self._checker.check_output(self.want, with_nl, self.flags):
-            return None
-        return DocTestMismatch(self, with_nl)
-
-    def _describe_difference(self, with_nl):
-        return self._checker.output_difference(self, with_nl, self.flags)
-
-
-class DocTestMismatch(Mismatch):
-    """Mismatch object for DocTestMatches."""
-
-    def __init__(self, matcher, with_nl):
-        self.matcher = matcher
-        self.with_nl = with_nl
-
-    def describe(self):
-        s = self.matcher._describe_difference(self.with_nl)
-        if str_is_unicode or isinstance(s, unicode):
-            return s
-        # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
-        #                be escaped, in addition to non-ascii bytes.
-        return s.decode("latin1").encode("ascii", "backslashreplace")
diff --git a/lib/testtools/testtools/matchers/_exception.py b/lib/testtools/testtools/matchers/_exception.py
deleted file mode 100644
index cd4c90b..0000000
--- a/lib/testtools/testtools/matchers/_exception.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-__all__ = [
-    'MatchesException',
-    'Raises',
-    'raises',
-    ]
-
-import sys
-
-from testtools.compat import (
-    classtypes,
-    istext,
-    )
-from ._basic import MatchesRegex
-from ._higherorder import AfterPreproccessing
-from ._impl import (
-    Matcher,
-    Mismatch,
-    )
-
-
-_error_repr = BaseException.__repr__
-
-
-def _is_exception(exc):
-    return isinstance(exc, BaseException)
-
-
-def _is_user_exception(exc):
-    return isinstance(exc, Exception)
-
-
-class MatchesException(Matcher):
-    """Match an exc_info tuple against an exception instance or type."""
-
-    def __init__(self, exception, value_re=None):
-        """Create a MatchesException that will match exc_info's for exception.
-
-        :param exception: Either an exception instance or type.
-            If an instance is given, the type and arguments of the exception
-            are checked. If a type is given, only the type of the exception
-            is checked. If a tuple of types is given, then as with
-            isinstance, a match with any of the types is sufficient.
-        :param value_re: If 'exception' is a type, and the matchee exception
-            is of the right type, then match against this.  If value_re is a
-            string, then assume value_re is a regular expression and match
-            the str() of the exception against it.  Otherwise, assume value_re
-            is a matcher, and match the exception against it.
-        """
-        Matcher.__init__(self)
-        self.expected = exception
-        if istext(value_re):
-            value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
-        self.value_re = value_re
-        expected_type = type(self.expected)
-        self._is_instance = not any(issubclass(expected_type, class_type)
-                for class_type in classtypes() + (tuple,))
-
-    def match(self, other):
-        if type(other) != tuple:
-            return Mismatch('%r is not an exc_info tuple' % other)
-        expected_class = self.expected
-        if self._is_instance:
-            expected_class = expected_class.__class__
-        if not issubclass(other[0], expected_class):
-            return Mismatch('%r is not a %r' % (other[0], expected_class))
-        if self._is_instance:
-            if other[1].args != self.expected.args:
-                return Mismatch('%s has different arguments to %s.' % (
-                        _error_repr(other[1]), _error_repr(self.expected)))
-        elif self.value_re is not None:
-            return self.value_re.match(other[1])
-
-    def __str__(self):
-        if self._is_instance:
-            return "MatchesException(%s)" % _error_repr(self.expected)
-        return "MatchesException(%s)" % repr(self.expected)
-
-
-class Raises(Matcher):
-    """Match if the matchee raises an exception when called.
-
-    Exceptions which are not subclasses of Exception propagate out of the
-    Raises.match call unless they are explicitly matched.
-    """
-
-    def __init__(self, exception_matcher=None):
-        """Create a Raises matcher.
-
-        :param exception_matcher: Optional validator for the exception raised
-            by matchee. If supplied the exc_info tuple for the exception raised
-            is passed into that matcher. If no exception_matcher is supplied
-            then the simple fact of raising an exception is considered enough
-            to match on.
-        """
-        self.exception_matcher = exception_matcher
-
-    def match(self, matchee):
-        try:
-            result = matchee()
-            return Mismatch('%r returned %r' % (matchee, result))
-        # Catch all exceptions: Raises() should be able to match a
-        # KeyboardInterrupt or SystemExit.
-        except:
-            exc_info = sys.exc_info()
-            if self.exception_matcher:
-                mismatch = self.exception_matcher.match(exc_info)
-                if not mismatch:
-                    del exc_info
-                    return
-            else:
-                mismatch = None
-            # The exception did not match, or no explicit matching logic was
-            # performed. If the exception is a non-user exception then
-            # propagate it.
-            exception = exc_info[1]
-            if _is_exception(exception) and not _is_user_exception(exception):
-                del exc_info
-                raise
-            return mismatch
-
-    def __str__(self):
-        return 'Raises()'
-
-
-def raises(exception):
-    """Make a matcher that checks that a callable raises an exception.
-
-    This is a convenience function, exactly equivalent to::
-
-        return Raises(MatchesException(exception))
-
-    See `Raises` and `MatchesException` for more information.
-    """
-    return Raises(MatchesException(exception))
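
Typical use of the exception matchers deleted above; a minimal sketch (the
lambdas are invented for illustration)::

    from testtools import TestCase
    from testtools.matchers import MatchesException, Raises, raises

    class TestErrors(TestCase):

        def test_raises_type(self):
            # raises(x) is exactly Raises(MatchesException(x)).
            self.assertThat(lambda: 1 / 0, raises(ZeroDivisionError))

        def test_raises_with_message(self):
            # A string value_re is treated as a regex over str(exception).
            self.assertThat(
                lambda: int('nope'),
                Raises(MatchesException(ValueError, '.*invalid literal.*')))
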
diff --git a/lib/testtools/testtools/matchers/_filesystem.py b/lib/testtools/testtools/matchers/_filesystem.py
deleted file mode 100644
index 54f749b..0000000
--- a/lib/testtools/testtools/matchers/_filesystem.py
+++ /dev/null
@@ -1,192 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-"""Matchers for things related to the filesystem."""
-
-__all__ = [
-    'FileContains',
-    'DirExists',
-    'FileExists',
-    'HasPermissions',
-    'PathExists',
-    'SamePath',
-    'TarballContains',
-    ]
-
-import os
-import tarfile
-
-from ._basic import Equals
-from ._higherorder import (
-    MatchesAll,
-    MatchesPredicate,
-    )
-from ._impl import (
-    Matcher,
-    )
-
-
-def PathExists():
-    """Matches if the given path exists.
-
-    Use like this::
-
-      assertThat('/some/path', PathExists())
-    """
-    return MatchesPredicate(os.path.exists, "%s does not exist.")
-
-
-def DirExists():
-    """Matches if the path exists and is a directory."""
-    return MatchesAll(
-        PathExists(),
-        MatchesPredicate(os.path.isdir, "%s is not a directory."),
-        first_only=True)
-
-
-def FileExists():
-    """Matches if the given path exists and is a file."""
-    return MatchesAll(
-        PathExists(),
-        MatchesPredicate(os.path.isfile, "%s is not a file."),
-        first_only=True)
-
-
-class DirContains(Matcher):
-    """Matches if the given directory contains files with the given names.
-
-    That is, is the directory listing exactly equal to the given files?
-    """
-
-    def __init__(self, filenames=None, matcher=None):
-        """Construct a ``DirContains`` matcher.
-
-        Can be used in a basic mode where the whole directory listing is
-        matched against an expected directory listing (by passing
-        ``filenames``).  Can also be used in a more advanced way where the
-        whole directory listing is matched against an arbitrary matcher (by
-        passing ``matcher`` instead).
-
-        :param filenames: If specified, match the sorted directory listing
-            against this list of filenames, sorted.
-        :param matcher: If specified, match the sorted directory listing
-            against this matcher.
-        """
-        if filenames is None and matcher is None:
-            raise AssertionError(
-                "Must provide one of `filenames` or `matcher`.")
-        if None not in (filenames, matcher):
-            raise AssertionError(
-                "Must provide either `filenames` or `matcher`, not both.")
-        if filenames is None:
-            self.matcher = matcher
-        else:
-            self.matcher = Equals(sorted(filenames))
-
-    def match(self, path):
-        mismatch = DirExists().match(path)
-        if mismatch is not None:
-            return mismatch
-        return self.matcher.match(sorted(os.listdir(path)))
-
-
-class FileContains(Matcher):
-    """Matches if the given file has the specified contents."""
-
-    def __init__(self, contents=None, matcher=None):
-        """Construct a ``FileContains`` matcher.
-
-        Can be used in a basic mode where the file contents are compared for
-        equality against the expected file contents (by passing ``contents``).
-        Can also be used in a more advanced way where the file contents are
-        matched against an arbitrary matcher (by passing ``matcher`` instead).
-
-        :param contents: If specified, match the contents of the file with
-            these contents.
-        :param matcher: If specified, match the contents of the file against
-            this matcher.
-        """
-        if contents is None and matcher is None:
-            raise AssertionError(
-                "Must provide one of `contents` or `matcher`.")
-        if None not in (contents, matcher):
-            raise AssertionError(
-                "Must provide either `contents` or `matcher`, not both.")
-        if matcher is None:
-            self.matcher = Equals(contents)
-        else:
-            self.matcher = matcher
-
-    def match(self, path):
-        mismatch = PathExists().match(path)
-        if mismatch is not None:
-            return mismatch
-        f = open(path)
-        try:
-            actual_contents = f.read()
-            return self.matcher.match(actual_contents)
-        finally:
-            f.close()
-
-    def __str__(self):
-        return "File at path exists and contains %s" % self.contents
-
-
-class HasPermissions(Matcher):
-    """Matches if a file has the given permissions.
-
-    Permissions are specified and matched as a four-digit octal string.
-    """
-
-    def __init__(self, octal_permissions):
-        """Construct a HasPermissions matcher.
-
-        :param octal_permissions: A four digit octal string, representing the
-            intended access permissions. e.g. '0775' for rwxrwxr-x.
-        """
-        super(HasPermissions, self).__init__()
-        self.octal_permissions = octal_permissions
-
-    def match(self, filename):
-        permissions = oct(os.stat(filename).st_mode)[-4:]
-        return Equals(self.octal_permissions).match(permissions)
-
-
-class SamePath(Matcher):
-    """Matches if two paths are the same.
-
-    That is, the paths are equal, or they point to the same file but in
-    different ways.  The paths do not have to exist.
-    """
-
-    def __init__(self, path):
-        super(SamePath, self).__init__()
-        self.path = path
-
-    def match(self, other_path):
-        f = lambda x: os.path.abspath(os.path.realpath(x))
-        return Equals(f(self.path)).match(f(other_path))
-
-
-class TarballContains(Matcher):
-    """Matches if the given tarball contains the given paths.
-
-    Uses TarFile.getnames() to get the paths out of the tarball.
-    """
-
-    def __init__(self, paths):
-        super(TarballContains, self).__init__()
-        self.paths = paths
-        self.path_matcher = Equals(sorted(self.paths))
-
-    def match(self, tarball_path):
-        # Open underlying file first to ensure it's always closed:
-        # <http://bugs.python.org/issue10233>
-        f = open(tarball_path, "rb")
-        try:
-            tarball = tarfile.open(tarball_path, fileobj=f)
-            try:
-                return self.path_matcher.match(sorted(tarball.getnames()))
-            finally:
-                tarball.close()
-        finally:
-            f.close()
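
The filesystem matchers deleted above compose with assertThat in the same
way; a minimal sketch (the temporary file and its contents are invented for
illustration, and a real test would clean the directory up afterwards)::

    import os
    import tempfile

    from testtools import TestCase
    from testtools.matchers import (
        DirExists, FileContains, FileExists, PathExists)

    class TestLayout(TestCase):

        def test_written_file(self):
            path = os.path.join(tempfile.mkdtemp(), 'example.conf')
            with open(path, 'w') as f:
                f.write('enabled = true\n')
            self.assertThat(path, PathExists())
            self.assertThat(path, FileExists())
            self.assertThat(os.path.dirname(path), DirExists())
            self.assertThat(path, FileContains('enabled = true\n'))
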
diff --git a/lib/testtools/testtools/matchers/_higherorder.py b/lib/testtools/testtools/matchers/_higherorder.py
deleted file mode 100644
index 3570f57..0000000
--- a/lib/testtools/testtools/matchers/_higherorder.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-__all__ = [
-    'AfterPreprocessing',
-    'AllMatch',
-    'Annotate',
-    'AnyMatch',
-    'MatchesAny',
-    'MatchesAll',
-    'Not',
-    ]
-
-import types
-
-from ._impl import (
-    Matcher,
-    Mismatch,
-    MismatchDecorator,
-    )
-
-
-class MatchesAny(object):
-    """Matches if any of the matchers it is created with match."""
-
-    def __init__(self, *matchers):
-        self.matchers = matchers
-
-    def match(self, matchee):
-        results = []
-        for matcher in self.matchers:
-            mismatch = matcher.match(matchee)
-            if mismatch is None:
-                return None
-            results.append(mismatch)
-        return MismatchesAll(results)
-
-    def __str__(self):
-        return "MatchesAny(%s)" % ', '.join([
-            str(matcher) for matcher in self.matchers])
-
-
-class MatchesAll(object):
-    """Matches if all of the matchers it is created with match."""
-
-    def __init__(self, *matchers, **options):
-        """Construct a MatchesAll matcher.
-
-        Just list the component matchers as arguments in the ``*args``
-        style. If you want only the first mismatch to be reported, pass in
-        first_only=True as a keyword argument. By default, all mismatches are
-        reported.
-        """
-        self.matchers = matchers
-        self.first_only = options.get('first_only', False)
-
-    def __str__(self):
-        return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
-
-    def match(self, matchee):
-        results = []
-        for matcher in self.matchers:
-            mismatch = matcher.match(matchee)
-            if mismatch is not None:
-                if self.first_only:
-                    return mismatch
-                results.append(mismatch)
-        if results:
-            return MismatchesAll(results)
-        else:
-            return None
-
-
-class MismatchesAll(Mismatch):
-    """A mismatch with many child mismatches."""
-
-    def __init__(self, mismatches, wrap=True):
-        self.mismatches = mismatches
-        self._wrap = wrap
-
-    def describe(self):
-        descriptions = []
-        if self._wrap:
-            descriptions = ["Differences: ["]
-        for mismatch in self.mismatches:
-            descriptions.append(mismatch.describe())
-        if self._wrap:
-            descriptions.append("]")
-        return '\n'.join(descriptions)
-
-
-class Not(object):
-    """Inverts a matcher."""
-
-    def __init__(self, matcher):
-        self.matcher = matcher
-
-    def __str__(self):
-        return 'Not(%s)' % (self.matcher,)
-
-    def match(self, other):
-        mismatch = self.matcher.match(other)
-        if mismatch is None:
-            return MatchedUnexpectedly(self.matcher, other)
-        else:
-            return None
-
-
-class MatchedUnexpectedly(Mismatch):
-    """A thing matched when it wasn't supposed to."""
-
-    def __init__(self, matcher, other):
-        self.matcher = matcher
-        self.other = other
-
-    def describe(self):
-        return "%r matches %s" % (self.other, self.matcher)
-
-
-class Annotate(object):
-    """Annotates a matcher with a descriptive string.
-
-    Mismatches are then described as '<mismatch>: <annotation>'.
-    """
-
-    def __init__(self, annotation, matcher):
-        self.annotation = annotation
-        self.matcher = matcher
-
-    @classmethod
-    def if_message(cls, annotation, matcher):
-        """Annotate ``matcher`` only if ``annotation`` is non-empty."""
-        if not annotation:
-            return matcher
-        return cls(annotation, matcher)
-
-    def __str__(self):
-        return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
-
-    def match(self, other):
-        mismatch = self.matcher.match(other)
-        if mismatch is not None:
-            return AnnotatedMismatch(self.annotation, mismatch)
-
-
-class PostfixedMismatch(MismatchDecorator):
-    """A mismatch annotated with a descriptive string."""
-
-    def __init__(self, annotation, mismatch):
-        super(PostfixedMismatch, self).__init__(mismatch)
-        self.annotation = annotation
-        self.mismatch = mismatch
-
-    def describe(self):
-        return '%s: %s' % (self.original.describe(), self.annotation)
-
-
-AnnotatedMismatch = PostfixedMismatch
-
-
-class PrefixedMismatch(MismatchDecorator):
-
-    def __init__(self, prefix, mismatch):
-        super(PrefixedMismatch, self).__init__(mismatch)
-        self.prefix = prefix
-
-    def describe(self):
-        return '%s: %s' % (self.prefix, self.original.describe())
-
-
-class AfterPreprocessing(object):
-    """Matches if the value matches after passing through a function.
-
-    This can be used to aid in creating trivial matchers as functions, for
-    example::
-
-      def PathHasFileContent(content):
-          def _read(path):
-              return open(path).read()
-          return AfterPreprocessing(_read, Equals(content))
-    """
-
-    def __init__(self, preprocessor, matcher, annotate=True):
-        """Create an AfterPreprocessing matcher.
-
-        :param preprocessor: A function called with the matchee before
-            matching.
-        :param matcher: What to match the preprocessed matchee against.
-        :param annotate: Whether or not to annotate the matcher with
-            something explaining how we transformed the matchee. Defaults
-            to True.
-        """
-        self.preprocessor = preprocessor
-        self.matcher = matcher
-        self.annotate = annotate
-
-    def _str_preprocessor(self):
-        if isinstance(self.preprocessor, types.FunctionType):
-            return '<function %s>' % self.preprocessor.__name__
-        return str(self.preprocessor)
-
-    def __str__(self):
-        return "AfterPreprocessing(%s, %s)" % (
-            self._str_preprocessor(), self.matcher)
-
-    def match(self, value):
-        after = self.preprocessor(value)
-        if self.annotate:
-            matcher = Annotate(
-                "after %s on %r" % (self._str_preprocessor(), value),
-                self.matcher)
-        else:
-            matcher = self.matcher
-        return matcher.match(after)
-
-
-# This is the old, deprecated spelling of the name, kept for backwards
-# compatibility.
-AfterPreproccessing = AfterPreprocessing
-
-
-class AllMatch(object):
-    """Matches if all provided values match the given matcher."""
-
-    def __init__(self, matcher):
-        self.matcher = matcher
-
-    def __str__(self):
-        return 'AllMatch(%s)' % (self.matcher,)
-
-    def match(self, values):
-        mismatches = []
-        for value in values:
-            mismatch = self.matcher.match(value)
-            if mismatch:
-                mismatches.append(mismatch)
-        if mismatches:
-            return MismatchesAll(mismatches)
-
-
-class AnyMatch(object):
-    """Matches if any of the provided values match the given matcher."""
-
-    def __init__(self, matcher):
-        self.matcher = matcher
-
-    def __str__(self):
-        return 'AnyMatch(%s)' % (self.matcher,)
-
-    def match(self, values):
-        mismatches = []
-        for value in values:
-            mismatch = self.matcher.match(value)
-            if mismatch:
-                mismatches.append(mismatch)
-            else:
-                return None
-        return MismatchesAll(mismatches)
-
-
-class MatchesPredicate(Matcher):
-    """Match if a given function returns True.
-
-    It is reasonably common to want to make a very simple matcher based on a
-    function that you already have that returns True or False given a single
-    argument (i.e. a predicate function).  This matcher makes it very easy to
-    do so. e.g.::
-
-      IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even')
-      self.assertThat(4, IsEven)
-    """
-
-    def __init__(self, predicate, message):
-        """Create a ``MatchesPredicate`` matcher.
-
-        :param predicate: A function that takes a single argument and returns
-            a value that will be interpreted as a boolean.
-        :param message: A message to describe a mismatch.  It will be formatted
-            with '%' and be given whatever was passed to ``match()``. Thus, it
-            needs to contain exactly one thing like '%s', '%d' or '%f'.
-        """
-        self.predicate = predicate
-        self.message = message
-
-    def __str__(self):
-        return '%s(%r, %r)' % (
-            self.__class__.__name__, self.predicate, self.message)
-
-    def match(self, x):
-        if not self.predicate(x):
-            return Mismatch(self.message % x)
-
-
-def MatchesPredicateWithParams(predicate, message, name=None):
-    """Match if a given parameterised function returns True.
-
-    It is reasonably common to want to make a very simple matcher based on a
-    function that you already have that returns True or False given some
-    arguments. This matcher makes it very easy to do so. e.g.::
-
-      HasLength = MatchesPredicateWithParams(
-          lambda x, y: len(x) == y, 'len({0}) is not {1}')
-      # This assertion will fail, as 'len([1, 2]) == 3' is False.
-      self.assertThat([1, 2], HasLength(3))
-
-    Note that unlike MatchesPredicate, MatchesPredicateWithParams returns a
-    factory: construct an actual matcher from it by calling the factory with
-    the expected parameters.
-
-    The predicate function should take the object to match as its first
-    parameter. Any additional parameters supplied when constructing a matcher
-    are supplied to the predicate as additional parameters when checking for a
-    match.
-
-    :param predicate: The predicate function.
-    :param message: A format string for describing mismatches.
-    :param name: Optional replacement name for the matcher.
-    """
-    def construct_matcher(*args, **kwargs):
-        return _MatchesPredicateWithParams(
-            predicate, message, name, *args, **kwargs)
-    return construct_matcher
-
-
-class _MatchesPredicateWithParams(Matcher):
-
-    def __init__(self, predicate, message, name, *args, **kwargs):
-        """Create a ``MatchesPredicateWithParams`` matcher.
-
-        :param predicate: A function that takes an object to match and
-            additional params as given in ``*args`` and ``**kwargs``. The
-            result of the function will be interpreted as a boolean to
-            determine a match.
-        :param message: A message to describe a mismatch.  It will be
-            formatted with ``.format()``; the positional arguments are
-            whatever was passed to ``match()`` followed by ``*args``, and
-            the keyword arguments are ``**kwargs``.
-
-            For instance, to format a single parameter::
-
-                "{0} is not a {1}"
-
-            To format a keyword arg::
-
-                "{0} is not a {type_to_check}"
-        :param name: What name to use for the matcher class. Pass None to use
-            the default.
-        """
-        self.predicate = predicate
-        self.message = message
-        self.name = name
-        self.args = args
-        self.kwargs = kwargs
-
-    def __str__(self):
-        args = [str(arg) for arg in self.args]
-        kwargs = ["%s=%s" % item for item in self.kwargs.items()]
-        args = ", ".join(args + kwargs)
-        if self.name is None:
-            name = 'MatchesPredicateWithParams(%r, %r)' % (
-                self.predicate, self.message)
-        else:
-            name = self.name
-        return '%s(%s)' % (name, args)
-
-    def match(self, x):
-        if not self.predicate(x, *self.args, **self.kwargs):
-            return Mismatch(
-                self.message.format(*((x,) + self.args), **self.kwargs))
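
The higher-order matchers deleted above exist to be composed with the basic
ones; a minimal sketch, same assumptions as before::

    from testtools import TestCase
    from testtools.matchers import (
        AfterPreprocessing, Annotate, Equals, MatchesAll, MatchesAny, Not)

    class TestComposition(TestCase):

        def test_combining(self):
            # Succeed if any component matches...
            self.assertThat(4, MatchesAny(Equals(3), Equals(4)))
            # ...or only if every component matches.
            self.assertThat(4, MatchesAll(Not(Equals(3)), Equals(4)))
            # Transform the matchee before matching.
            self.assertThat('4', AfterPreprocessing(int, Equals(4)))
            # Suffix any failure description with an annotation.
            self.assertThat(4, Annotate('should be four', Equals(4)))
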
diff --git a/lib/testtools/testtools/matchers/_impl.py b/lib/testtools/testtools/matchers/_impl.py
deleted file mode 100644
index 19a93af..0000000
--- a/lib/testtools/testtools/matchers/_impl.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-"""Matchers, a way to express complex assertions outside the testcase.
-
-Inspired by 'hamcrest'.
-
-Matcher provides the abstract API that all matchers need to implement.
-
-Bundled matchers are listed in __all__: a list can be obtained by running
-$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
-"""
-
-__all__ = [
-    'Matcher',
-    'Mismatch',
-    'MismatchDecorator',
-    'MismatchError',
-    ]
-
-from testtools.compat import (
-    _isbytes,
-    istext,
-    str_is_unicode,
-    text_repr
-    )
-
-
-class Matcher(object):
-    """A pattern matcher.
-
-    A Matcher must implement match and __str__ to be used by
-    testtools.TestCase.assertThat. Matcher.match(thing) returns None when
-    thing is completely matched, and a Mismatch object otherwise.
-
-    Matchers can be useful outside of test cases, as they are simply a
-    pattern matching language expressed as objects.
-
-    testtools.matchers is inspired by hamcrest, but is pythonic rather than
-    a Java transcription.
-    """
-
-    def match(self, something):
-        """Return None if this matcher matches something, a Mismatch otherwise.
-        """
-        raise NotImplementedError(self.match)
-
-    def __str__(self):
-        """Get a sensible human representation of the matcher.
-
-        This should include the parameters given to the matcher and any
-        state that would affect the match operation.
-        """
-        raise NotImplementedError(self.__str__)
-
-
-class Mismatch(object):
-    """An object describing a mismatch detected by a Matcher."""
-
-    def __init__(self, description=None, details=None):
-        """Construct a `Mismatch`.
-
-        :param description: A description to use.  If not provided,
-            `Mismatch.describe` must be implemented.
-        :param details: Extra details about the mismatch.  Defaults
-            to the empty dict.
-        """
-        if description:
-            self._description = description
-        if details is None:
-            details = {}
-        self._details = details
-
-    def describe(self):
-        """Describe the mismatch.
-
-        This should be either a human-readable string or castable to a string.
-        In particular, it should either be plain ascii or unicode on Python 2,
-        and care should be taken to escape control characters.
-        """
-        try:
-            return self._description
-        except AttributeError:
-            raise NotImplementedError(self.describe)
-
-    def get_details(self):
-        """Get extra details about the mismatch.
-
-        This allows the mismatch to provide extra information beyond the basic
-        description, including large text or binary files, or debugging internals
-        without having to force it to fit in the output of 'describe'.
-
-        The testtools assertion assertThat will query get_details and attach
-        all its values to the test, permitting them to be reported in whatever
-        manner the test environment chooses.
-
-        :return: a dict mapping names to Content objects. name is a string to
-            name the detail, and the Content object is the detail to add
-            to the result. For more information, see the API to which items
-            from this dict are passed: testtools.TestCase.addDetail.
-        """
-        return getattr(self, '_details', {})
-
-    def __repr__(self):
-        return  "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
-            id(self), self.__dict__)
-
-
-class MismatchError(AssertionError):
-    """Raised when a mismatch occurs."""
-
-    # This class exists to work around
-    # <https://bugs.launchpad.net/testtools/+bug/804127>.  It provides a
-    # guaranteed way of getting a readable exception, no matter what crazy
-    # characters are in the matchee, matcher or mismatch.
-
-    def __init__(self, matchee, matcher, mismatch, verbose=False):
-        super(MismatchError, self).__init__()
-        self.matchee = matchee
-        self.matcher = matcher
-        self.mismatch = mismatch
-        self.verbose = verbose
-
-    def __str__(self):
-        difference = self.mismatch.describe()
-        if self.verbose:
-            # GZ 2011-08-24: Smelly API? Better to take any object and special
-            #                case text inside?
-            if istext(self.matchee) or _isbytes(self.matchee):
-                matchee = text_repr(self.matchee, multiline=False)
-            else:
-                matchee = repr(self.matchee)
-            return (
-                'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
-                % (matchee, self.matcher, difference))
-        else:
-            return difference
-
-    if not str_is_unicode:
-
-        __unicode__ = __str__
-
-        def __str__(self):
-            return self.__unicode__().encode("ascii", "backslashreplace")
-
-
-class MismatchDecorator(object):
-    """Decorate a ``Mismatch``.
-
-    Forwards all messages to the original mismatch object.  Probably the best
-    way to use this is to inherit from this class and then provide your own
-    custom decoration logic.
-    """
-
-    def __init__(self, original):
-        """Construct a `MismatchDecorator`.
-
-        :param original: A `Mismatch` object to decorate.
-        """
-        self.original = original
-
-    def __repr__(self):
-        return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,)
-
-    def describe(self):
-        return self.original.describe()
-
-    def get_details(self):
-        return self.original.get_details()
-
-
-# Signal that this is part of the testing framework, and that code from this
-# should not normally appear in tracebacks.
-__unittest = True
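
Matcher and Mismatch, deleted above, are the whole protocol a custom matcher
has to implement; a minimal sketch (IsEven is invented for illustration)::

    from testtools.matchers import Matcher, Mismatch

    class IsEven(Matcher):
        """Hypothetical matcher: matches even integers."""

        def __str__(self):
            return 'IsEven()'

        def match(self, actual):
            # None signals a match; a Mismatch describes the failure.
            if actual % 2 == 0:
                return None
            return Mismatch('%r is not even' % (actual,))

Such a matcher then works anywhere assertThat accepts one, e.g.
``self.assertThat(4, IsEven())``.
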
diff --git a/lib/testtools/testtools/monkey.py b/lib/testtools/testtools/monkey.py
deleted file mode 100644
index ba0ac8f..0000000
--- a/lib/testtools/testtools/monkey.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Helpers for monkey-patching Python code."""
-
-__all__ = [
-    'MonkeyPatcher',
-    'patch',
-    ]
-
-
-class MonkeyPatcher(object):
-    """A set of monkey-patches that can be applied and removed all together.
-
-    Use this to cover up attributes with new objects. Particularly useful for
-    testing difficult code.
-    """
-
-    # Marker used to indicate that the patched attribute did not exist on the
-    # object before we patched it.
-    _NO_SUCH_ATTRIBUTE = object()
-
-    def __init__(self, *patches):
-        """Construct a `MonkeyPatcher`.
-
-        :param patches: The patches to apply, each should be (obj, name,
-            new_value). Providing patches here is equivalent to calling
-            `add_patch`.
-        """
-        # List of patches to apply in (obj, name, value).
-        self._patches_to_apply = []
-        # List of the original values for things that have been patched.
-        # (obj, name, value) format.
-        self._originals = []
-        for patch in patches:
-            self.add_patch(*patch)
-
-    def add_patch(self, obj, name, value):
-        """Add a patch to overwrite 'name' on 'obj' with 'value'.
-
-        The attribute C{name} on C{obj} will be set to C{value} when
-        C{patch} is called or during C{run_with_patches}.
-
-        You can restore the original values with a call to restore().
-        """
-        self._patches_to_apply.append((obj, name, value))
-
-    def patch(self):
-        """Apply all of the patches that have been specified with `add_patch`.
-
-        Reverse this operation using L{restore}.
-        """
-        for obj, name, value in self._patches_to_apply:
-            original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE)
-            self._originals.append((obj, name, original_value))
-            setattr(obj, name, value)
-
-    def restore(self):
-        """Restore all original values to any patched objects.
-
-        If the patched attribute did not exist on an object before it was
-        patched, `restore` will delete the attribute so as to return the
-        object to its original state.
-        """
-        while self._originals:
-            obj, name, value = self._originals.pop()
-            if value is self._NO_SUCH_ATTRIBUTE:
-                delattr(obj, name)
-            else:
-                setattr(obj, name, value)
-
-    def run_with_patches(self, f, *args, **kw):
-        """Run 'f' with the given args and kwargs with all patches applied.
-
-        Restores all objects to their original state when finished.
-        """
-        self.patch()
-        try:
-            return f(*args, **kw)
-        finally:
-            self.restore()
-
-
-def patch(obj, attribute, value):
-    """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.
-
-    If 'attribute' is not set on 'obj' already, then the returned callable
-    will delete the attribute when called.
-
-    :param obj: An object to monkey-patch.
-    :param attribute: The name of the attribute to patch.
-    :param value: The value to set 'obj.attribute' to.
-    :return: A nullary callable that, when run, will restore 'obj' to its
-        original state.
-    """
-    patcher = MonkeyPatcher((obj, attribute, value))
-    patcher.patch()
-    return patcher.restore
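
monkey.patch, deleted above, applies immediately and hands back the undo
callable; a minimal sketch (the Config class is invented for illustration)::

    from testtools.monkey import patch

    class Config(object):
        timeout = 10

    # patch() sets the attribute and returns a nullary restore callable.
    restore = patch(Config, 'timeout', 99)
    try:
        assert Config.timeout == 99
    finally:
        restore()
    assert Config.timeout == 10
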
diff --git a/lib/testtools/testtools/run.py b/lib/testtools/testtools/run.py
deleted file mode 100755
index 8421f25..0000000
--- a/lib/testtools/testtools/run.py
+++ /dev/null
@@ -1,535 +0,0 @@
-# Copyright (c) 2009 testtools developers. See LICENSE for details.
-
-"""python -m testtools.run testspec [testspec...]
-
-Run some tests with the testtools extended API.
-
-For instance, to run the testtools test suite:
- $ python -m testtools.run testtools.tests.test_suite
-"""
-
-from functools import partial
-import os.path
-import unittest
-import sys
-
-from extras import safe_hasattr
-
-from testtools import TextTestResult, testcase
-from testtools.compat import classtypes, istext, unicode_output_stream
-from testtools.testsuite import filter_by_ids, iterate_tests, sorted_tests
-
-
-defaultTestLoader = unittest.defaultTestLoader
-defaultTestLoaderCls = unittest.TestLoader
-
-if getattr(defaultTestLoader, 'discover', None) is None:
-    try:
-        import discover
-        defaultTestLoader = discover.DiscoveringTestLoader()
-        defaultTestLoaderCls = discover.DiscoveringTestLoader
-        have_discover = True
-        discover_impl = discover
-    except ImportError:
-        have_discover = False
-else:
-    have_discover = True
-    discover_impl = unittest.loader
-discover_fixed = False
-
-
-def list_test(test):
-    """Return the test ids that would be run if test() was run.
-
-    When things fail to import they can be represented as well, though
-    we use an ugly hack (see http://bugs.python.org/issue19746 for details)
-    to determine that. The difference matters because if a user is
-    filtering the tests to run based on the returned ids, a failed import
-    can reduce the visible tests, and it can be impossible to tell that a
-    selected test would have been one of the imported ones.
-
-    :return: A tuple of test ids that would run and error strings
-        describing things that failed to import.
-    """
-    unittest_import_strs = set([
-        'unittest.loader.ModuleImportFailure.', 'discover.ModuleImportFailure.'
-        ])
-    test_ids = []
-    errors = []
-    for test in iterate_tests(test):
-        # Much ugly.
-        for prefix in unittest_import_strs:
-            if test.id().startswith(prefix):
-                errors.append(test.id()[len(prefix):])
-                break
-        else:
-            test_ids.append(test.id())
-    return test_ids, errors
-
-
-class TestToolsTestRunner(object):
-    """ A thunk object to support unittest.TestProgram."""
-
-    def __init__(self, verbosity=None, failfast=None, buffer=None,
-        stdout=None):
-        """Create a TestToolsTestRunner.
-
-        :param verbosity: Ignored.
-        :param failfast: Stop running tests at the first failure.
-        :param buffer: Ignored.
-        :param stdout: Stream to use for stdout.
-        """
-        self.failfast = failfast
-        if stdout is None:
-            stdout = sys.stdout
-        self.stdout = stdout
-
-    def list(self, test):
-        """List the tests that would be run if test() was run."""
-        test_ids, errors = list_test(test)
-        for test_id in test_ids:
-            self.stdout.write('%s\n' % test_id)
-        if errors:
-            self.stdout.write('Failed to import\n')
-            for test_id in errors:
-                self.stdout.write('%s\n' % test_id)
-            sys.exit(2)
-
-    def run(self, test):
-        "Run the given test case or test suite."
-        result = TextTestResult(
-            unicode_output_stream(self.stdout), failfast=self.failfast)
-        result.startTestRun()
-        try:
-            return test.run(result)
-        finally:
-            result.stopTestRun()
-
-
-####################
-# Taken from python 2.7 and slightly modified for compatibility with
-# older versions. Delete when 2.7 is the oldest supported version.
-# Modifications:
-#  - Use have_discover to raise an error if the user tries to use
-#    discovery on an old version and doesn't have discover installed.
-#  - If --catch is given check that installHandler is available, as
-#    it won't be on old python versions.
-#  - print calls have been made single-source python3 compatible.
-#  - exception handling likewise.
-#  - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE
-#    removed.
-#  - A tweak has been added to detect 'python -m *.run' and use a
-#    better progName in that case.
-#  - self.module is more comprehensively set to None when being invoked from
-#    the commandline - __name__ is used as a sentinel value.
-#  - --list has been added which can list tests (should be upstreamed).
-#  - --load-list has been added which can reduce the tests used (should be
-#    upstreamed).
-#  - The limitation of using getopt is declared to the user.
-#  - http://bugs.python.org/issue16709 is worked around, by sorting tests when
-#    discover is used.
-#  - We monkey-patch the discover and unittest loaders to address
-#     http://bugs.python.org/issue16662 with the proposed upstream patch.
-
-FAILFAST     = "  -f, --failfast   Stop on first failure\n"
-CATCHBREAK   = "  -c, --catch      Catch control-C and display results\n"
-BUFFEROUTPUT = "  -b, --buffer     Buffer stdout and stderr during test runs\n"
-
-USAGE_AS_MAIN = """\
-Usage: %(progName)s [options] [tests]
-
-Options:
-  -h, --help       Show this message
-  -v, --verbose    Verbose output
-  -q, --quiet      Minimal output
-  -l, --list       List tests rather than executing them.
-  --load-list      Specifies a file containing test ids, only tests matching
-                   those ids are executed.
-%(failfast)s%(catchbreak)s%(buffer)s
-Examples:
-  %(progName)s test_module               - run tests from test_module
-  %(progName)s module.TestClass          - run tests from module.TestClass
-  %(progName)s module.Class.test_method  - run specified test method
-
-All options must come before [tests].  [tests] can be a list of any number of
-test modules, classes and test methods.
-
-Alternative Usage: %(progName)s discover [options]
-
-Options:
-  -v, --verbose    Verbose output
-%(failfast)s%(catchbreak)s%(buffer)s  -s directory     Directory to start discovery ('.' default)
-  -p pattern       Pattern to match test files ('test*.py' default)
-  -t directory     Top level directory of project (default to
-                   start directory)
-  -l, --list       List tests rather than executing them.
-  --load-list      Specifies a file containing test ids, only tests matching
-                   those ids are executed.
-
-For test discovery all test modules must be importable from the top
-level directory of the project.
-"""
-
-
-class TestProgram(object):
-    """A command-line program that runs a set of tests; this is primarily
-       for making test modules conveniently executable.
-    """
-    USAGE = USAGE_AS_MAIN
-
-    # defaults for testing
-    failfast = catchbreak = buffer = progName = None
-
-    def __init__(self, module=__name__, defaultTest=None, argv=None,
-                    testRunner=None, testLoader=defaultTestLoader,
-                    exit=True, verbosity=1, failfast=None, catchbreak=None,
-                    buffer=None, stdout=None):
-        if module == __name__:
-            self.module = None
-        elif istext(module):
-            self.module = __import__(module)
-            for part in module.split('.')[1:]:
-                self.module = getattr(self.module, part)
-        else:
-            self.module = module
-        if argv is None:
-            argv = sys.argv
-        if stdout is None:
-            stdout = sys.stdout
-        self.stdout = stdout
-
-        self.exit = exit
-        self.failfast = failfast
-        self.catchbreak = catchbreak
-        self.verbosity = verbosity
-        self.buffer = buffer
-        self.defaultTest = defaultTest
-        self.listtests = False
-        self.load_list = None
-        self.testRunner = testRunner
-        self.testLoader = testLoader
-        progName = argv[0]
-        if progName.endswith('%srun.py' % os.path.sep):
-            elements = progName.split(os.path.sep)
-            progName = '%s.run' % elements[-2]
-        else:
-            progName = os.path.basename(argv[0])
-        self.progName = progName
-        self.parseArgs(argv)
-        if self.load_list:
-            # TODO: preserve existing suites (like testresources does in
-            # OptimisingTestSuite.add, but with a standard protocol).
-            # This is needed because the load_tests hook allows arbitrary
-            # suites, even if that is rarely used.
-            source = open(self.load_list, 'rb')
-            try:
-                lines = source.readlines()
-            finally:
-                source.close()
-            test_ids = set(line.strip().decode('utf-8') for line in lines)
-            self.test = filter_by_ids(self.test, test_ids)
-        if not self.listtests:
-            self.runTests()
-        else:
-            runner = self._get_runner()
-            if safe_hasattr(runner, 'list'):
-                runner.list(self.test)
-            else:
-                for test in iterate_tests(self.test):
-                    self.stdout.write('%s\n' % test.id())
-
-    def usageExit(self, msg=None):
-        if msg:
-            print(msg)
-        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
-                 'buffer': ''}
-        if self.failfast != False:
-            usage['failfast'] = FAILFAST
-        if self.catchbreak != False:
-            usage['catchbreak'] = CATCHBREAK
-        if self.buffer != False:
-            usage['buffer'] = BUFFEROUTPUT
-        print(self.USAGE % usage)
-        sys.exit(2)
-
-    def parseArgs(self, argv):
-        if len(argv) > 1 and argv[1].lower() == 'discover':
-            self._do_discovery(argv[2:])
-            return
-
-        import getopt
-        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer',
-            'list', 'load-list=']
-        try:
-            options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts)
-            for opt, value in options:
-                if opt in ('-h','-H','--help'):
-                    self.usageExit()
-                if opt in ('-q','--quiet'):
-                    self.verbosity = 0
-                if opt in ('-v','--verbose'):
-                    self.verbosity = 2
-                if opt in ('-f','--failfast'):
-                    if self.failfast is None:
-                        self.failfast = True
-                    # Should this raise an exception if -f is not valid?
-                if opt in ('-c','--catch'):
-                    if self.catchbreak is None:
-                        self.catchbreak = True
-                    # Should this raise an exception if -c is not valid?
-                if opt in ('-b','--buffer'):
-                    if self.buffer is None:
-                        self.buffer = True
-                    # Should this raise an exception if -b is not valid?
-                if opt in ('-l', '--list'):
-                    self.listtests = True
-                if opt == '--load-list':
-                    self.load_list = value
-            if len(args) == 0 and self.defaultTest is None:
-                # createTests will load tests from self.module
-                self.testNames = None
-            elif len(args) > 0:
-                self.testNames = args
-            else:
-                self.testNames = (self.defaultTest,)
-            self.createTests()
-        except getopt.error:
-            self.usageExit(sys.exc_info()[1])
-
-    def createTests(self):
-        if self.testNames is None:
-            self.test = self.testLoader.loadTestsFromModule(self.module)
-        else:
-            self.test = self.testLoader.loadTestsFromNames(self.testNames,
-                                                           self.module)
-
-    def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
-        # handle command line args for test discovery
-        if not have_discover:
-            raise AssertionError("Unable to use discovery, must use python 2.7 "
-                    "or greater, or install the discover package.")
-        _fix_discovery()
-        self.progName = '%s discover' % self.progName
-        import optparse
-        parser = optparse.OptionParser()
-        parser.prog = self.progName
-        parser.add_option('-v', '--verbose', dest='verbose', default=False,
-                          help='Verbose output', action='store_true')
-        if self.failfast != False:
-            parser.add_option('-f', '--failfast', dest='failfast', default=False,
-                              help='Stop on first fail or error',
-                              action='store_true')
-        if self.catchbreak != False:
-            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
-                              help='Catch ctrl-C and display results so far',
-                              action='store_true')
-        if self.buffer != False:
-            parser.add_option('-b', '--buffer', dest='buffer', default=False,
-                              help='Buffer stdout and stderr during tests',
-                              action='store_true')
-        parser.add_option('-s', '--start-directory', dest='start', default='.',
-                          help="Directory to start discovery ('.' default)")
-        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
-                          help="Pattern to match tests ('test*.py' default)")
-        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
-                          help='Top level directory of project (defaults to start directory)')
-        parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
-                          help='List tests rather than running them.')
-        parser.add_option('--load-list', dest='load_list', default=None,
-                          help='Specify a filename containing the test ids to use.')
-
-        options, args = parser.parse_args(argv)
-        if len(args) > 3:
-            self.usageExit()
-
-        for name, value in zip(('start', 'pattern', 'top'), args):
-            setattr(options, name, value)
-
-        # only set options from the parsing here
-        # if they weren't set explicitly in the constructor
-        if self.failfast is None:
-            self.failfast = options.failfast
-        if self.catchbreak is None:
-            self.catchbreak = options.catchbreak
-        if self.buffer is None:
-            self.buffer = options.buffer
-        self.listtests = options.listtests
-        self.load_list = options.load_list
-
-        if options.verbose:
-            self.verbosity = 2
-
-        start_dir = options.start
-        pattern = options.pattern
-        top_level_dir = options.top
-
-        loader = Loader()
-        # See http://bugs.python.org/issue16709
-        # While sorting here is intrusive, it's better than being random.
-        # Rules for the sort:
-        # - standard suites are flattened, and the resulting tests sorted by
-        #   id.
-        # - non-standard suites are preserved as-is, and sorted into position
-        #   by the first test found by iterating the suite.
-        # We do this by a DSU process: flatten and grab a key, sort, strip the
-        # keys.
-        loaded = loader.discover(start_dir, pattern, top_level_dir)
-        self.test = sorted_tests(loaded)
-
-    def runTests(self):
-        if (self.catchbreak
-            and getattr(unittest, 'installHandler', None) is not None):
-            unittest.installHandler()
-        testRunner = self._get_runner()
-        self.result = testRunner.run(self.test)
-        if self.exit:
-            sys.exit(not self.result.wasSuccessful())
-
-    def _get_runner(self):
-        if self.testRunner is None:
-            self.testRunner = TestToolsTestRunner
-        try:
-            testRunner = self.testRunner(verbosity=self.verbosity,
-                                         failfast=self.failfast,
-                                         buffer=self.buffer,
-                                         stdout=self.stdout)
-        except TypeError:
-            # didn't accept the verbosity, buffer, failfast or stdout arguments
-            # Try with the prior contract
-            try:
-                testRunner = self.testRunner(verbosity=self.verbosity,
-                                             failfast=self.failfast,
-                                             buffer=self.buffer)
-            except TypeError:
-                # Now try calling it with defaults
-                try:
-                    testRunner = self.testRunner()
-                except TypeError:
-                    # it is assumed to be a TestRunner instance
-                    testRunner = self.testRunner
-        return testRunner
-
-
-def _fix_discovery():
-    # Monkey patch in the bugfix from http://bugs.python.org/issue16662
-    # - the code here is a straight copy from the Python core tree
-    # with the patch applied.
-    global discover_fixed
-    if discover_fixed:
-        return
-    # Do we have a fixed Python?
-    # (not committed upstream yet - so we can't uncomment this code,
-    # but when it gets committed, the next version to be released won't
-    # need monkey patching.)
-    # if sys.version_info[:2] > (3, 4):
-    #     discover_fixed = True
-    #     return
-    if not have_discover:
-        return
-    if safe_hasattr(discover_impl, '_jython_aware_splitext'):
-        _jython_aware_splitext = discover_impl._jython_aware_splitext
-    else:
-        def _jython_aware_splitext(path):
-            if path.lower().endswith('$py.class'):
-                return path[:-9]
-            return os.path.splitext(path)[0]
-    def loadTestsFromModule(self, module, use_load_tests=True, pattern=None):
-        """Return a suite of all tests cases contained in the given module"""
-        # use_load_tests is preserved for compatability though it was never
-        # an official API.
-        # pattern is not an official API either; it is used in discovery to
-        # chain the requested pattern down.
-        tests = []
-        for name in dir(module):
-            obj = getattr(module, name)
-            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
-                tests.append(self.loadTestsFromTestCase(obj))
-
-        load_tests = getattr(module, 'load_tests', None)
-        tests = self.suiteClass(tests)
-        if use_load_tests and load_tests is not None:
-            try:
-                return load_tests(self, tests, pattern)
-            except Exception as e:
-                return discover_impl._make_failed_load_tests(
-                    module.__name__, e, self.suiteClass)
-        return tests
-    def _find_tests(self, start_dir, pattern, namespace=False):
-        """Used by discovery. Yields test suites it loads."""
-        paths = sorted(os.listdir(start_dir))
-
-        for path in paths:
-            full_path = os.path.join(start_dir, path)
-            if os.path.isfile(full_path):
-                if not discover_impl.VALID_MODULE_NAME.match(path):
-                    # valid Python identifiers only
-                    continue
-                if not self._match_path(path, full_path, pattern):
-                    continue
-                # if the test file matches, load it
-                name = self._get_name_from_path(full_path)
-                try:
-                    module = self._get_module_from_name(name)
-                except testcase.TestSkipped as e:
-                    yield discover_impl._make_skipped_test(
-                        name, e, self.suiteClass)
-                except:
-                    yield discover_impl._make_failed_import_test(
-                        name, self.suiteClass)
-                else:
-                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
-                    realpath = _jython_aware_splitext(
-                        os.path.realpath(mod_file))
-                    fullpath_noext = _jython_aware_splitext(
-                        os.path.realpath(full_path))
-                    if realpath.lower() != fullpath_noext.lower():
-                        module_dir = os.path.dirname(realpath)
-                        mod_name = _jython_aware_splitext(
-                            os.path.basename(full_path))
-                        expected_dir = os.path.dirname(full_path)
-                        msg = ("%r module incorrectly imported from %r. Expected %r. "
-                               "Is this module globally installed?")
-                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
-                    yield self.loadTestsFromModule(module, pattern=pattern)
-            elif os.path.isdir(full_path):
-                if (not namespace and
-                    not os.path.isfile(os.path.join(full_path, '__init__.py'))):
-                    continue
-
-                load_tests = None
-                tests = None
-                name = self._get_name_from_path(full_path)
-                try:
-                    package = self._get_module_from_name(name)
-                except testcase.TestSkipped as e:
-                    yield discover_impl._make_skipped_test(
-                        name, e, self.suiteClass)
-                except:
-                    yield discover_impl._make_failed_import_test(
-                        name, self.suiteClass)
-                else:
-                    load_tests = getattr(package, 'load_tests', None)
-                    tests = self.loadTestsFromModule(package, pattern=pattern)
-                    if tests is not None:
-                        # tests loaded from package file
-                        yield tests
-
-                    if load_tests is not None:
-                        # loadTestsFromModule(package) has load_tests for us.
-                        continue
-                    # recurse into the package
-                    pkg_tests = self._find_tests(
-                        full_path, pattern, namespace=namespace)
-                    for test in pkg_tests:
-                        yield test
-    defaultTestLoaderCls.loadTestsFromModule = loadTestsFromModule
-    defaultTestLoaderCls._find_tests = _find_tests
-
-################
-
-def main(argv, stdout):
-    program = TestProgram(argv=argv, testRunner=partial(TestToolsTestRunner, stdout=stdout),
-        stdout=stdout)
-
-if __name__ == '__main__':
-    main(sys.argv, sys.stdout)
diff --git a/lib/testtools/testtools/runtest.py b/lib/testtools/testtools/runtest.py
deleted file mode 100644
index a29cdd6..0000000
--- a/lib/testtools/testtools/runtest.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
-
-"""Individual test case execution."""
-
-__all__ = [
-    'MultipleExceptions',
-    'RunTest',
-    ]
-
-import sys
-
-from testtools.testresult import ExtendedToOriginalDecorator
-
-
-class MultipleExceptions(Exception):
-    """Represents many exceptions raised from some operation.
-
-    :ivar args: The sys.exc_info() tuples for each exception.
-    """
-
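-# Example (editor's illustration, not part of the original module): each
-# positional argument is expected to be a sys.exc_info() tuple, which
-# _got_user_exception below unpacks recursively:
-#
-#   raise MultipleExceptions(exc_info_one, exc_info_two)
-#
-# exc_info_one and exc_info_two are hypothetical, previously captured tuples.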
-
-class RunTest(object):
-    """An object to run a test.
-
-    RunTest objects are used to implement the internal logic involved in
-    running a test. TestCase.__init__ stores _RunTest as the class of RunTest
-    to execute.  Passing the runTest= parameter to TestCase.__init__ allows a
-    different RunTest class to be used to execute the test.
-
-    Subclassing or replacing RunTest can be useful to add functionality to the
-    way that tests are run in a given project.
-
-    :ivar case: The test case that is to be run.
-    :ivar result: The result object a case is reporting to.
-    :ivar handlers: A list of (ExceptionClass, handler_function) for
-        exceptions that should be caught if raised from the user
-        code. Exceptions that are caught are checked against this list in
-        first to last order.  There is a catch-all of 'Exception' at the end
-        of the list, so to add a new exception to the list, insert it at the
-        front (which ensures that it will be checked before any existing base
-        classes in the list). If you add multiple exceptions, some of which are
-        subclasses of each other, add the most specific exceptions last (so
-        they come before their parent classes in the list).
-    :ivar exception_caught: An object returned when _run_user catches an
-        exception.
-    :ivar _exceptions: A list of caught exceptions, used to do the single
-        reporting of error/failure/skip etc.
-    """
-
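-    # Example (editor's sketch, not from the original source): a project can
-    # route a custom exception to its own handler by inserting it at the
-    # front of 'handlers', ahead of the generic entries:
-    #
-    #   run = RunTest(case)
-    #   run.handlers.insert(0, (MyTimeout, my_timeout_handler))
-    #
-    # MyTimeout and my_timeout_handler are hypothetical names.
-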
-    def __init__(self, case, handlers=None, last_resort=None):
-        """Create a RunTest to run a case.
-
-        :param case: A testtools.TestCase test case object.
-        :param handlers: Exception handlers for this RunTest. These are stored
-            in self.handlers and can be modified later if needed.
-        :param last_resort: A handler of last resort: any exception which is
-            not handled by handlers will cause the last resort handler to be
-            called as last_resort(case, result, exception), and then the
-            exception will be raised - aborting the test run as this is inside
-            the runner machinery rather than the confined context of the test.
-        """
-        self.case = case
-        self.handlers = handlers or []
-        self.exception_caught = object()
-        self._exceptions = []
-        self.last_resort = last_resort or (lambda case, result, exc: None)
-
-    def run(self, result=None):
-        """Run self.case reporting activity to result.
-
-        :param result: Optional testtools.TestResult to report activity to.
-        :return: The result object the test was run against.
-        """
-        if result is None:
-            actual_result = self.case.defaultTestResult()
-            actual_result.startTestRun()
-        else:
-            actual_result = result
-        try:
-            return self._run_one(actual_result)
-        finally:
-            if result is None:
-                actual_result.stopTestRun()
-
-    def _run_one(self, result):
-        """Run one test reporting to result.
-
-        :param result: A testtools.TestResult to report activity to.
-            This result object is decorated with an ExtendedToOriginalDecorator
-            to ensure that the latest TestResult API can be used with
-            confidence by client code.
-        :return: The result object the test was run against.
-        """
-        return self._run_prepared_result(ExtendedToOriginalDecorator(result))
-
-    def _run_prepared_result(self, result):
-        """Run one test reporting to result.
-
-        :param result: A testtools.TestResult to report activity to.
-        :return: The result object the test was run against.
-        """
-        result.startTest(self.case)
-        self.result = result
-        try:
-            self._exceptions = []
-            self._run_core()
-            if self._exceptions:
-                # One or more caught exceptions, now trigger the test's
-                # reporting method for just one.
-                e = self._exceptions.pop()
-                for exc_class, handler in self.handlers:
-                    if isinstance(e, exc_class):
-                        handler(self.case, self.result, e)
-                        break
-                else:
-                    self.last_resort(self.case, self.result, e)
-                    raise e
-        finally:
-            result.stopTest(self.case)
-        return result
-
-    def _run_core(self):
-        """Run the user supplied test code."""
-        test_method = self.case._get_test_method()
-        if getattr(test_method, '__unittest_skip__', False):
-            self.result.addSkip(
-                self.case,
-                reason=getattr(test_method, '__unittest_skip_why__', None)
-            )
-            return
-
-        if self.exception_caught == self._run_user(self.case._run_setup,
-            self.result):
-            # Don't run the test method if we failed getting here.
-            self._run_cleanups(self.result)
-            return
-        # Run everything from here on in. If any of the methods raise an
-        # exception we'll have failed.
-        failed = False
-        try:
-            if self.exception_caught == self._run_user(
-                self.case._run_test_method, self.result):
-                failed = True
-        finally:
-            try:
-                if self.exception_caught == self._run_user(
-                    self.case._run_teardown, self.result):
-                    failed = True
-            finally:
-                try:
-                    if self.exception_caught == self._run_user(
-                        self._run_cleanups, self.result):
-                        failed = True
-                finally:
-                    if getattr(self.case, 'force_failure', None):
-                        self._run_user(_raise_force_fail_error)
-                        failed = True
-                    if not failed:
-                        self.result.addSuccess(self.case,
-                            details=self.case.getDetails())
-
-    def _run_cleanups(self, result):
-        """Run the cleanups that have been added with addCleanup.
-
-        See the docstring for addCleanup for more information.
-
-        :return: None if all cleanups ran without error,
-            ``exception_caught`` if there was an error.
-        """
-        failing = False
-        while self.case._cleanups:
-            function, arguments, keywordArguments = self.case._cleanups.pop()
-            got_exception = self._run_user(
-                function, *arguments, **keywordArguments)
-            if got_exception == self.exception_caught:
-                failing = True
-        if failing:
-            return self.exception_caught
-
-    def _run_user(self, fn, *args, **kwargs):
-        """Run a user supplied function.
-
-        Exceptions are processed by `_got_user_exception`.
-
-        :return: Either whatever 'fn' returns or ``exception_caught`` if
-            'fn' raised an exception.
-        """
-        try:
-            return fn(*args, **kwargs)
-        except:
-            return self._got_user_exception(sys.exc_info())
-
-    def _got_user_exception(self, exc_info, tb_label='traceback'):
-        """Called when user code raises an exception.
-
-        If 'exc_info' is a `MultipleExceptions`, we recurse into it,
-        unpacking the errors it is made up of.
-
-        :param exc_info: A sys.exc_info() tuple for the user error.
-        :param tb_label: An optional string label for the error.  If
-            not specified, will default to 'traceback'.
-        :return: 'exception_caught' if we catch one of the exceptions that
-            have handlers in 'handlers', otherwise raise the error.
-        """
-        if exc_info[0] is MultipleExceptions:
-            for sub_exc_info in exc_info[1].args:
-                self._got_user_exception(sub_exc_info, tb_label)
-            return self.exception_caught
-        try:
-            e = exc_info[1]
-            self.case.onException(exc_info, tb_label=tb_label)
-        finally:
-            del exc_info
-        self._exceptions.append(e)
-        # Yes, this means we catch everything - we re-raise KeyboardInterrupt
-        # etc. later, after tearDown and cleanups - since those may be cleaning up
-        # external processes.
-        return self.exception_caught
-
-
-def _raise_force_fail_error():
-    raise AssertionError("Forced Test Failure")
-
-
-# Signal that this is part of the testing framework, and that code from this
-# should not normally appear in tracebacks.
-__unittest = True
diff --git a/lib/testtools/testtools/tags.py b/lib/testtools/testtools/tags.py
deleted file mode 100644
index b55bd38..0000000
--- a/lib/testtools/testtools/tags.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) 2012 testtools developers. See LICENSE for details.
-
-"""Tag support."""
-
-
-class TagContext(object):
-    """A tag context."""
-
-    def __init__(self, parent=None):
-        """Create a new TagContext.
-
-        :param parent: If provided, uses this as the parent context.  Any tags
-            that are current on the parent at the time of construction are
-            current in this context.
-        """
-        self.parent = parent
-        self._tags = set()
-        if parent:
-            self._tags.update(parent.get_current_tags())
-
-    def get_current_tags(self):
-        """Return any current tags."""
-        return set(self._tags)
-
-    def change_tags(self, new_tags, gone_tags):
-        """Change the tags on this context.
-
-        :param new_tags: A set of tags to add to this context.
-        :param gone_tags: A set of tags to remove from this context.
-        :return: The tags now current on this context.
-        """
-        self._tags.update(new_tags)
-        self._tags.difference_update(gone_tags)
-        return self.get_current_tags()
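-
-# Example (editor's illustration, grounded in the class above): a child
-# context inherits the tags current on its parent at construction time:
-#
-#   parent = TagContext()
-#   parent.change_tags(set(['slow']), set())
-#   child = TagContext(parent)
-#   child.get_current_tags()  # -> set(['slow'])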
diff --git a/lib/testtools/testtools/testcase.py b/lib/testtools/testtools/testcase.py
deleted file mode 100644
index b646f82..0000000
--- a/lib/testtools/testtools/testcase.py
+++ /dev/null
@@ -1,1022 +0,0 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
-
-"""Test case related stuff."""
-
-__metaclass__ = type
-__all__ = [
-    'attr',
-    'clone_test_with_new_id',
-    'ExpectedException',
-    'gather_details',
-    'run_test_with',
-    'skip',
-    'skipIf',
-    'skipUnless',
-    'TestCase',
-    ]
-
-import copy
-import functools
-import itertools
-import sys
-import types
-import unittest
-
-from extras import (
-    safe_hasattr,
-    try_import,
-    )
-
-from testtools import (
-    content,
-    )
-from testtools.compat import (
-    advance_iterator,
-    reraise,
-    )
-from testtools.matchers import (
-    Annotate,
-    Contains,
-    Equals,
-    MatchesAll,
-    MatchesException,
-    MismatchError,
-    Is,
-    IsInstance,
-    Not,
-    Raises,
-    )
-from testtools.monkey import patch
-from testtools.runtest import RunTest
-from testtools.testresult import (
-    ExtendedToOriginalDecorator,
-    TestResult,
-    )
-
-wraps = try_import('functools.wraps')
-
-class TestSkipped(Exception):
-    """Raised within TestCase.run() when a test is skipped."""
-TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
-TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
-
-
-class _UnexpectedSuccess(Exception):
-    """An unexpected success was raised.
-
-    Note that this exception is private plumbing in testtools' testcase
-    module.
-    """
-_UnexpectedSuccess = try_import(
-    'unittest2.case._UnexpectedSuccess', _UnexpectedSuccess)
-_UnexpectedSuccess = try_import(
-    'unittest.case._UnexpectedSuccess', _UnexpectedSuccess)
-
-class _ExpectedFailure(Exception):
-    """An expected failure occured.
-
-    Note that this exception is private plumbing in testtools' testcase
-    module.
-    """
-_ExpectedFailure = try_import(
-    'unittest2.case._ExpectedFailure', _ExpectedFailure)
-_ExpectedFailure = try_import(
-    'unittest.case._ExpectedFailure', _ExpectedFailure)
-
-
-# Copied from unittest before python 3.4 release. Used to maintain
-# compatibility with unittest sub-test feature. Users should not use this
-# directly.
-def _expectedFailure(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        try:
-            func(*args, **kwargs)
-        except Exception:
-            raise _ExpectedFailure(sys.exc_info())
-        raise _UnexpectedSuccess
-    return wrapper
-
-
-def run_test_with(test_runner, **kwargs):
-    """Decorate a test as using a specific ``RunTest``.
-
-    e.g.::
-
-      @run_test_with(CustomRunner, timeout=42)
-      def test_foo(self):
-          self.assertTrue(True)
-
-    The returned decorator works by setting an attribute on the decorated
-    function.  `TestCase.__init__` looks for this attribute when deciding on a
-    ``RunTest`` factory.  If you wish to use multiple decorators on a test
-    method, then you must either make this one the top-most decorator, or you
-    must write your decorators so that they update the wrapping function with
-    the attributes of the wrapped function.  The latter is recommended style
-    anyway.  ``functools.wraps`` and
-    ``twisted.python.util.mergeFunctionMetadata`` can help you do this.
-
-    :param test_runner: A ``RunTest`` factory that takes a test case and an
-        optional list of exception handlers.  See ``RunTest``.
-    :param kwargs: Keyword arguments to pass on as extra arguments to
-        'test_runner'.
-    :return: A decorator to be used for marking a test as needing a special
-        runner.
-    """
-    def decorator(function):
-        # Set an attribute on 'function' which will inform TestCase how to
-        # make the runner.
-        def _run_test_with(case, handlers=None, last_resort=None):
-            try:
-                return test_runner(
-                    case, handlers=handlers, last_resort=last_resort,
-                    **kwargs)
-            except TypeError:
-                # Backwards compat: if we can't call the constructor
-                # with last_resort, try without that.
-                return test_runner(case, handlers=handlers, **kwargs)
-        function._run_test_with = _run_test_with
-        return function
-    return decorator
-
-
-def _copy_content(content_object):
-    """Make a copy of the given content object.
-
-    The content within ``content_object`` is iterated and saved. This is
-    useful when the source of the content is volatile, such as a log file
-    in a temporary directory.
-
-    :param content_object: A `content.Content` instance.
-    :return: A `content.Content` instance with the same mime-type as
-        ``content_object`` and a non-volatile copy of its content.
-    """
-    content_bytes = list(content_object.iter_bytes())
-    content_callback = lambda: content_bytes
-    return content.Content(content_object.content_type, content_callback)
-
-
-def gather_details(source_dict, target_dict):
-    """Merge the details from ``source_dict`` into ``target_dict``.
-
-    :param source_dict: A dictionary from which details will be gathered.
-    :param target_dict: A dictionary into which details will be gathered.
-    """
-    for name, content_object in source_dict.items():
-        new_name = name
-        disambiguator = itertools.count(1)
-        while new_name in target_dict:
-            new_name = '%s-%d' % (name, advance_iterator(disambiguator))
-        name = new_name
-        target_dict[name] = _copy_content(content_object)
-
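-# Example (editor's illustration): colliding names are disambiguated with a
-# numeric suffix rather than overwritten:
-#
-#   target = {'log': some_content}
-#   gather_details({'log': other_content}, target)
-#   sorted(target)  # -> ['log', 'log-1']
-#
-# some_content and other_content stand in for content.Content instances.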
-
-class TestCase(unittest.TestCase):
-    """Extensions to the basic TestCase.
-
-    :ivar exception_handlers: Exceptions to catch from setUp, runTest and
-        tearDown. This list is able to be modified at any time and consists of
-        (exception_class, handler(case, result, exception_value)) pairs.
-    :ivar force_failure: Force testtools.RunTest to fail the test after the
-        test has completed.
-    :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
-        Defaults to ``RunTest``.  The factory is expected to take a test case
-        and an optional list of exception handlers.
-    """
-
-    skipException = TestSkipped
-
-    run_tests_with = RunTest
-
-    def __init__(self, *args, **kwargs):
-        """Construct a TestCase.
-
-        :param testMethod: The name of the method to run.
-        :keyword runTest: Optional class to use to execute the test. If not
-            supplied ``RunTest`` is used. The instance to be used is created
-            when run() is invoked, so will be fresh each time. Overrides
-            ``TestCase.run_tests_with`` if given.
-        """
-        runTest = kwargs.pop('runTest', None)
-        super(TestCase, self).__init__(*args, **kwargs)
-        self._cleanups = []
-        self._unique_id_gen = itertools.count(1)
-        # Generators to ensure unique traceback ids.  Maps traceback label to
-        # iterators.
-        self._traceback_id_gens = {}
-        self.__setup_called = False
-        self.__teardown_called = False
-        # __details is lazy-initialized so that a constructed-but-not-run
-        # TestCase is safe to use with clone_test_with_new_id.
-        self.__details = None
-        test_method = self._get_test_method()
-        if runTest is None:
-            runTest = getattr(
-                test_method, '_run_test_with', self.run_tests_with)
-        self.__RunTest = runTest
-        if getattr(test_method, '__unittest_expecting_failure__', False):
-            setattr(self, self._testMethodName, _expectedFailure(test_method))
-        # Used internally for onException processing - used to gather extra
-        # data from exceptions.
-        self.__exception_handlers = []
-        # Passed to RunTest to map exceptions to result actions
-        self.exception_handlers = [
-            (self.skipException, self._report_skip),
-            (self.failureException, self._report_failure),
-            (_ExpectedFailure, self._report_expected_failure),
-            (_UnexpectedSuccess, self._report_unexpected_success),
-            (Exception, self._report_error),
-            ]
-
-    def __eq__(self, other):
-        eq = getattr(unittest.TestCase, '__eq__', None)
-        if eq is not None and not unittest.TestCase.__eq__(self, other):
-            return False
-        return self.__dict__ == other.__dict__
-
-    def __repr__(self):
-        # We add id to the repr because it makes testing testtools easier.
-        return "<%s id=0x%0x>" % (self.id(), id(self))
-
-    def addDetail(self, name, content_object):
-        """Add a detail to be reported with this test's outcome.
-
-        For more details see pydoc testtools.TestResult.
-
-        :param name: The name to give this detail.
-        :param content_object: The content object for this detail. See
-            testtools.content for more detail.
-        """
-        if self.__details is None:
-            self.__details = {}
-        self.__details[name] = content_object
-
-    def getDetails(self):
-        """Get the details dict that will be reported with this test's outcome.
-
-        For more details see pydoc testtools.TestResult.
-        """
-        if self.__details is None:
-            self.__details = {}
-        return self.__details
-
-    def patch(self, obj, attribute, value):
-        """Monkey-patch 'obj.attribute' to 'value' while the test is running.
-
-        If 'obj' has no such attribute, the monkey-patch will still go ahead,
-        and the attribute will be deleted instead of restored to its original
-        value.
-
-        :param obj: The object to patch. Can be anything.
-        :param attribute: The attribute on 'obj' to patch.
-        :param value: The value to set 'obj.attribute' to.
-        """
-        self.addCleanup(patch(obj, attribute, value))
-
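-    # Example (editor's sketch): replace an attribute for the duration of the
-    # test; the cleanup scheduled by patch() restores the original value, or
-    # deletes the attribute if it did not exist before:
-    #
-    #   def test_quiet(self):
-    #       self.patch(mymodule, 'VERBOSE', False)
-    #
-    # mymodule and VERBOSE are hypothetical names.
-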
-    def shortDescription(self):
-        return self.id()
-
-    def skipTest(self, reason):
-        """Cause this test to be skipped.
-
-        This raises self.skipException(reason). skipException is raised
-        to permit a skip to be triggered at any point (during setUp or the
-        testMethod itself). The run() method catches skipException and
-        translates that into a call to the result object's addSkip method.
-
-        :param reason: The reason why the test is being skipped. This must
-            support being cast into a unicode string for reporting.
-        """
-        raise self.skipException(reason)
-
-    # skipTest is how python2.7 spells this. Sometime in the future
-    # this should be given a deprecation decorator - RBC 20100611.
-    skip = skipTest
-
-    def _formatTypes(self, classOrIterable):
-        """Format a class or a bunch of classes for display in an error."""
-        className = getattr(classOrIterable, '__name__', None)
-        if className is None:
-            className = ', '.join(klass.__name__ for klass in classOrIterable)
-        return className
-
-    def addCleanup(self, function, *arguments, **keywordArguments):
-        """Add a cleanup function to be called after tearDown.
-
-        Functions added with addCleanup will be called in reverse order of
-        adding after tearDown, or after setUp if setUp raises an exception.
-
-        If a function added with addCleanup raises an exception, the error
-        will be recorded as a test error, and the next cleanup will then be
-        run.
-
-        Cleanup functions are always called before a test finishes running,
-        even if setUp is aborted by an exception.
-        """
-        self._cleanups.append((function, arguments, keywordArguments))
-
-    def addOnException(self, handler):
-        """Add a handler to be called when an exception occurs in test code.
-
-        This handler cannot affect what result methods are called, and is
-        called before any outcome is called on the result object. An example
-        use for it is to add some diagnostic state to the test details dict
-        which is expensive to calculate and not interesting for reporting in
-        the success case.
-
-        Handlers are called before the outcome (such as addFailure) that
-        the exception has caused.
-
-        Handlers are called in first-added, first-called order, and if they
-        raise an exception, that will propagate out of the test running
-        machinery, halting test processing. As a result, do not call code that
-        may unreasonably fail.
-        """
-        self.__exception_handlers.append(handler)
-
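-    # Example (editor's sketch): attach expensive diagnostics only when a
-    # test actually raises:
-    #
-    #   def setUp(self):
-    #       super(MyTest, self).setUp()
-    #       self.addOnException(
-    #           lambda exc_info: self.addDetail(
-    #               'state', content.text_content(self._dump_state())))
-    #
-    # MyTest and _dump_state are hypothetical names.
-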
-    def _add_reason(self, reason):
-        self.addDetail('reason', content.text_content(reason))
-
-    def assertEqual(self, expected, observed, message=''):
-        """Assert that 'expected' is equal to 'observed'.
-
-        :param expected: The expected value.
-        :param observed: The observed value.
-        :param message: An optional message to include in the error.
-        """
-        matcher = Equals(expected)
-        self.assertThat(observed, matcher, message)
-
-    failUnlessEqual = assertEquals = assertEqual
-
-    def assertIn(self, needle, haystack, message=''):
-        """Assert that needle is in haystack."""
-        self.assertThat(haystack, Contains(needle), message)
-
-    def assertIsNone(self, observed, message=''):
-        """Assert that 'observed' is equal to None.
-
-        :param observed: The observed value.
-        :param message: An optional message describing the error.
-        """
-        matcher = Is(None)
-        self.assertThat(observed, matcher, message)
-
-    def assertIsNotNone(self, observed, message=''):
-        """Assert that 'observed' is not equal to None.
-
-        :param observed: The observed value.
-        :param message: An optional message describing the error.
-        """
-        matcher = Not(Is(None))
-        self.assertThat(observed, matcher, message)
-
-    def assertIs(self, expected, observed, message=''):
-        """Assert that 'expected' is 'observed'.
-
-        :param expected: The expected value.
-        :param observed: The observed value.
-        :param message: An optional message describing the error.
-        """
-        matcher = Is(expected)
-        self.assertThat(observed, matcher, message)
-
-    def assertIsNot(self, expected, observed, message=''):
-        """Assert that 'expected' is not 'observed'."""
-        matcher = Not(Is(expected))
-        self.assertThat(observed, matcher, message)
-
-    def assertNotIn(self, needle, haystack, message=''):
-        """Assert that needle is not in haystack."""
-        matcher = Not(Contains(needle))
-        self.assertThat(haystack, matcher, message)
-
-    def assertIsInstance(self, obj, klass, msg=None):
-        if isinstance(klass, tuple):
-            matcher = IsInstance(*klass)
-        else:
-            matcher = IsInstance(klass)
-        self.assertThat(obj, matcher, msg)
-
-    def assertRaises(self, excClass, callableObj, *args, **kwargs):
-        """Fail unless an exception of class excClass is thrown
-           by callableObj when invoked with arguments args and keyword
-           arguments kwargs. If a different type of exception is
-           thrown, it will not be caught, and the test case will be
-           deemed to have suffered an error, exactly as for an
-           unexpected exception.
-        """
-        class ReRaiseOtherTypes(object):
-            def match(self, matchee):
-                if not issubclass(matchee[0], excClass):
-                    reraise(*matchee)
-        class CaptureMatchee(object):
-            def match(self, matchee):
-                self.matchee = matchee[1]
-        capture = CaptureMatchee()
-        matcher = Raises(MatchesAll(ReRaiseOtherTypes(),
-                MatchesException(excClass), capture))
-        our_callable = Nullary(callableObj, *args, **kwargs)
-        self.assertThat(our_callable, matcher)
-        return capture.matchee
-    failUnlessRaises = assertRaises
-
-    def assertThat(self, matchee, matcher, message='', verbose=False):
-        """Assert that matchee is matched by matcher.
-
-        :param matchee: An object to match with matcher.
-        :param matcher: An object meeting the testtools.Matcher protocol.
-        :raises MismatchError: When matcher does not match matchee.
-        """
-        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
-        if mismatch_error is not None:
-            raise mismatch_error
-
-    def addDetailUniqueName(self, name, content_object):
-        """Add a detail to the test, but ensure it's name is unique.
-
-        This method checks whether ``name`` conflicts with a detail that has
-        already been added to the test. If it does, it will modify ``name`` to
-        avoid the conflict.
-
-        For more details see pydoc testtools.TestResult.
-
-        :param name: The name to give this detail.
-        :param content_object: The content object for this detail. See
-            testtools.content for more detail.
-        """
-        existing_details = self.getDetails()
-        full_name = name
-        suffix = 1
-        while full_name in existing_details:
-            full_name = "%s-%d" % (name, suffix)
-            suffix += 1
-        self.addDetail(full_name, content_object)
-
-    def expectThat(self, matchee, matcher, message='', verbose=False):
-        """Check that matchee is matched by matcher, but delay the assertion failure.
-
-        This method behaves similarly to ``assertThat``, except that a failed
-        match does not exit the test immediately. The rest of the test code will
-        continue to run, and the test will be marked as failing after the test
-        has finished.
-
-        :param matchee: An object to match with matcher.
-        :param matcher: An object meeting the testtools.Matcher protocol.
-        :param message: If specified, show this message with any failed match.
-        """
-        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
-
-        if mismatch_error is not None:
-            self.addDetailUniqueName(
-                "Failed expectation",
-                content.StacktraceContent(
-                    postfix_content="MismatchError: " + str(mismatch_error)
-                )
-            )
-            self.force_failure = True
-
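-    # Example (editor's illustration): both expectations are checked and any
-    # failures recorded; the test is only marked as failed once it finishes:
-    #
-    #   def test_config(self):
-    #       self.expectThat(config.host, Equals('localhost'))
-    #       self.expectThat(config.port, Equals(8080))
-    #
-    # config is a hypothetical object under test.
-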
-    def _matchHelper(self, matchee, matcher, message, verbose):
-        matcher = Annotate.if_message(message, matcher)
-        mismatch = matcher.match(matchee)
-        if not mismatch:
-            return
-        for (name, value) in mismatch.get_details().items():
-            self.addDetailUniqueName(name, value)
-        return MismatchError(matchee, matcher, mismatch, verbose)
-
-    def defaultTestResult(self):
-        return TestResult()
-
-    def expectFailure(self, reason, predicate, *args, **kwargs):
-        """Check that a test fails in a particular way.
-
-        If the test fails in the expected way, a KnownFailure is caused. If it
-        succeeds an UnexpectedSuccess is caused.
-
-        The expected use of expectFailure is as a barrier at the point in a
-        test where the test would fail. For example:
-        >>> def test_foo(self):
-        ...     self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
-        ...     self.assertEqual(1, 0)
-
-        If in the future 1 were to equal 0, the expectFailure call can simply
-        be removed. This separation preserves the original intent of the test
-        while it is in the expectFailure mode.
-        """
-        # TODO: implement with matchers.
-        self._add_reason(reason)
-        try:
-            predicate(*args, **kwargs)
-        except self.failureException:
-            # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new
-            #                unittest _ExpectedFailure wants old traceback
-            exc_info = sys.exc_info()
-            try:
-                self._report_traceback(exc_info)
-                raise _ExpectedFailure(exc_info)
-            finally:
-                del exc_info
-        else:
-            raise _UnexpectedSuccess(reason)
-
-    def getUniqueInteger(self):
-        """Get an integer unique to this test.
-
-        Returns an integer that is guaranteed to be unique to this instance.
-        Use this when you need an arbitrary integer in your test, or as a
-        helper for custom anonymous factory methods.
-        """
-        return advance_iterator(self._unique_id_gen)
-
-    def getUniqueString(self, prefix=None):
-        """Get a string unique to this test.
-
-        Returns a string that is guaranteed to be unique to this instance. Use
-        this when you need an arbitrary string in your test, or as a helper
-        for custom anonymous factory methods.
-
-        :param prefix: The prefix of the string. If not provided, defaults
-            to the id of the test.
-        :return: A bytestring of '<prefix>-<unique_int>'.
-        """
-        if prefix is None:
-            prefix = self.id()
-        return '%s-%d' % (prefix, self.getUniqueInteger())
-
-    def onException(self, exc_info, tb_label='traceback'):
-        """Called when an exception propogates from test code.
-
-        :seealso: addOnException
-        """
-        if exc_info[0] not in [
-            TestSkipped, _UnexpectedSuccess, _ExpectedFailure]:
-            self._report_traceback(exc_info, tb_label=tb_label)
-        for handler in self.__exception_handlers:
-            handler(exc_info)
-
-    @staticmethod
-    def _report_error(self, result, err):
-        result.addError(self, details=self.getDetails())
-
-    @staticmethod
-    def _report_expected_failure(self, result, err):
-        result.addExpectedFailure(self, details=self.getDetails())
-
-    @staticmethod
-    def _report_failure(self, result, err):
-        result.addFailure(self, details=self.getDetails())
-
-    @staticmethod
-    def _report_skip(self, result, err):
-        if err.args:
-            reason = err.args[0]
-        else:
-            reason = "no reason given."
-        self._add_reason(reason)
-        result.addSkip(self, details=self.getDetails())
-
-    def _report_traceback(self, exc_info, tb_label='traceback'):
-        id_gen = self._traceback_id_gens.setdefault(
-            tb_label, itertools.count(0))
-        while True:
-            tb_id = advance_iterator(id_gen)
-            if tb_id:
-                tb_label = '%s-%d' % (tb_label, tb_id)
-            if tb_label not in self.getDetails():
-                break
-        self.addDetail(tb_label, content.TracebackContent(exc_info, self))
-
-    @staticmethod
-    def _report_unexpected_success(self, result, err):
-        result.addUnexpectedSuccess(self, details=self.getDetails())
-
-    def run(self, result=None):
-        try:
-            run_test = self.__RunTest(
-                self, self.exception_handlers, last_resort=self._report_error)
-        except TypeError:
-            # Backwards compat: if we can't call the constructor
-            # with last_resort, try without that.
-            run_test = self.__RunTest(self, self.exception_handlers)
-        return run_test.run(result)
-
-    def _run_setup(self, result):
-        """Run the setUp function for this test.
-
-        :param result: A testtools.TestResult to report activity to.
-        :raises ValueError: If the base class setUp is not called, a
-            ValueError is raised.
-        """
-        ret = self.setUp()
-        if not self.__setup_called:
-            raise ValueError(
-                "In File: %s\n"
-                "TestCase.setUp was not called. Have you upcalled all the "
-                "way up the hierarchy from your setUp? e.g. Call "
-                "super(%s, self).setUp() from your setUp()."
-                % (sys.modules[self.__class__.__module__].__file__,
-                   self.__class__.__name__))
-        return ret
-
-    def _run_teardown(self, result):
-        """Run the tearDown function for this test.
-
-        :param result: A testtools.TestResult to report activity to.
-        :raises ValueError: If the base class tearDown is not called, a
-            ValueError is raised.
-        """
-        ret = self.tearDown()
-        if not self.__teardown_called:
-            raise ValueError(
-                "In File: %s\n"
-                "TestCase.tearDown was not called. Have you upcalled all the "
-                "way up the hierarchy from your tearDown? e.g. Call "
-                "super(%s, self).tearDown() from your tearDown()."
-                % (sys.modules[self.__class__.__module__].__file__,
-                   self.__class__.__name__))
-        return ret
-
-    def _get_test_method(self):
-        method_name = getattr(self, '_testMethodName')
-        return getattr(self, method_name)
-
-    def _run_test_method(self, result):
-        """Run the test method for this test.
-
-        :param result: A testtools.TestResult to report activity to.
-        :return: None.
-        """
-        return self._get_test_method()()
-
-    def useFixture(self, fixture):
-        """Use fixture in a test case.
-
-        The fixture will be set up, and self.addCleanup(fixture.cleanUp) called.
-
-        :param fixture: The fixture to use.
-        :return: The fixture, after setting it up and scheduling a cleanup for
-           it.
-        """
-        try:
-            fixture.setUp()
-        except:
-            exc_info = sys.exc_info()
-            try:
-                gather_details(fixture.getDetails(), self.getDetails())
-            except:
-                # Report the setUp exception, then raise the error during
-                # gather_details.
-                self._report_traceback(exc_info)
-                raise
-            else:
-                # gather_details worked, so raise the exception setUp
-                # encountered.
-                reraise(*exc_info)
-        else:
-            self.addCleanup(fixture.cleanUp)
-            self.addCleanup(
-                gather_details, fixture.getDetails(), self.getDetails())
-            return fixture
-
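-    # Example (editor's sketch, assuming the third-party 'fixtures' package):
-    # the fixture is set up immediately and cleaned up when the test ends:
-    #
-    #   def test_writes_file(self):
-    #       tempdir = self.useFixture(fixtures.TempDir())
-    #       path = os.path.join(tempdir.path, 'out.txt')
-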
-    def setUp(self):
-        super(TestCase, self).setUp()
-        if self.__setup_called:
-            raise ValueError(
-                "In File: %s\n"
-                "TestCase.setUp was already called. Do not explicitly call "
-                "setUp from your tests. In your own setUp, use super to call "
-                "the base setUp."
-                % (sys.modules[self.__class__.__module__].__file__,))
-        self.__setup_called = True
-
-    def tearDown(self):
-        super(TestCase, self).tearDown()
-        if self.__teardown_called:
-            raise ValueError(
-                "In File: %s\n"
-                "TestCase.tearDown was already called. Do not explicitly call "
-                "tearDown from your tests. In your own tearDown, use super to "
-                "call the base tearDown."
-                % (sys.modules[self.__class__.__module__].__file__,))
-        self.__teardown_called = True
-
-
-class PlaceHolder(object):
-    """A placeholder test.
-
-    `PlaceHolder` implements much of the same interface as TestCase and is
-    particularly suitable for being added to TestResults.
-    """
-
-    failureException = None
-
-    def __init__(self, test_id, short_description=None, details=None,
-        outcome='addSuccess', error=None, tags=None, timestamps=(None, None)):
-        """Construct a `PlaceHolder`.
-
-        :param test_id: The id of the placeholder test.
-        :param short_description: The short description of the placeholder
-            test. If not provided, the id will be used instead.
-        :param details: Outcome details as accepted by addSuccess etc.
-        :param error: If not None, an exc_info tuple for the test's error;
-            it is stored in the details as 'traceback', overriding any
-            existing entry.
-        :param outcome: The outcome to call. Defaults to 'addSuccess'.
-        :param tags: Tags to report for the test.
-        :param timestamps: A two-tuple of timestamps for the test start and
-            finish. Each timestamp may be None to indicate it is not known.
-        """
-        self._test_id = test_id
-        self._short_description = short_description
-        self._details = details or {}
-        self._outcome = outcome
-        if error is not None:
-            self._details['traceback'] = content.TracebackContent(error, self)
-        tags = tags or frozenset()
-        self._tags = frozenset(tags)
-        self._timestamps = timestamps
-
-    def __call__(self, result=None):
-        return self.run(result=result)
-
-    def __repr__(self):
-        internal = [self._outcome, self._test_id, self._details]
-        if self._short_description is not None:
-            internal.append(self._short_description)
-        return "<%s.%s(%s)>" % (
-            self.__class__.__module__,
-            self.__class__.__name__,
-            ", ".join(map(repr, internal)))
-
-    def __str__(self):
-        return self.id()
-
-    def countTestCases(self):
-        return 1
-
-    def debug(self):
-        pass
-
-    def id(self):
-        return self._test_id
-
-    def _result(self, result):
-        if result is None:
-            return TestResult()
-        else:
-            return ExtendedToOriginalDecorator(result)
-
-    def run(self, result=None):
-        result = self._result(result)
-        if self._timestamps[0] is not None:
-            result.time(self._timestamps[0])
-        result.tags(self._tags, set())
-        result.startTest(self)
-        if self._timestamps[1] is not None:
-            result.time(self._timestamps[1])
-        outcome = getattr(result, self._outcome)
-        outcome(self, details=self._details)
-        result.stopTest(self)
-        result.tags(set(), self._tags)
-
-    def shortDescription(self):
-        if self._short_description is None:
-            return self.id()
-        else:
-            return self._short_description
-
-
-def ErrorHolder(test_id, error, short_description=None, details=None):
-    """Construct an `ErrorHolder`.
-
-    :param test_id: The id of the test.
-    :param error: The exc info tuple that will be used as the test's error.
-        This is inserted into the details as 'traceback' - any existing key
-        will be overridden.
-    :param short_description: An optional short description of the test.
-    :param details: Outcome details as accepted by addSuccess etc.
-    """
-    return PlaceHolder(test_id, short_description=short_description,
-        details=details, outcome='addError', error=error)
-
-
-def _clone_test_id_callback(test, callback):
-    """Copy a `TestCase`, and make it call callback for its id().
-
-    This is only expected to be used on tests that have been constructed but
-    not executed.
-
-    :param test: A TestCase instance.
-    :param callback: A callable that takes no parameters and returns a string.
-    :return: A copy.copy of the test with id=callback.
-    """
-    newTest = copy.copy(test)
-    newTest.id = callback
-    return newTest
-
-
-def clone_test_with_new_id(test, new_id):
-    """Copy a `TestCase`, and give the copied test a new id.
-
-    This is only expected to be used on tests that have been constructed but
-    not executed.
-    """
-    return _clone_test_id_callback(test, lambda: new_id)
-
-
-def attr(*args):
-    """Decorator for adding attributes to WithAttributes.
-
-    :param args: The names of the attributes to add.
-    :return: A callable that when applied to a WithAttributes will
-        alter its id to enumerate the added attributes.
-    """
-    def decorate(fn):
-        if not safe_hasattr(fn, '__testtools_attrs'):
-            fn.__testtools_attrs = set()
-        fn.__testtools_attrs.update(args)
-        return fn
-    return decorate
-
-
-class WithAttributes(object):
-    """A mix-in class for modifying test id by attributes.
-
-    e.g.
-    >>> class MyTest(WithAttributes, TestCase):
-    ...    @attr('foo')
-    ...    def test_bar(self):
-    ...        pass
-    >>> MyTest('test_bar').id()
-    'testtools.testcase.MyTest/test_bar[foo]'
-    """
-
-    def id(self):
-        orig = super(WithAttributes, self).id()
-        # Depends on testtools.TestCase._get_test_method, be nice to support
-        # plain unittest.
-        fn = self._get_test_method()
-        attributes = getattr(fn, '__testtools_attrs', None)
-        if not attributes:
-            return orig
-        return orig + '[' + ','.join(sorted(attributes)) + ']'
-
-
-def skip(reason):
-    """A decorator to skip unit tests.
-
-    This is just syntactic sugar so users don't have to change any of their
-    unit tests in order to migrate to python 2.7, which provides the
-    @unittest.skip decorator.
-    """
-    def decorator(test_item):
-        # This attribute signals to RunTest._run_core that the entire test
-        # must be skipped - including setUp and tearDown. This makes us
-        # compatible with unittest.skip* functions, which set the same
-        # attributes.
-        test_item.__unittest_skip__ = True
-        test_item.__unittest_skip_why__ = reason
-        if wraps is not None:
-            @wraps(test_item)
-            def skip_wrapper(*args, **kwargs):
-                raise TestCase.skipException(reason)
-        else:
-            def skip_wrapper(test_item):
-                test_item.skip(reason)
-        return skip_wrapper
-    return decorator
-
-
-def skipIf(condition, reason):
-    """A decorator to skip a test if the condition is true."""
-    if condition:
-        return skip(reason)
-    def _id(obj):
-        return obj
-    return _id
-
-
-def skipUnless(condition, reason):
-    """A decorator to skip a test unless the condition is true."""
-    if not condition:
-        return skip(reason)
-    def _id(obj):
-        return obj
-    return _id
-
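-# Example (editor's illustration): the condition is evaluated once, at
-# decoration time, so it should not depend on per-test state:
-#
-#   @skipIf(sys.platform == 'win32', 'requires POSIX file permissions')
-#   def test_chmod(self):
-#       ...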
-
-class ExpectedException:
-    """A context manager to handle expected exceptions.
-
-      def test_foo(self):
-          with ExpectedException(ValueError, 'fo.*'):
-              raise ValueError('foo')
-
-    will pass.  If the raised exception has a type other than the specified
-    type, it will be re-raised.  If it has a 'str()' that does not match the
-    given regular expression, an AssertionError will be raised.  If no
-    exception is raised, an AssertionError will be raised.
-    """
-
-    def __init__(self, exc_type, value_re=None, msg=None):
-        """Construct an `ExpectedException`.
-
-        :param exc_type: The type of exception to expect.
-        :param value_re: A regular expression to match against the
-            'str()' of the raised exception.
-        :param msg: An optional message explaining the failure.
-        """
-        self.exc_type = exc_type
-        self.value_re = value_re
-        self.msg = msg
-
-    def __enter__(self):
-        pass
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        if exc_type is None:
-            error_msg = '%s not raised.' % self.exc_type.__name__
-            if self.msg:
-                error_msg = error_msg + ' : ' + self.msg
-            raise AssertionError(error_msg)
-        if exc_type != self.exc_type:
-            return False
-        if self.value_re:
-            matcher = MatchesException(self.exc_type, self.value_re)
-            if self.msg:
-                matcher = Annotate(self.msg, matcher)
-            mismatch = matcher.match((exc_type, exc_value, traceback))
-            if mismatch:
-                raise AssertionError(mismatch.describe())
-        return True
-
-
-class Nullary(object):
-    """Turn a callable into a nullary callable.
-
-    The advantage of this over ``lambda: f(*args, **kwargs)`` is that it
-    preserves the ``repr()`` of ``f``.
-    """
-
-    def __init__(self, callable_object, *args, **kwargs):
-        self._callable_object = callable_object
-        self._args = args
-        self._kwargs = kwargs
-
-    def __call__(self):
-        return self._callable_object(*self._args, **self._kwargs)
-
-    def __repr__(self):
-        return repr(self._callable_object)
-
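-# Example (editor's illustration): unlike ``lambda: sorted(data)``, the
-# wrapper's repr() still identifies the underlying callable:
-#
-#   bound = Nullary(sorted, [3, 1, 2])
-#   repr(bound)  # same as repr(sorted)
-#   bound()      # -> [1, 2, 3]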
-
-class DecorateTestCaseResult(object):
-    """Decorate a TestCase and permit customisation of the result for runs."""
-
-    def __init__(self, case, callout, before_run=None, after_run=None):
-        """Construct a DecorateTestCaseResult.
-
-        :param case: The case to decorate.
-        :param callout: A callback to call when run/__call__/debug is called.
-            Must take a result parameter and return a result object to be used.
-            For instance: lambda result: result.
-        :param before_run: If set, call this with the decorated result before
-            calling into the decorated run/__call__ method.
-        :param after_run: If set, call this with the decorated result after
-            calling into the decorated run/__call__ method.
-        """
-        self.decorated = case
-        self.callout = callout
-        self.before_run = before_run
-        self.after_run = after_run
-
-    def _run(self, result, run_method):
-        result = self.callout(result)
-        if self.before_run:
-            self.before_run(result)
-        try:
-            return run_method(result)
-        finally:
-            if self.after_run:
-                self.after_run(result)
-
-    def run(self, result=None):
-        self._run(result, self.decorated.run)
-
-    def __call__(self, result=None):
-        self._run(result, self.decorated)
-
-    def __getattr__(self, name):
-        return getattr(self.decorated, name)
-
-    def __delattr__(self, name):
-        delattr(self.decorated, name)
-
-    def __setattr__(self, name, value):
-        if name in ('decorated', 'callout', 'before_run', 'after_run'):
-            self.__dict__[name] = value
-            return
-        setattr(self.decorated, name, value)
-
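-# Example (editor's sketch, using the Tagger decorator exported from
-# testtools.testresult): tag every result this case reports to:
-#
-#   from testtools.testresult import Tagger
-#   case = DecorateTestCaseResult(
-#       case, lambda result: Tagger(result, set(['ui']), set()))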
-
-# Signal that this is part of the testing framework, and that code from this
-# should not normally appear in tracebacks.
-__unittest = True
diff --git a/lib/testtools/testtools/testresult/__init__.py b/lib/testtools/testtools/testresult/__init__.py
deleted file mode 100644
index 5bf8f9c..0000000
--- a/lib/testtools/testtools/testresult/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-"""Test result objects."""
-
-__all__ = [
-    'CopyStreamResult',
-    'ExtendedToOriginalDecorator',
-    'ExtendedToStreamDecorator',
-    'MultiTestResult',
-    'StreamFailFast',
-    'StreamResult',
-    'StreamResultRouter',
-    'StreamSummary',
-    'StreamTagger',
-    'StreamToDict',
-    'StreamToExtendedDecorator',
-    'StreamToQueue',
-    'Tagger',
-    'TestByTestResult',
-    'TestControl',
-    'TestResult',
-    'TestResultDecorator',
-    'TextTestResult',
-    'ThreadsafeForwardingResult',
-    'TimestampingStreamResult',
-    ]
-
-from testtools.testresult.real import (
-    CopyStreamResult,
-    ExtendedToOriginalDecorator,
-    ExtendedToStreamDecorator,
-    MultiTestResult,
-    StreamFailFast,
-    StreamResult,
-    StreamResultRouter,
-    StreamSummary,
-    StreamTagger,
-    StreamToDict,
-    StreamToExtendedDecorator,
-    StreamToQueue,
-    Tagger,
-    TestByTestResult,
-    TestControl,
-    TestResult,
-    TestResultDecorator,
-    TextTestResult,
-    ThreadsafeForwardingResult,
-    TimestampingStreamResult,
-    )
diff --git a/lib/testtools/testtools/testresult/doubles.py b/lib/testtools/testtools/testresult/doubles.py
deleted file mode 100644
index d86f7fa..0000000
--- a/lib/testtools/testtools/testresult/doubles.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
-
-"""Doubles of test result objects, useful for testing unittest code."""
-
-__all__ = [
-    'Python26TestResult',
-    'Python27TestResult',
-    'ExtendedTestResult',
-    'StreamResult',
-    ]
-
-
-from testtools.tags import TagContext
-
-
-class LoggingBase(object):
-    """Basic support for logging of results."""
-
-    def __init__(self):
-        self._events = []
-        self.shouldStop = False
-        self._was_successful = True
-        self.testsRun = 0
-
-
-class Python26TestResult(LoggingBase):
-    """A precisely python 2.6 like test result, that logs."""
-
-    def addError(self, test, err):
-        self._was_successful = False
-        self._events.append(('addError', test, err))
-
-    def addFailure(self, test, err):
-        self._was_successful = False
-        self._events.append(('addFailure', test, err))
-
-    def addSuccess(self, test):
-        self._events.append(('addSuccess', test))
-
-    def startTest(self, test):
-        self._events.append(('startTest', test))
-        self.testsRun += 1
-
-    def stop(self):
-        self.shouldStop = True
-
-    def stopTest(self, test):
-        self._events.append(('stopTest', test))
-
-    def wasSuccessful(self):
-        return self._was_successful
-
-
-class Python27TestResult(Python26TestResult):
-    """A precisely python 2.7 like test result, that logs."""
-
-    def __init__(self):
-        super(Python27TestResult, self).__init__()
-        self.failfast = False
-
-    def addError(self, test, err):
-        super(Python27TestResult, self).addError(test, err)
-        if self.failfast:
-            self.stop()
-
-    def addFailure(self, test, err):
-        super(Python27TestResult, self).addFailure(test, err)
-        if self.failfast:
-            self.stop()
-
-    def addExpectedFailure(self, test, err):
-        self._events.append(('addExpectedFailure', test, err))
-
-    def addSkip(self, test, reason):
-        self._events.append(('addSkip', test, reason))
-
-    def addUnexpectedSuccess(self, test):
-        self._events.append(('addUnexpectedSuccess', test))
-        if self.failfast:
-            self.stop()
-
-    def startTestRun(self):
-        self._events.append(('startTestRun',))
-
-    def stopTestRun(self):
-        self._events.append(('stopTestRun',))
-
-
-class ExtendedTestResult(Python27TestResult):
-    """A test result like the proposed extended unittest result API."""
-
-    def __init__(self):
-        super(ExtendedTestResult, self).__init__()
-        self._tags = TagContext()
-
-    def addError(self, test, err=None, details=None):
-        self._was_successful = False
-        self._events.append(('addError', test, err or details))
-
-    def addFailure(self, test, err=None, details=None):
-        self._was_successful = False
-        self._events.append(('addFailure', test, err or details))
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        self._events.append(('addExpectedFailure', test, err or details))
-
-    def addSkip(self, test, reason=None, details=None):
-        self._events.append(('addSkip', test, reason or details))
-
-    def addSuccess(self, test, details=None):
-        if details:
-            self._events.append(('addSuccess', test, details))
-        else:
-            self._events.append(('addSuccess', test))
-
-    def addUnexpectedSuccess(self, test, details=None):
-        self._was_successful = False
-        if details is not None:
-            self._events.append(('addUnexpectedSuccess', test, details))
-        else:
-            self._events.append(('addUnexpectedSuccess', test))
-
-    def progress(self, offset, whence):
-        self._events.append(('progress', offset, whence))
-
-    def startTestRun(self):
-        super(ExtendedTestResult, self).startTestRun()
-        self._was_successful = True
-        self._tags = TagContext()
-
-    def startTest(self, test):
-        super(ExtendedTestResult, self).startTest(test)
-        self._tags = TagContext(self._tags)
-
-    def stopTest(self, test):
-        self._tags = self._tags.parent
-        super(ExtendedTestResult, self).stopTest(test)
-
-    @property
-    def current_tags(self):
-        return self._tags.get_current_tags()
-
-    def tags(self, new_tags, gone_tags):
-        self._tags.change_tags(new_tags, gone_tags)
-        self._events.append(('tags', new_tags, gone_tags))
-
-    def time(self, time):
-        self._events.append(('time', time))
-
-    def wasSuccessful(self):
-        return self._was_successful
-
-
-class StreamResult(object):
-    """A StreamResult implementation for testing.
-
-    All events are logged to _events.
-    """
-
-    def __init__(self):
-        self._events = []
-
-    def startTestRun(self):
-        self._events.append(('startTestRun',))
-
-    def stopTestRun(self):
-        self._events.append(('stopTestRun',))
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        self._events.append(('status', test_id, test_status, test_tags,
-            runnable, file_name, file_bytes, eof, mime_type, route_code,
-            timestamp))
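A minimal sketch of how these doubles are typically consumed: run a case
against one and assert on the recorded ``_events`` list::

    import unittest

    from testtools.testresult.doubles import Python26TestResult

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    case = Sample('test_ok')
    result = Python26TestResult()
    case.run(result)
    assert result._events == [
        ('startTest', case), ('addSuccess', case), ('stopTest', case)]
    assert result.wasSuccessful()
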
diff --git a/lib/testtools/testtools/testresult/real.py b/lib/testtools/testtools/testresult/real.py
deleted file mode 100644
index 1453041..0000000
--- a/lib/testtools/testtools/testresult/real.py
+++ /dev/null
@@ -1,1777 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""Test results and related things."""
-
-__metaclass__ = type
-__all__ = [
-    'ExtendedToOriginalDecorator',
-    'ExtendedToStreamDecorator',
-    'MultiTestResult',
-    'StreamFailFast',
-    'StreamResult',
-    'StreamSummary',
-    'StreamTagger',
-    'StreamToDict',
-    'StreamToExtendedDecorator',
-    'StreamToQueue',
-    'Tagger',
-    'TestControl',
-    'TestResult',
-    'TestResultDecorator',
-    'ThreadsafeForwardingResult',
-    'TimestampingStreamResult',
-    ]
-
-import datetime
-from operator import methodcaller
-import sys
-import unittest
-
-from extras import safe_hasattr, try_import, try_imports
-parse_mime_type = try_import('mimeparse.parse_mime_type')
-Queue = try_imports(['Queue.Queue', 'queue.Queue'])
-
-from testtools.compat import str_is_unicode, _u, _b
-from testtools.content import (
-    Content,
-    text_content,
-    TracebackContent,
-    )
-from testtools.content_type import ContentType
-from testtools.tags import TagContext
-# circular import
-# from testtools.testcase import PlaceHolder
-PlaceHolder = None
-
-# From http://docs.python.org/library/datetime.html
-_ZERO = datetime.timedelta(0)
-
-# A UTC class.
-
-class UTC(datetime.tzinfo):
-    """UTC"""
-
-    def utcoffset(self, dt):
-        return _ZERO
-
-    def tzname(self, dt):
-        return "UTC"
-
-    def dst(self, dt):
-        return _ZERO
-
-utc = UTC()
-
-
-class TestResult(unittest.TestResult):
-    """Subclass of unittest.TestResult extending the protocol for flexability.
-
-    This test result supports an experimental protocol for providing additional
-    data with test outcomes. All the outcome methods take an optional dict
-    'details'. If supplied any other detail parameters like 'err' or 'reason'
-    should not be provided. The details dict is a mapping from names to
-    MIME content objects (see testtools.content). This permits attaching
-    tracebacks, log files, or even large objects like databases that were
-    part of the test fixture. Until this API is accepted into upstream
-    Python it is considered experimental: it may be replaced at any point
-    by a newer version more in line with upstream Python. Compatibility would
-    be aimed for in this case, but may not be possible.
-
-    :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
-    """
-
-    def __init__(self, failfast=False):
-        # startTestRun resets all attributes, and older clients don't know to
-        # call startTestRun, so it is called once here.
-        # Because subclasses may reasonably not expect this, we call the
-        # specific version we want to run.
-        self.failfast = failfast
-        TestResult.startTestRun(self)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        """Called when a test has failed in an expected manner.
-
-        As with addSuccess and addError, stopTest should still be called.
-
-        :param test: The test that failed in an expected manner.
-        :param err: The exc_info of the error that was raised.
-        :return: None
-        """
-        # This is the python 2.7 implementation
-        self.expectedFailures.append(
-            (test, self._err_details_to_string(test, err, details)))
-
-    def addError(self, test, err=None, details=None):
-        """Called when an error has occurred. 'err' is a tuple of values as
-        returned by sys.exc_info().
-
-        :param details: Alternative way to supply details about the outcome.
-            see the class docstring for more information.
-        """
-        self.errors.append((test,
-            self._err_details_to_string(test, err, details)))
-        if self.failfast:
-            self.stop()
-
-    def addFailure(self, test, err=None, details=None):
-        """Called when an error has occurred. 'err' is a tuple of values as
-        returned by sys.exc_info().
-
-        :param details: Alternative way to supply details about the outcome.
-            see the class docstring for more information.
-        """
-        self.failures.append((test,
-            self._err_details_to_string(test, err, details)))
-        if self.failfast:
-            self.stop()
-
-    def addSkip(self, test, reason=None, details=None):
-        """Called when a test has been skipped rather than running.
-
-        As with addSuccess and addError, stopTest should still be called.
-
-        This must be called by the TestCase. 'addError' and 'addFailure' will
-        not call addSkip, since they have no assumptions about the kind of
-        errors that a test can raise.
-
-        :param test: The test that has been skipped.
-        :param reason: The reason for the test being skipped. For instance,
-            u"pyGL is not available".
-        :param details: Alternative way to supply details about the outcome.
-            see the class docstring for more information.
-        :return: None
-        """
-        if reason is None:
-            reason = details.get('reason')
-            if reason is None:
-                reason = 'No reason given'
-            else:
-                reason = reason.as_text()
-        skip_list = self.skip_reasons.setdefault(reason, [])
-        skip_list.append(test)
-
-    def addSuccess(self, test, details=None):
-        """Called when a test succeeded."""
-
-    def addUnexpectedSuccess(self, test, details=None):
-        """Called when a test was expected to fail, but succeed."""
-        self.unexpectedSuccesses.append(test)
-        if self.failfast:
-            self.stop()
-
-    def wasSuccessful(self):
-        """Has this result been successful so far?
-
-        If there have been any errors, failures or unexpected successes,
-        return False.  Otherwise, return True.
-
-        Note: This differs from standard unittest in that we consider
-        unexpected successes to be equivalent to failures, rather than
-        successes.
-        """
-        return not (self.errors or self.failures or self.unexpectedSuccesses)
-
-    def _err_details_to_string(self, test, err=None, details=None):
-        """Convert an error in exc_info form or a contents dict to a string."""
-        if err is not None:
-            return TracebackContent(err, test).as_text()
-        return _details_to_str(details, special='traceback')
-
-    def _exc_info_to_unicode(self, err, test):
-        # Deprecated.  Only present because subunit upcalls to it.  See
-        # <https://bugs.launchpad.net/testtools/+bug/929063>.
-        return TracebackContent(err, test).as_text()
-
-    def _now(self):
-        """Return the current 'test time'.
-
-        If the time() method has not been called, this is equivalent to
-        datetime.now(); otherwise it's the last datestamp supplied to the
-        time() method.
-        """
-        if self.__now is None:
-            return datetime.datetime.now(utc)
-        else:
-            return self.__now
-
-    def startTestRun(self):
-        """Called before a test run starts.
-
-        New in Python 2.7. The testtools version resets the result to a
-        pristine condition ready for use in another test run.  Note that this
-        is different from Python 2.7's startTestRun, which does nothing.
-        """
-        # failfast is reset by the super __init__, so stash it.
-        failfast = self.failfast
-        super(TestResult, self).__init__()
-        self.skip_reasons = {}
-        self.__now = None
-        self._tags = TagContext()
-        # -- Start: As per python 2.7 --
-        self.expectedFailures = []
-        self.unexpectedSuccesses = []
-        self.failfast = failfast
-        # -- End:   As per python 2.7 --
-
-    def stopTestRun(self):
-        """Called after a test run completes
-
-        New in python 2.7
-        """
-
-    def startTest(self, test):
-        super(TestResult, self).startTest(test)
-        self._tags = TagContext(self._tags)
-
-    def stopTest(self, test):
-        self._tags = self._tags.parent
-        super(TestResult, self).stopTest(test)
-
-    @property
-    def current_tags(self):
-        """The currently set tags."""
-        return self._tags.get_current_tags()
-
-    def tags(self, new_tags, gone_tags):
-        """Add and remove tags from the test.
-
-        :param new_tags: A set of tags to be added to the stream.
-        :param gone_tags: A set of tags to be removed from the stream.
-        """
-        self._tags.change_tags(new_tags, gone_tags)
-
-    def time(self, a_datetime):
-        """Provide a timestamp to represent the current time.
-
-        This is useful when test activity is time delayed, or happening
-        concurrently and getting the system time between API calls will not
-        accurately represent the duration of tests (or the whole run).
-
-        Calling time() sets the datetime used by the TestResult object.
-        Time is permitted to go backwards when using this call.
-
-        :param a_datetime: A datetime.datetime object with TZ information or
-            None to reset the TestResult to gathering time from the system.
-        """
-        self.__now = a_datetime
-
-    def done(self):
-        """Called when the test runner is done.
-
-        deprecated in favour of stopTestRun.
-        """
-
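A minimal sketch of the details protocol described in the class docstring
above, using ``text_content`` to attach a skip reason::

    import unittest

    from testtools import TestResult
    from testtools.content import text_content

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    case = Sample('test_ok')
    result = TestResult()
    result.startTestRun()
    result.startTest(case)
    result.addSkip(
        case, details={'reason': text_content('pyGL is not available')})
    result.stopTest(case)
    result.stopTestRun()
    # result.skip_reasons == {'pyGL is not available': [case]}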
-
-class StreamResult(object):
-    """A test result for reporting the activity of a test run.
-
-    Typical use
-
-      >>> result = StreamResult()
-      >>> result.startTestRun()
-      >>> try:
-      ...     case.run(result)
-      ... finally:
-      ...     result.stopTestRun()
-
-    The case object will be either a TestCase or a TestSuite, and
-    generally makes a sequence of calls like::
-
-      >>> result.status(self.id(), 'inprogress')
-      >>> result.status(self.id(), 'success')
-
-    General concepts
-
-    StreamResult is built to process events that are emitted by tests during a
-    test run or test enumeration. The test run may be running concurrently, and
-    even be spread out across multiple machines.
-
-    All events are timestamped to prevent network buffering or scheduling
-    latency causing false timing reports. Timestamps are datetime objects in
-    the UTC timezone.
-
-    A route_code is a unicode string that identifies where a particular test
-    ran. This is optional in the API but very useful when multiplexing multiple
-    streams together as it allows identification of interactions between tests
-    that were run on the same hardware or in the same test process. Generally
-    actual tests never need to bother with this - it is added and processed
-    by StreamResults that do multiplexing / run analysis. route_codes are
-    also used to route stdin back to pdb instances.
-
-    The StreamResult base class does no accounting or processing, rather it
-    just provides an empty implementation of every method, suitable for use
-    as a base class regardless of intent.
-    """
-
-    def startTestRun(self):
-        """Start a test run.
-
-        This will prepare the test result to process results (which might imply
-        connecting to a database or remote machine).
-        """
-
-    def stopTestRun(self):
-        """Stop a test run.
-
-        This informs the result that no more test updates will be received. At
-        this point any test ids that have started and not completed can be
-        considered failed-or-hung.
-        """
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        """Inform the result about a test status.
-
-        :param test_id: The test whose status is being reported. None to
-            report status about the test run as a whole.
-        :param test_status: The status for the test. There are two sorts of
-            status - interim and final status events. As many interim events
-            can be generated as desired, but only one final event. After a
-            final status event any further file or status events from the
-            same test_id+route_code may be discarded or associated with a new
-            test by the StreamResult. (But no exception will be thrown).
-
-            Interim states:
-              * None - no particular status is being reported, or status being
-                reported is not associated with a test (e.g. when reporting on
-                stdout / stderr chatter).
-              * inprogress - the test is currently running. Emitted by tests when
-                they start running and at any intermediary point they might
-                choose to indicate their continual operation.
-
-            Final states:
-              * exists - the test exists. This is used when a test is not being
-                executed. Typically this is when querying what tests could be run
-                in a test run (which is useful for selecting tests to run).
-              * xfail - the test failed but that was expected. This is purely
-                informative - the test is not considered to be a failure.
-              * uxsuccess - the test passed but was expected to fail. The test
-                will be considered a failure.
-              * success - the test has finished without error.
-              * fail - the test failed (or errored). The test will be considered
-                a failure.
-              * skip - the test was selected to run but chose to be skipped. E.g.
-                a test dependency was missing. This is purely informative - the
-                test is not considered to be a failure.
-
-        :param test_tags: Optional set of tags to apply to the test. Tags
-            have no intrinsic meaning - that is up to the test author.
-        :param runnable: Allows status reports to mark that they are for
-            tests which are not able to be explicitly run. For instance,
-            subtests will report themselves as non-runnable.
-        :param file_name: The name for the file_bytes. Any unicode string may
-            be used. While there is no semantic value attached to the name
-            of any attachment, the names 'stdout' and 'stderr' and 'traceback'
-            are recommended for use only for output sent to stdout, stderr and
-            tracebacks of exceptions. When file_name is supplied, file_bytes
-            must be a bytes instance.
-        :param file_bytes: A bytes object containing content for the named
-            file. This can just be a single chunk of the file - emitting
-            another file event with more later. Must be None unless a
-            file_name is supplied.
-        :param eof: True if this chunk is the last chunk of the file, any
-            additional chunks with the same name should be treated as an error
-            and discarded. Ignored unless file_name has been supplied.
-        :param mime_type: An optional MIME type for the file. stdout and
-            stderr will generally be "text/plain; charset=utf8". If None,
-            defaults to application/octet-stream. Ignored unless file_name
-            has been supplied.
-        """
-
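A minimal sketch of the event sequence described above, with the logging
double from testtools.testresult.doubles standing in as the sink::

    from datetime import datetime

    from testtools.testresult.doubles import StreamResult as LoggingStream
    from testtools.testresult.real import utc

    result = LoggingStream()
    result.startTestRun()
    result.status(test_id='test.me', test_status='inprogress',
        timestamp=datetime.now(utc))
    result.status(test_id='test.me', test_status='success',
        timestamp=datetime.now(utc))
    result.stopTestRun()
    # result._events holds the status calls, in order, with all arguments.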
-
-def domap(*args, **kwargs):
-    return list(map(*args, **kwargs))
-
-
-class CopyStreamResult(StreamResult):
-    """Copies all event it receives to multiple results.
-
-    This provides an easy facility for combining multiple StreamResults.
-
-    For TestResult the equivalent class was ``MultiTestResult``.
-    """
-
-    def __init__(self, targets):
-        super(CopyStreamResult, self).__init__()
-        self.targets = targets
-
-    def startTestRun(self):
-        super(CopyStreamResult, self).startTestRun()
-        domap(methodcaller('startTestRun'), self.targets)
-
-    def stopTestRun(self):
-        super(CopyStreamResult, self).stopTestRun()
-        domap(methodcaller('stopTestRun'), self.targets)
-
-    def status(self, *args, **kwargs):
-        super(CopyStreamResult, self).status(*args, **kwargs)
-        domap(methodcaller('status', *args, **kwargs), self.targets)
-
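A minimal sketch: fan one event stream out to two sinks at once::

    from testtools.testresult.doubles import StreamResult as LoggingStream
    from testtools.testresult.real import CopyStreamResult

    sink_a, sink_b = LoggingStream(), LoggingStream()
    result = CopyStreamResult([sink_a, sink_b])
    result.startTestRun()
    result.status(test_id='test.me', test_status='success')
    result.stopTestRun()
    assert sink_a._events == sink_b._events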
-
-class StreamFailFast(StreamResult):
-    """Call the supplied callback if an error is seen in a stream.
-
-    An example callback::
-
-       def do_something():
-           pass
-    """
-
-    def __init__(self, on_error):
-        self.on_error = on_error
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        if test_status in ('uxsuccess', 'fail'):
-            self.on_error()
-
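A minimal sketch, pairing StreamFailFast with the TestControl defined later
in this module::

    from testtools.testresult.real import StreamFailFast, TestControl

    control = TestControl()
    failfast = StreamFailFast(control.stop)
    failfast.status(test_id='test.me', test_status='fail')
    assert control.shouldStop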
-
-class StreamResultRouter(StreamResult):
-    """A StreamResult that routes events.
-
-    StreamResultRouter forwards received events to another StreamResult object,
-    selected by a dynamic forwarding policy. Events where no destination is
-    found are forwarded to the fallback StreamResult, or an error is raised.
-
-    Typical use is to construct a router with a fallback and then either
-    create up front mapping rules, or create them as-needed from the fallback
-    handler::
-
-      >>> router = StreamResultRouter()
-      >>> sink = doubles.StreamResult()
-      >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
-      ...     consume_route=True)
-      >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
-
-    StreamResultRouter has no buffering.
-
-    When adding routes (and for the fallback) whether to call startTestRun and
-    stopTestRun or to not call them is controllable by passing
-    'do_start_stop_run'. The default is to call them for the fallback only.
-    If a route is added after startTestRun has been called, and
-    do_start_stop_run is True then startTestRun is called immediately on the
-    new route sink.
-
-    There is no a priori defined lookup order for routes: if they are ambiguous
-    the behaviour is undefined. Only a single route is chosen for any event.
-    """
-
-    _policies = {}
-
-    def __init__(self, fallback=None, do_start_stop_run=True):
-        """Construct a StreamResultRouter with optional fallback.
-
-        :param fallback: A StreamResult to forward events to when no route
-            exists for them.
-        :param do_start_stop_run: If False do not pass startTestRun and
-            stopTestRun onto the fallback.
-        """
-        self.fallback = fallback
-        self._route_code_prefixes = {}
-        self._test_ids = {}
-        # Records sinks that should have do_start_stop_run called on them.
-        self._sinks = []
-        if do_start_stop_run and fallback:
-            self._sinks.append(fallback)
-        self._in_run = False
-
-    def startTestRun(self):
-        super(StreamResultRouter, self).startTestRun()
-        for sink in self._sinks:
-            sink.startTestRun()
-        self._in_run = True
-
-    def stopTestRun(self):
-        super(StreamResultRouter, self).stopTestRun()
-        for sink in self._sinks:
-            sink.stopTestRun()
-        self._in_run = False
-
-    def status(self, **kwargs):
-        route_code = kwargs.get('route_code', None)
-        test_id = kwargs.get('test_id', None)
-        if route_code is not None:
-            prefix = route_code.split('/')[0]
-        else:
-            prefix = route_code
-        if prefix in self._route_code_prefixes:
-            target, consume_route = self._route_code_prefixes[prefix]
-            if route_code is not None and consume_route:
-                route_code = route_code[len(prefix) + 1:]
-                if not route_code:
-                    route_code = None
-                kwargs['route_code'] = route_code
-        elif test_id in self._test_ids:
-            target = self._test_ids[test_id]
-        else:
-            target = self.fallback
-        target.status(**kwargs)
-
-    def add_rule(self, sink, policy, do_start_stop_run=False, **policy_args):
-        """Add a rule to route events to sink when they match a given policy.
-
-        :param sink: A StreamResult to receive events.
-        :param policy: A routing policy. Valid policies are
-            'route_code_prefix' and 'test_id'.
-        :param do_start_stop_run: If True then startTestRun and stopTestRun
-            events will be passed onto this sink.
-
-        :raises: ValueError if the policy is unknown
-        :raises: TypeError if the policy is given arguments it cannot handle.
-
-        ``route_code_prefix`` routes events based on a prefix of the route
-        code in the event. It takes a ``route_prefix`` argument to match on
-        (e.g. '0') and a ``consume_route`` argument, which, if True, removes
-        the prefix from the ``route_code`` when forwarding events.
-
-        ``test_id`` routes events based on the test id.  It takes a single
-        argument, ``test_id``.  Use ``None`` to select non-test events.
-        """
-        policy_method = StreamResultRouter._policies.get(policy, None)
-        if not policy_method:
-            raise ValueError("bad policy %r" % (policy,))
-        policy_method(self, sink, **policy_args)
-        if do_start_stop_run:
-            self._sinks.append(sink)
-        if self._in_run:
-            sink.startTestRun()
-
-    def _map_route_code_prefix(self, sink, route_prefix, consume_route=False):
-        if '/' in route_prefix:
-            raise TypeError(
-                "%r is more than one route step long" % (route_prefix,))
-        self._route_code_prefixes[route_prefix] = (sink, consume_route)
-    _policies['route_code_prefix'] = _map_route_code_prefix
-
-    def _map_test_id(self, sink, test_id):
-        self._test_ids[test_id] = sink
-    _policies['test_id'] = _map_test_id
-
-
-class StreamTagger(CopyStreamResult):
-    """Adds or discards tags from StreamResult events."""
-
-    def __init__(self, targets, add=None, discard=None):
-        """Create a StreamTagger.
-
-        :param targets: A list of targets to forward events onto.
-        :param add: Either None or an iterable of tags to add to each event.
-        :param discard: Either None or an iterable of tags to discard from each
-            event.
-        """
-        super(StreamTagger, self).__init__(targets)
-        self.add = frozenset(add or ())
-        self.discard = frozenset(discard or ())
-
-    def status(self, *args, **kwargs):
-        test_tags = kwargs.get('test_tags') or set()
-        test_tags.update(self.add)
-        test_tags.difference_update(self.discard)
-        kwargs['test_tags'] = test_tags or None
-        super(StreamTagger, self).status(*args, **kwargs)
-
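A minimal sketch: stamp each event with a worker tag while stripping a
local-only one::

    from testtools.testresult.doubles import StreamResult as LoggingStream
    from testtools.testresult.real import StreamTagger

    sink = LoggingStream()
    result = StreamTagger([sink], add=['worker-0'], discard=['local'])
    result.startTestRun()
    result.status(test_id='test.me', test_status='success',
        test_tags=set(['local', 'quick']))
    result.stopTestRun()
    # sink sees test_tags == set(['worker-0', 'quick'])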
-
-class StreamToDict(StreamResult):
-    """A specialised StreamResult that emits a callback as tests complete.
-
-    Top level file attachments are simply discarded. Hung tests are detected
-    by stopTestRun and notified there and then.
-
-    The callback is passed a dict with the following keys:
-
-      * id: the test id.
-      * tags: The tags for the test. A set of unicode strings.
-      * details: A dict of file attachments - ``testtools.content.Content``
-        objects.
-      * status: One of the StreamResult status codes (including inprogress) or
-        'unknown' (used if only file events for a test were received...)
-      * timestamps: A pair of timestamps - the first one received with this
-        test id, and the one in the event that triggered the notification.
-        Hung tests have None as their second timestamp. Timestamps are not
-        compared - their ordering is purely order received in the stream.
-
-    Only the most recent tags observed in the stream are reported.
-    """
-
-    def __init__(self, on_test):
-        """Create a StreamToDict calling on_test on test completions.
-
-        :param on_test: A callback that accepts one parameter - a dict
-            describing a test.
-        """
-        super(StreamToDict, self).__init__()
-        self.on_test = on_test
-        if parse_mime_type is None:
-            raise ImportError("mimeparse module missing.")
-
-    def startTestRun(self):
-        super(StreamToDict, self).startTestRun()
-        self._inprogress = {}
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        super(StreamToDict, self).status(test_id, test_status,
-            test_tags=test_tags, runnable=runnable, file_name=file_name,
-            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
-            route_code=route_code, timestamp=timestamp)
-        key = self._ensure_key(test_id, route_code, timestamp)
-        # update fields
-        if not key:
-            return
-        if test_status is not None:
-            self._inprogress[key]['status'] = test_status
-        self._inprogress[key]['timestamps'][1] = timestamp
-        case = self._inprogress[key]
-        if file_name is not None:
-            if file_name not in case['details']:
-                if mime_type is None:
-                    mime_type = 'application/octet-stream'
-                primary, sub, parameters = parse_mime_type(mime_type)
-                if 'charset' in parameters:
-                    if ',' in parameters['charset']:
-                        # testtools was emitting a bad encoding; work around
-                        # it here, though this loses data. This workaround
-                        # should probably be dropped in a few releases.
-                        parameters['charset'] = parameters['charset'][
-                            :parameters['charset'].find(',')]
-                content_type = ContentType(primary, sub, parameters)
-                content_bytes = []
-                case['details'][file_name] = Content(
-                    content_type, lambda: content_bytes)
-            case['details'][file_name].iter_bytes().append(file_bytes)
-        if test_tags is not None:
-            self._inprogress[key]['tags'] = test_tags
-        # notify completed tests.
-        if test_status not in (None, 'inprogress'):
-            self.on_test(self._inprogress.pop(key))
-
-    def stopTestRun(self):
-        super(StreamToDict, self).stopTestRun()
-        while self._inprogress:
-            case = self._inprogress.popitem()[1]
-            case['timestamps'][1] = None
-            self.on_test(case)
-
-    def _ensure_key(self, test_id, route_code, timestamp):
-        if test_id is None:
-            return
-        key = (test_id, route_code)
-        if key not in self._inprogress:
-            self._inprogress[key] = {
-                'id': test_id,
-                'tags': set(),
-                'details': {},
-                'status': 'unknown',
-                'timestamps': [timestamp, None]}
-        return key
-
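A minimal sketch of the callback contract (StreamToDict raises ImportError
unless the mimeparse module is installed)::

    from testtools.testresult.real import StreamToDict

    seen = []
    result = StreamToDict(seen.append)
    result.startTestRun()
    result.status(test_id='test.me', test_status='inprogress')
    result.status(test_id='test.me', test_status='success')
    result.stopTestRun()
    # seen[0]['id'] == 'test.me'; seen[0]['status'] == 'success'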
-
-_status_map = {
-    'inprogress': 'addFailure',
-    'unknown': 'addFailure',
-    'success': 'addSuccess',
-    'skip': 'addSkip',
-    'fail': 'addFailure',
-    'xfail': 'addExpectedFailure',
-    'uxsuccess': 'addUnexpectedSuccess',
-    }
-
-
-def test_dict_to_case(test_dict):
-    """Convert a test dict into a TestCase object.
-
-    :param test_dict: A test dict as generated by StreamToDict.
-    :return: A PlaceHolder test object.
-    """
-    # Circular import.
-    global PlaceHolder
-    if PlaceHolder is None:
-        from testtools.testcase import PlaceHolder
-    outcome = _status_map[test_dict['status']]
-    return PlaceHolder(test_dict['id'], outcome=outcome,
-        details=test_dict['details'], tags=test_dict['tags'],
-        timestamps=test_dict['timestamps'])
-
-
-class StreamSummary(StreamToDict):
-    """A specialised StreamResult that summarises a stream.
-
-    The summary uses the same representation as the original
-    unittest.TestResult contract, allowing it to be consumed by any test
-    runner.
-    """
-
-    def __init__(self):
-        super(StreamSummary, self).__init__(self._gather_test)
-        self._handle_status = {
-            'success': self._success,
-            'skip': self._skip,
-            'exists': self._exists,
-            'fail': self._fail,
-            'xfail': self._xfail,
-            'uxsuccess': self._uxsuccess,
-            'unknown': self._incomplete,
-            'inprogress': self._incomplete,
-            }
-
-    def startTestRun(self):
-        super(StreamSummary, self).startTestRun()
-        self.failures = []
-        self.errors = []
-        self.testsRun = 0
-        self.skipped = []
-        self.expectedFailures = []
-        self.unexpectedSuccesses = []
-
-    def wasSuccessful(self):
-        """Return False if any failure has occured.
-
-        Note that incomplete tests can only be detected when stopTestRun is
-        called, so that should be called before checking wasSuccessful.
-        """
-        return (not self.failures and not self.errors)
-
-    def _gather_test(self, test_dict):
-        if test_dict['status'] == 'exists':
-            return
-        self.testsRun += 1
-        case = test_dict_to_case(test_dict)
-        self._handle_status[test_dict['status']](case)
-
-    def _incomplete(self, case):
-        self.errors.append((case, "Test did not complete"))
-
-    def _success(self, case):
-        pass
-
-    def _skip(self, case):
-        if 'reason' not in case._details:
-            reason = "Unknown"
-        else:
-            reason = case._details['reason'].as_text()
-        self.skipped.append((case, reason))
-
-    def _exists(self, case):
-        pass
-
-    def _fail(self, case):
-        message = _details_to_str(case._details, special="traceback")
-        self.errors.append((case, message))
-
-    def _xfail(self, case):
-        message = _details_to_str(case._details, special="traceback")
-        self.expectedFailures.append((case, message))
-
-    def _uxsuccess(self, case):
-        case._outcome = 'addUnexpectedSuccess'
-        self.unexpectedSuccesses.append(case)
-
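A minimal sketch: reduce a stream to unittest-style counters (mimeparse is
required, as for StreamToDict)::

    from testtools.testresult.real import StreamSummary

    summary = StreamSummary()
    summary.startTestRun()
    summary.status(test_id='test.me', test_status='inprogress')
    summary.status(test_id='test.me', test_status='fail')
    summary.stopTestRun()
    assert summary.testsRun == 1
    assert not summary.wasSuccessful()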
-
-class TestControl(object):
-    """Controls a running test run, allowing it to be interrupted.
-
-    :ivar shouldStop: If True, tests should not run and should instead
-        return immediately. Similarly a TestSuite should check this between
-        each test and if set stop dispatching any new tests and return.
-    """
-
-    def __init__(self):
-        super(TestControl, self).__init__()
-        self.shouldStop = False
-
-    def stop(self):
-        """Indicate that tests should stop running."""
-        self.shouldStop = True
-
-
-class MultiTestResult(TestResult):
-    """A test result that dispatches to many test results."""
-
-    def __init__(self, *results):
-        # Set up _results first, as the base class __init__ assigns to failfast.
-        self._results = list(map(ExtendedToOriginalDecorator, results))
-        super(MultiTestResult, self).__init__()
-
-    def __repr__(self):
-        return '<%s (%s)>' % (
-            self.__class__.__name__, ', '.join(map(repr, self._results)))
-
-    def _dispatch(self, message, *args, **kwargs):
-        return tuple(
-            getattr(result, message)(*args, **kwargs)
-            for result in self._results)
-
-    def _get_failfast(self):
-        return getattr(self._results[0], 'failfast', False)
-    def _set_failfast(self, value):
-        self._dispatch('__setattr__', 'failfast', value)
-    failfast = property(_get_failfast, _set_failfast)
-
-    def _get_shouldStop(self):
-        return any(self._dispatch('__getattr__', 'shouldStop'))
-    def _set_shouldStop(self, value):
-        # Called because we subclass TestResult. Probably should not do that.
-        pass
-    shouldStop = property(_get_shouldStop, _set_shouldStop)
-
-    def startTest(self, test):
-        super(MultiTestResult, self).startTest(test)
-        return self._dispatch('startTest', test)
-
-    def stop(self):
-        return self._dispatch('stop')
-
-    def stopTest(self, test):
-        super(MultiTestResult, self).stopTest(test)
-        return self._dispatch('stopTest', test)
-
-    def addError(self, test, error=None, details=None):
-        return self._dispatch('addError', test, error, details=details)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        return self._dispatch(
-            'addExpectedFailure', test, err, details=details)
-
-    def addFailure(self, test, err=None, details=None):
-        return self._dispatch('addFailure', test, err, details=details)
-
-    def addSkip(self, test, reason=None, details=None):
-        return self._dispatch('addSkip', test, reason, details=details)
-
-    def addSuccess(self, test, details=None):
-        return self._dispatch('addSuccess', test, details=details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        return self._dispatch('addUnexpectedSuccess', test, details=details)
-
-    def startTestRun(self):
-        super(MultiTestResult, self).startTestRun()
-        return self._dispatch('startTestRun')
-
-    def stopTestRun(self):
-        return self._dispatch('stopTestRun')
-
-    def tags(self, new_tags, gone_tags):
-        super(MultiTestResult, self).tags(new_tags, gone_tags)
-        return self._dispatch('tags', new_tags, gone_tags)
-
-    def time(self, a_datetime):
-        return self._dispatch('time', a_datetime)
-
-    def done(self):
-        return self._dispatch('done')
-
-    def wasSuccessful(self):
-        """Was this result successful?
-
-        Only returns True if every constituent result was successful.
-        """
-        return all(self._dispatch('wasSuccessful'))
-
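A minimal sketch: drive two plain TestResults in lockstep from one run::

    import unittest

    from testtools.testresult.real import MultiTestResult, TestResult

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    first, second = TestResult(), TestResult()
    result = MultiTestResult(first, second)
    Sample('test_ok').run(result)
    assert first.testsRun == second.testsRun == 1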
-
-class TextTestResult(TestResult):
-    """A TestResult which outputs activity to a text stream."""
-
-    def __init__(self, stream, failfast=False):
-        """Construct a TextTestResult writing to stream."""
-        super(TextTestResult, self).__init__(failfast=failfast)
-        self.stream = stream
-        self.sep1 = '=' * 70 + '\n'
-        self.sep2 = '-' * 70 + '\n'
-
-    def _delta_to_float(self, a_timedelta):
-        return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
-            a_timedelta.microseconds / 1000000.0)
-
-    def _show_list(self, label, error_list):
-        for test, output in error_list:
-            self.stream.write(self.sep1)
-            self.stream.write("%s: %s\n" % (label, test.id()))
-            self.stream.write(self.sep2)
-            self.stream.write(output)
-
-    def startTestRun(self):
-        super(TextTestResult, self).startTestRun()
-        self.__start = self._now()
-        self.stream.write("Tests running...\n")
-
-    def stopTestRun(self):
-        if self.testsRun != 1:
-            plural = 's'
-        else:
-            plural = ''
-        stop = self._now()
-        self._show_list('ERROR', self.errors)
-        self._show_list('FAIL', self.failures)
-        for test in self.unexpectedSuccesses:
-            self.stream.write(
-                "%sUNEXPECTED SUCCESS: %s\n%s" % (
-                    self.sep1, test.id(), self.sep2))
-        self.stream.write("\nRan %d test%s in %.3fs\n" %
-            (self.testsRun, plural,
-             self._delta_to_float(stop - self.__start)))
-        if self.wasSuccessful():
-            self.stream.write("OK\n")
-        else:
-            self.stream.write("FAILED (")
-            details = []
-            details.append("failures=%d" % (
-                sum(map(len, (
-                    self.failures, self.errors, self.unexpectedSuccesses)))))
-            self.stream.write(", ".join(details))
-            self.stream.write(")\n")
-        super(TextTestResult, self).stopTestRun()
-
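A minimal sketch: report a run on stdout::

    import sys
    import unittest

    from testtools.testresult.real import TextTestResult

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    result = TextTestResult(sys.stdout)
    result.startTestRun()
    Sample('test_ok').run(result)
    result.stopTestRun()
    # Prints "Tests running...", then "Ran 1 test in N.NNNs" and "OK".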
-
-class ThreadsafeForwardingResult(TestResult):
-    """A TestResult which ensures the target does not receive mixed up calls.
-
-    Multiple ``ThreadsafeForwardingResults`` can forward to the same target
-    result, and that target result will only ever receive the complete set of
-    events for one test at a time.
-
-    This is enforced using a semaphore, which further guarantees that tests
-    will be sent atomically even if the ``ThreadsafeForwardingResults`` are in
-    different threads.
-
-    ``ThreadsafeForwardingResult`` is typically used by
-    ``ConcurrentTestSuite``, which creates one ``ThreadsafeForwardingResult``
-    per thread, each of which wraps the TestResult that
-    ``ConcurrentTestSuite.run()`` is called with.
-
-    target.startTestRun() and target.stopTestRun() are called once for each
-    ThreadsafeForwardingResult that forwards to the same target. If the target
-    takes special action on these events, it should take care to accommodate
-    this.
-
-    time() and tags() calls are batched to be adjacent to the test result and
-    in the case of tags() are coerced into test-local scope, avoiding the
-    opportunity for bugs around global state in the target.
-    """
-
-    def __init__(self, target, semaphore):
-        """Create a ThreadsafeForwardingResult forwarding to target.
-
-        :param target: A ``TestResult``.
-        :param semaphore: A ``threading.Semaphore`` with limit 1.
-        """
-        TestResult.__init__(self)
-        self.result = ExtendedToOriginalDecorator(target)
-        self.semaphore = semaphore
-        self._test_start = None
-        self._global_tags = set(), set()
-        self._test_tags = set(), set()
-
-    def __repr__(self):
-        return '<%s %r>' % (self.__class__.__name__, self.result)
-
-    def _any_tags(self, tags):
-        return bool(tags[0] or tags[1])
-
-    def _add_result_with_semaphore(self, method, test, *args, **kwargs):
-        now = self._now()
-        self.semaphore.acquire()
-        try:
-            self.result.time(self._test_start)
-            self.result.startTest(test)
-            self.result.time(now)
-            if self._any_tags(self._global_tags):
-                self.result.tags(*self._global_tags)
-            if self._any_tags(self._test_tags):
-                self.result.tags(*self._test_tags)
-            self._test_tags = set(), set()
-            try:
-                method(test, *args, **kwargs)
-            finally:
-                self.result.stopTest(test)
-        finally:
-            self.semaphore.release()
-        self._test_start = None
-
-    def addError(self, test, err=None, details=None):
-        self._add_result_with_semaphore(self.result.addError,
-            test, err, details=details)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        self._add_result_with_semaphore(self.result.addExpectedFailure,
-            test, err, details=details)
-
-    def addFailure(self, test, err=None, details=None):
-        self._add_result_with_semaphore(self.result.addFailure,
-            test, err, details=details)
-
-    def addSkip(self, test, reason=None, details=None):
-        self._add_result_with_semaphore(self.result.addSkip,
-            test, reason, details=details)
-
-    def addSuccess(self, test, details=None):
-        self._add_result_with_semaphore(self.result.addSuccess,
-            test, details=details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
-            test, details=details)
-
-    def progress(self, offset, whence):
-        pass
-
-    def startTestRun(self):
-        super(ThreadsafeForwardingResult, self).startTestRun()
-        self.semaphore.acquire()
-        try:
-            self.result.startTestRun()
-        finally:
-            self.semaphore.release()
-
-    def _get_shouldStop(self):
-        self.semaphore.acquire()
-        try:
-            return self.result.shouldStop
-        finally:
-            self.semaphore.release()
-    def _set_shouldStop(self, value):
-        # Another case where we should not subclass TestResult
-        pass
-    shouldStop = property(_get_shouldStop, _set_shouldStop)
-
-    def stop(self):
-        self.semaphore.acquire()
-        try:
-            self.result.stop()
-        finally:
-            self.semaphore.release()
-
-    def stopTestRun(self):
-        self.semaphore.acquire()
-        try:
-            self.result.stopTestRun()
-        finally:
-            self.semaphore.release()
-
-    def done(self):
-        self.semaphore.acquire()
-        try:
-            self.result.done()
-        finally:
-            self.semaphore.release()
-
-    def startTest(self, test):
-        self._test_start = self._now()
-        super(ThreadsafeForwardingResult, self).startTest(test)
-
-    def wasSuccessful(self):
-        return self.result.wasSuccessful()
-
-    def tags(self, new_tags, gone_tags):
-        """See `TestResult`."""
-        super(ThreadsafeForwardingResult, self).tags(new_tags, gone_tags)
-        if self._test_start is not None:
-            self._test_tags = _merge_tags(
-                self._test_tags, (new_tags, gone_tags))
-        else:
-            self._global_tags = _merge_tags(
-                self._global_tags, (new_tags, gone_tags))
-
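A minimal sketch: two forwarders sharing one target through a limit-1
semaphore, so each test arrives at the target as an atomic
startTest..stopTest block::

    import threading
    import unittest

    from testtools.testresult.real import (
        TestResult, ThreadsafeForwardingResult)

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    target = TestResult()
    semaphore = threading.Semaphore(1)
    forwarder_a = ThreadsafeForwardingResult(target, semaphore)
    forwarder_b = ThreadsafeForwardingResult(target, semaphore)
    Sample('test_ok').run(forwarder_a)
    assert target.testsRun == 1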
-
-def _merge_tags(existing, changed):
-    new_tags, gone_tags = changed
-    result_new = set(existing[0])
-    result_gone = set(existing[1])
-    result_new.update(new_tags)
-    result_new.difference_update(gone_tags)
-    result_gone.update(gone_tags)
-    result_gone.difference_update(new_tags)
-    return result_new, result_gone
-
-
-class ExtendedToOriginalDecorator(object):
-    """Permit new TestResult API code to degrade gracefully with old results.
-
-    This decorates an existing TestResult and converts missing outcomes
-    such as addSkip to older outcomes such as addSuccess. It also supports
-    the extended details protocol. In all cases the most recent protocol
-    is attempted first, and fallbacks only occur when the decorated result
-    does not support the newer style of calling.
-    """
-
-    def __init__(self, decorated):
-        self.decorated = decorated
-        self._tags = TagContext()
-        # Only used for old TestResults that do not have failfast.
-        self._failfast = False
-
-    def __repr__(self):
-        return '<%s %r>' % (self.__class__.__name__, self.decorated)
-
-    def __getattr__(self, name):
-        return getattr(self.decorated, name)
-
-    def addError(self, test, err=None, details=None):
-        try:
-            self._check_args(err, details)
-            if details is not None:
-                try:
-                    return self.decorated.addError(test, details=details)
-                except TypeError:
-                    # have to convert
-                    err = self._details_to_exc_info(details)
-            return self.decorated.addError(test, err)
-        finally:
-            if self.failfast:
-                self.stop()
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        self._check_args(err, details)
-        addExpectedFailure = getattr(
-            self.decorated, 'addExpectedFailure', None)
-        if addExpectedFailure is None:
-            return self.addSuccess(test)
-        if details is not None:
-            try:
-                return addExpectedFailure(test, details=details)
-            except TypeError:
-                # have to convert
-                err = self._details_to_exc_info(details)
-        return addExpectedFailure(test, err)
-
-    def addFailure(self, test, err=None, details=None):
-        try:
-            self._check_args(err, details)
-            if details is not None:
-                try:
-                    return self.decorated.addFailure(test, details=details)
-                except TypeError:
-                    # have to convert
-                    err = self._details_to_exc_info(details)
-            return self.decorated.addFailure(test, err)
-        finally:
-            if self.failfast:
-                self.stop()
-
-    def addSkip(self, test, reason=None, details=None):
-        self._check_args(reason, details)
-        addSkip = getattr(self.decorated, 'addSkip', None)
-        if addSkip is None:
-            return self.decorated.addSuccess(test)
-        if details is not None:
-            try:
-                return addSkip(test, details=details)
-            except TypeError:
-                # extract the reason if it's available
-                try:
-                    reason = details['reason'].as_text()
-                except KeyError:
-                    reason = _details_to_str(details)
-        return addSkip(test, reason)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        try:
-            outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
-            if outcome is None:
-                try:
-                    test.fail("")
-                except test.failureException:
-                    return self.addFailure(test, sys.exc_info())
-            if details is not None:
-                try:
-                    return outcome(test, details=details)
-                except TypeError:
-                    pass
-            return outcome(test)
-        finally:
-            if self.failfast:
-                self.stop()
-
-    def addSuccess(self, test, details=None):
-        if details is not None:
-            try:
-                return self.decorated.addSuccess(test, details=details)
-            except TypeError:
-                pass
-        return self.decorated.addSuccess(test)
-
-    def _check_args(self, err, details):
-        param_count = 0
-        if err is not None:
-            param_count += 1
-        if details is not None:
-            param_count += 1
-        if param_count != 1:
-            raise ValueError("Must pass only one of err '%s' and details '%s"
-                % (err, details))
-
-    def _details_to_exc_info(self, details):
-        """Convert a details dict to an exc_info tuple."""
-        return (
-            _StringException,
-            _StringException(_details_to_str(details, special='traceback')),
-            None)
-
-    @property
-    def current_tags(self):
-        return getattr(
-            self.decorated, 'current_tags', self._tags.get_current_tags())
-
-    def done(self):
-        try:
-            return self.decorated.done()
-        except AttributeError:
-            return
-
-    def _get_failfast(self):
-        return getattr(self.decorated, 'failfast', self._failfast)
-    def _set_failfast(self, value):
-        if safe_hasattr(self.decorated, 'failfast'):
-            self.decorated.failfast = value
-        else:
-            self._failfast = value
-    failfast = property(_get_failfast, _set_failfast)
-
-    def progress(self, offset, whence):
-        method = getattr(self.decorated, 'progress', None)
-        if method is None:
-            return
-        return method(offset, whence)
-
-    @property
-    def shouldStop(self):
-        return self.decorated.shouldStop
-
-    def startTest(self, test):
-        self._tags = TagContext(self._tags)
-        return self.decorated.startTest(test)
-
-    def startTestRun(self):
-        self._tags = TagContext()
-        try:
-            return self.decorated.startTestRun()
-        except AttributeError:
-            return
-
-    def stop(self):
-        return self.decorated.stop()
-
-    def stopTest(self, test):
-        self._tags = self._tags.parent
-        return self.decorated.stopTest(test)
-
-    def stopTestRun(self):
-        try:
-            return self.decorated.stopTestRun()
-        except AttributeError:
-            return
-
-    def tags(self, new_tags, gone_tags):
-        method = getattr(self.decorated, 'tags', None)
-        if method is not None:
-            return method(new_tags, gone_tags)
-        else:
-            self._tags.change_tags(new_tags, gone_tags)
-
-    def time(self, a_datetime):
-        method = getattr(self.decorated, 'time', None)
-        if method is None:
-            return
-        return method(a_datetime)
-
-    def wasSuccessful(self):
-        return self.decorated.wasSuccessful()
-
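A minimal sketch of the graceful degradation: a Python 2.6 style result has
no addSkip, so the decorator downgrades the skip to addSuccess::

    import unittest

    from testtools.testresult.doubles import Python26TestResult
    from testtools.testresult.real import ExtendedToOriginalDecorator

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    case = Sample('test_ok')
    result = ExtendedToOriginalDecorator(Python26TestResult())
    result.startTest(case)
    result.addSkip(case, reason='pyGL is not available')
    result.stopTest(case)
    # The double recorded ('addSuccess', case) in place of the skip.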
-
-class ExtendedToStreamDecorator(CopyStreamResult, StreamSummary, TestControl):
-    """Permit using old TestResult API code with new StreamResult objects.
-
-    This decorates a StreamResult and converts old (Python 2.6 / 2.7 /
-    Extended) TestResult API calls into StreamResult calls.
-
-    It also supports regular StreamResult calls, making it safe to wrap around
-    any StreamResult.
-    """
-
-    def __init__(self, decorated):
-        super(ExtendedToStreamDecorator, self).__init__([decorated])
-        # Deal with mismatched base class constructors.
-        TestControl.__init__(self)
-        self._started = False
-
-    def _get_failfast(self):
-        return len(self.targets) == 2
-    def _set_failfast(self, value):
-        if value:
-            if len(self.targets) == 2:
-                return
-            self.targets.append(StreamFailFast(self.stop))
-        else:
-            del self.targets[1:]
-    failfast = property(_get_failfast, _set_failfast)
-
-    def startTest(self, test):
-        if not self._started:
-            self.startTestRun()
-        self.status(test_id=test.id(), test_status='inprogress',
-            timestamp=self._now())
-        self._tags = TagContext(self._tags)
-
-    def stopTest(self, test):
-        self._tags = self._tags.parent
-
-    def addError(self, test, err=None, details=None):
-        self._check_args(err, details)
-        self._convert(test, err, details, 'fail')
-    addFailure = addError
-
-    def _convert(self, test, err, details, status, reason=None):
-        if not self._started:
-            self.startTestRun()
-        test_id = test.id()
-        now = self._now()
-        if err is not None:
-            if details is None:
-                details = {}
-            details['traceback'] = TracebackContent(err, test)
-        if details is not None:
-            for name, content in details.items():
-                mime_type = repr(content.content_type)
-                file_bytes = None
-                for next_bytes in content.iter_bytes():
-                    if file_bytes is not None:
-                        self.status(file_name=name, file_bytes=file_bytes,
-                            mime_type=mime_type, test_id=test_id, timestamp=now)
-                    file_bytes = next_bytes
-                self.status(file_name=name, file_bytes=file_bytes, eof=True,
-                    mime_type=mime_type, test_id=test_id, timestamp=now)
-        if reason is not None:
-            self.status(file_name='reason', file_bytes=reason.encode('utf8'),
-                eof=True, mime_type="text/plain; charset=utf8",
-                test_id=test_id, timestamp=now)
-        self.status(test_id=test_id, test_status=status,
-            test_tags=self.current_tags, timestamp=now)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        self._check_args(err, details)
-        self._convert(test, err, details, 'xfail')
-
-    def addSkip(self, test, reason=None, details=None):
-        self._convert(test, None, details, 'skip', reason)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        self._convert(test, None, details, 'uxsuccess')
-
-    def addSuccess(self, test, details=None):
-        self._convert(test, None, details, 'success')
-
-    def _check_args(self, err, details):
-        param_count = 0
-        if err is not None:
-            param_count += 1
-        if details is not None:
-            param_count += 1
-        if param_count != 1:
-            raise ValueError("Must pass only one of err '%s' and details '%s"
-                % (err, details))
-
-    def startTestRun(self):
-        super(ExtendedToStreamDecorator, self).startTestRun()
-        self._tags = TagContext()
-        self.shouldStop = False
-        self.__now = None
-        self._started = True
-
-    @property
-    def current_tags(self):
-        """The currently set tags."""
-        return self._tags.get_current_tags()
-
-    def tags(self, new_tags, gone_tags):
-        """Add and remove tags from the test.
-
-        :param new_tags: A set of tags to be added to the stream.
-        :param gone_tags: A set of tags to be removed from the stream.
-        """
-        self._tags.change_tags(new_tags, gone_tags)
-
-    def _now(self):
-        """Return the current 'test time'.
-
-        If the time() method has not been called, this is equivalent to
-        datetime.now(); otherwise it is the last datetime supplied to the
-        time() method.
-        """
-        if self.__now is None:
-            return datetime.datetime.now(utc)
-        else:
-            return self.__now
-
-    def time(self, a_datetime):
-        self.__now = a_datetime
-
-    def wasSuccessful(self):
-        if not self._started:
-            self.startTestRun()
-        return super(ExtendedToStreamDecorator, self).wasSuccessful()
-
-
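-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module). Assuming `test` is a unittest-style test case instance,
-# the decorator above lets legacy TestResult calls drive any StreamResult:
-#
-#   from testtools import StreamSummary
-#   result = ExtendedToStreamDecorator(StreamSummary())
-#   result.startTestRun()
-#   result.startTest(test)    # emitted as status(test_status='inprogress')
-#   result.addSuccess(test)   # emitted as status(test_status='success')
-#   result.stopTest(test)
-#   result.stopTestRun()
-#   result.wasSuccessful()    # -> True
-
-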
-class StreamToExtendedDecorator(StreamResult):
-    """Convert StreamResult API calls into ExtendedTestResult calls.
-
-    This will buffer all calls for all concurrently active tests, and
-    then flush each test as it completes.
-
-    Incomplete tests will be flushed as errors when the test run stops.
-
-    Non-test file attachments are accumulated into a test called
-    'testtools.extradata' and flushed at the end of the run.
-    """
-
-    def __init__(self, decorated):
-        # ExtendedToOriginalDecorator takes care of thunking details back to
-        # exceptions/reasons etc.
-        self.decorated = ExtendedToOriginalDecorator(decorated)
-        # StreamToDict buffers and gives us individual tests.
-        self.hook = StreamToDict(self._handle_tests)
-
-    def status(self, test_id=None, test_status=None, *args, **kwargs):
-        if test_status == 'exists':
-            return
-        self.hook.status(
-            test_id=test_id, test_status=test_status, *args, **kwargs)
-
-    def startTestRun(self):
-        self.decorated.startTestRun()
-        self.hook.startTestRun()
-
-    def stopTestRun(self):
-        self.hook.stopTestRun()
-        self.decorated.stopTestRun()
-
-    def _handle_tests(self, test_dict):
-        case = test_dict_to_case(test_dict)
-        case.run(self.decorated)
-
-
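-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module). Going the other way, stream events can drive a classic
-# result object; here a plain unittest.TestResult receives the converted
-# calls:
-#
-#   import unittest
-#   legacy = unittest.TestResult()
-#   result = StreamToExtendedDecorator(legacy)
-#   result.startTestRun()
-#   result.status(test_id='test_foo', test_status='inprogress')
-#   result.status(test_id='test_foo', test_status='success')
-#   result.stopTestRun()   # buffered events are flushed into `legacy`
-
-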
-class StreamToQueue(StreamResult):
-    """A StreamResult which enqueues events as a dict to a queue.Queue.
-
-    Events have their route code updated to include the route code
-    StreamToQueue was constructed with before they are submitted. If the event
-    route code is None, it is replaced with the StreamToQueue route code;
-    otherwise it is prefixed with the StreamToQueue route code and a '/'
-    separator.
-
-    startTestRun and stopTestRun are forwarded to the queue. Implementors that
-    dequeue events back into StreamResult calls should take care not to call
-    startTestRun / stopTestRun on other StreamResult objects multiple times
-    (e.g. by filtering startTestRun and stopTestRun).
-
-    ``StreamToQueue`` is typically used by
-    ``ConcurrentStreamTestSuite``, which creates one ``StreamToQueue``
-    per thread, forwards status events to the StreamResult that
-    ``ConcurrentStreamTestSuite.run()`` was called with, and uses the
-    stopTestRun event to trigger calling join() on each thread.
-
-    Unlike ThreadsafeForwardingResult, which this supersedes, no buffering
-    takes place: any event supplied to a StreamToQueue is inserted into the
-    queue immediately.
-
-    Events are forwarded as a dict with a key ``event`` which is one of
-    ``startTestRun``, ``stopTestRun`` or ``status``. When ``event`` is
-    ``status`` the dict also has keys matching the keyword arguments
-    of ``StreamResult.status``; otherwise it has one other key, ``result``,
-    which is the ``StreamToQueue`` instance that emitted the event.
-    """
-
-    def __init__(self, queue, routing_code):
-        """Create a StreamToQueue forwarding to target.
-
-        :param queue: A ``queue.Queue`` to receive events.
-        :param routing_code: The routing code to apply to messages.
-        """
-        super(StreamToQueue, self).__init__()
-        self.queue = queue
-        self.routing_code = routing_code
-
-    def startTestRun(self):
-        self.queue.put(dict(event='startTestRun', result=self))
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        self.queue.put(dict(event='status', test_id=test_id,
-            test_status=test_status, test_tags=test_tags, runnable=runnable,
-            file_name=file_name, file_bytes=file_bytes, eof=eof,
-            mime_type=mime_type, route_code=self.route_code(route_code),
-            timestamp=timestamp))
-
-    def stopTestRun(self):
-        self.queue.put(dict(event='stopTestRun', result=self))
-
-    def route_code(self, route_code):
-        """Adjust route_code on the way through."""
-        if route_code is None:
-            return self.routing_code
-        return self.routing_code + _u("/") + route_code
-
-
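-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module), showing the event dicts a consumer would dequeue:
-#
-#   try:
-#       from queue import Queue   # Python 3
-#   except ImportError:
-#       from Queue import Queue   # Python 2
-#   q = Queue()
-#   remote = StreamToQueue(q, 'worker-0')
-#   remote.startTestRun()
-#   remote.status(test_id='test_foo', test_status='success')
-#   q.get()['event']        # -> 'startTestRun'
-#   q.get()['route_code']   # -> 'worker-0' (no route code on the event)
-
-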
-class TestResultDecorator(object):
-    """General pass-through decorator.
-
-    This provides a base that other TestResults can inherit from to
-    gain basic forwarding functionality.
-    """
-
-    def __init__(self, decorated):
-        """Create a TestResultDecorator forwarding to decorated."""
-        self.decorated = decorated
-
-    def startTest(self, test):
-        return self.decorated.startTest(test)
-
-    def startTestRun(self):
-        return self.decorated.startTestRun()
-
-    def stopTest(self, test):
-        return self.decorated.stopTest(test)
-
-    def stopTestRun(self):
-        return self.decorated.stopTestRun()
-
-    def addError(self, test, err=None, details=None):
-        return self.decorated.addError(test, err, details=details)
-
-    def addFailure(self, test, err=None, details=None):
-        return self.decorated.addFailure(test, err, details=details)
-
-    def addSuccess(self, test, details=None):
-        return self.decorated.addSuccess(test, details=details)
-
-    def addSkip(self, test, reason=None, details=None):
-        return self.decorated.addSkip(test, reason, details=details)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        return self.decorated.addExpectedFailure(test, err, details=details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        return self.decorated.addUnexpectedSuccess(test, details=details)
-
-    def progress(self, offset, whence):
-        return self.decorated.progress(offset, whence)
-
-    def wasSuccessful(self):
-        return self.decorated.wasSuccessful()
-
-    @property
-    def current_tags(self):
-        return self.decorated.current_tags
-
-    @property
-    def shouldStop(self):
-        return self.decorated.shouldStop
-
-    def stop(self):
-        return self.decorated.stop()
-
-    @property
-    def testsRun(self):
-        return self.decorated.testsRun
-
-    def tags(self, new_tags, gone_tags):
-        return self.decorated.tags(new_tags, gone_tags)
-
-    def time(self, a_datetime):
-        return self.decorated.time(a_datetime)
-
-
-class Tagger(TestResultDecorator):
-    """Tag each test individually."""
-
-    def __init__(self, decorated, new_tags, gone_tags):
-        """Wrap 'decorated' such that each test is tagged.
-
-        :param new_tags: Tags to be added for each test.
-        :param gone_tags: Tags to be removed for each test.
-        """
-        super(Tagger, self).__init__(decorated)
-        self._new_tags = set(new_tags)
-        self._gone_tags = set(gone_tags)
-
-    def startTest(self, test):
-        super(Tagger, self).startTest(test)
-        self.tags(self._new_tags, self._gone_tags)
-
-
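-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module). Wrapping a result so every test carries a fixed tag:
-#
-#   tagged = Tagger(TestResult(), new_tags=set(['quick']), gone_tags=set())
-#
-# Each startTest() is forwarded and then immediately followed by a
-# tags({'quick'}, set()) call on the wrapped result.
-
-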
-class TestByTestResult(TestResult):
-    """Call something every time a test completes."""
-
-    def __init__(self, on_test):
-        """Construct a ``TestByTestResult``.
-
-        :param on_test: A callable that takes a test case, a status (one of
-            "success", "failure", "error", "skip", or "xfail"), a start time
-            (a ``datetime`` with timezone), a stop time, an iterable of tags,
-            and a details dict. It is called at the end of each test (i.e. on
-            ``stopTest``) with the accumulated values for that test.
-        """
-        super(TestByTestResult, self).__init__()
-        self._on_test = on_test
-
-    def startTest(self, test):
-        super(TestByTestResult, self).startTest(test)
-        self._start_time = self._now()
-        # There's no supported (i.e. tested) behaviour that relies on these
-        # being set, but it makes me more comfortable all the same. -- jml
-        self._status = None
-        self._details = None
-        self._stop_time = None
-
-    def stopTest(self, test):
-        self._stop_time = self._now()
-        tags = set(self.current_tags)
-        super(TestByTestResult, self).stopTest(test)
-        self._on_test(
-            test=test,
-            status=self._status,
-            start_time=self._start_time,
-            stop_time=self._stop_time,
-            tags=tags,
-            details=self._details)
-
-    def _err_to_details(self, test, err, details):
-        if details:
-            return details
-        return {'traceback': TracebackContent(err, test)}
-
-    def addSuccess(self, test, details=None):
-        super(TestByTestResult, self).addSuccess(test)
-        self._status = 'success'
-        self._details = details
-
-    def addFailure(self, test, err=None, details=None):
-        super(TestByTestResult, self).addFailure(test, err, details)
-        self._status = 'failure'
-        self._details = self._err_to_details(test, err, details)
-
-    def addError(self, test, err=None, details=None):
-        super(TestByTestResult, self).addError(test, err, details)
-        self._status = 'error'
-        self._details = self._err_to_details(test, err, details)
-
-    def addSkip(self, test, reason=None, details=None):
-        super(TestByTestResult, self).addSkip(test, reason, details)
-        self._status = 'skip'
-        if details is None:
-            details = {'reason': text_content(reason)}
-        elif reason:
-            # XXX: What if details already has 'reason' key?
-            details['reason'] = text_content(reason)
-        self._details = details
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        super(TestByTestResult, self).addExpectedFailure(test, err, details)
-        self._status = 'xfail'
-        self._details = self._err_to_details(test, err, details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        super(TestByTestResult, self).addUnexpectedSuccess(test, details)
-        self._status = 'success'
-        self._details = details
-
-
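-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module). Collecting one summary entry per completed test:
-#
-#   log = []
-#   def on_test(test, status, start_time, stop_time, tags, details):
-#       log.append((test.id(), status, stop_time - start_time))
-#   result = TestByTestResult(on_test)
-#   # Running a suite against `result` leaves (id, status, duration)
-#   # tuples in `log`, e.g. ('test_foo', 'success', timedelta(...)).
-
-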
-class TimestampingStreamResult(CopyStreamResult):
-    """A StreamResult decorator that assigns a timestamp when none is present.
-
-    This is convenient for ensuring events are timestamped.
-    """
-
-    def __init__(self, target):
-        super(TimestampingStreamResult, self).__init__([target])
-
-    def status(self, *args, **kwargs):
-        timestamp = kwargs.pop('timestamp', None)
-        if timestamp is None:
-            timestamp = datetime.datetime.now(utc)
-        super(TimestampingStreamResult, self).status(
-            *args, timestamp=timestamp, **kwargs)
-
-
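-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module). Events passing through gain a timestamp if they lack one:
-#
-#   from testtools import StreamResult
-#   result = TimestampingStreamResult(StreamResult())
-#   result.status(test_id='test_foo', test_status='success')
-#   # the wrapped target sees timestamp=datetime.datetime.now(utc)
-
-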
-class _StringException(Exception):
-    """An exception made from an arbitrary string."""
-
-    if not str_is_unicode:
-        def __init__(self, string):
-            if type(string) is not unicode:
-                raise TypeError("_StringException expects unicode, got %r" %
-                    (string,))
-            Exception.__init__(self, string)
-
-        def __str__(self):
-            return self.args[0].encode("utf-8")
-
-        def __unicode__(self):
-            return self.args[0]
-    # For 3.0 and above the default __str__ is fine, so we don't define one.
-
-    def __hash__(self):
-        return id(self)
-
-    def __eq__(self, other):
-        try:
-            return self.args == other.args
-        except AttributeError:
-            return False
-
-
-def _format_text_attachment(name, text):
-    if '\n' in text:
-        return "%s: {{{\n%s\n}}}\n" % (name, text)
-    return "%s: {{{%s}}}" % (name, text)
-
-
-def _details_to_str(details, special=None):
-    """Convert a details dict to a string.
-
-    :param details: A dictionary mapping short names to ``Content`` objects.
-    :param special: If specified, the name of an attachment that should have
-        special attention drawn to it: the primary attachment, normally the
-        traceback that caused the test to fail.
-    :return: A formatted string that can be included in text test results.
-    """
-    empty_attachments = []
-    binary_attachments = []
-    text_attachments = []
-    special_content = None
-    # sorted is for testing, may want to remove that and use a dict
-    # subclass with defined order for items instead.
-    for key, content in sorted(details.items()):
-        if content.content_type.type != 'text':
-            binary_attachments.append((key, content.content_type))
-            continue
-        text = content.as_text().strip()
-        if not text:
-            empty_attachments.append(key)
-            continue
-        # We want the 'special' attachment to be at the bottom.
-        if key == special:
-            special_content = '%s\n' % (text,)
-            continue
-        text_attachments.append(_format_text_attachment(key, text))
-    if text_attachments and not text_attachments[-1].endswith('\n'):
-        text_attachments.append('')
-    if special_content:
-        text_attachments.append(special_content)
-    lines = []
-    if binary_attachments:
-        lines.append('Binary content:\n')
-        for name, content_type in binary_attachments:
-            lines.append('  %s (%s)\n' % (name, content_type))
-    if empty_attachments:
-        lines.append('Empty attachments:\n')
-        for name in empty_attachments:
-            lines.append('  %s\n' % (name,))
-    if (binary_attachments or empty_attachments) and text_attachments:
-        lines.append('\n')
-    lines.append('\n'.join(text_attachments))
-    return _u('').join(lines)
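-
-
-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module), showing the formatting rules above:
-#
-#   from testtools.content import text_content
-#   _details_to_str({'log': text_content('line1')})
-#   # -> 'log: {{{line1}}}\n'
-#   _details_to_str({'traceback': text_content('boom')}, special='traceback')
-#   # -> 'boom\n'   (the special attachment is emitted last, without braces)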
diff --git a/lib/testtools/testtools/tests/__init__.py b/lib/testtools/testtools/tests/__init__.py
deleted file mode 100644
index d40fcb3..0000000
--- a/lib/testtools/testtools/tests/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
-
-"""Tests for testtools itself."""
-
-
-from unittest import TestSuite
-
-
-def test_suite():
-    from testtools.tests import (
-        matchers,
-        test_assert_that,
-        test_compat,
-        test_content,
-        test_content_type,
-        test_deferredruntest,
-        test_distutilscmd,
-        test_fixturesupport,
-        test_helpers,
-        test_monkey,
-        test_run,
-        test_runtest,
-        test_spinner,
-        test_tags,
-        test_testcase,
-        test_testresult,
-        test_testsuite,
-        )
-    modules = [
-        matchers,
-        test_assert_that,
-        test_compat,
-        test_content,
-        test_content_type,
-        test_deferredruntest,
-        test_distutilscmd,
-        test_fixturesupport,
-        test_helpers,
-        test_monkey,
-        test_run,
-        test_runtest,
-        test_spinner,
-        test_tags,
-        test_testcase,
-        test_testresult,
-        test_testsuite,
-        ]
-    suites = map(lambda x: x.test_suite(), modules)
-    return TestSuite(suites)
diff --git a/lib/testtools/testtools/tests/helpers.py b/lib/testtools/testtools/tests/helpers.py
deleted file mode 100644
index f766da3..0000000
--- a/lib/testtools/testtools/tests/helpers.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""Helpers for tests."""
-
-__all__ = [
-    'LoggingResult',
-    ]
-
-import sys
-
-from extras import safe_hasattr
-
-from testtools import TestResult
-from testtools.content import StackLinesContent
-from testtools import runtest
-
-
-# Importing to preserve compatibility.
-safe_hasattr
-
-# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
-try:
-    raise Exception
-except Exception:
-    an_exc_info = sys.exc_info()
-
-# Deprecated: This class's attributes are somewhat non-deterministic, which
-# leads to hard-to-predict tests (because upstream Python keeps changing
-# things).
-class LoggingResult(TestResult):
-    """TestResult that logs its event to a list."""
-
-    def __init__(self, log):
-        self._events = log
-        super(LoggingResult, self).__init__()
-
-    def startTest(self, test):
-        self._events.append(('startTest', test))
-        super(LoggingResult, self).startTest(test)
-
-    def stop(self):
-        self._events.append('stop')
-        super(LoggingResult, self).stop()
-
-    def stopTest(self, test):
-        self._events.append(('stopTest', test))
-        super(LoggingResult, self).stopTest(test)
-
-    def addFailure(self, test, error):
-        self._events.append(('addFailure', test, error))
-        super(LoggingResult, self).addFailure(test, error)
-
-    def addError(self, test, error):
-        self._events.append(('addError', test, error))
-        super(LoggingResult, self).addError(test, error)
-
-    def addSkip(self, test, reason):
-        self._events.append(('addSkip', test, reason))
-        super(LoggingResult, self).addSkip(test, reason)
-
-    def addSuccess(self, test):
-        self._events.append(('addSuccess', test))
-        super(LoggingResult, self).addSuccess(test)
-
-    def startTestRun(self):
-        self._events.append('startTestRun')
-        super(LoggingResult, self).startTestRun()
-
-    def stopTestRun(self):
-        self._events.append('stopTestRun')
-        super(LoggingResult, self).stopTestRun()
-
-    def done(self):
-        self._events.append('done')
-        super(LoggingResult, self).done()
-
-    def tags(self, new_tags, gone_tags):
-        self._events.append(('tags', new_tags, gone_tags))
-        super(LoggingResult, self).tags(new_tags, gone_tags)
-
-    def time(self, a_datetime):
-        self._events.append(('time', a_datetime))
-        super(LoggingResult, self).time(a_datetime)
-
-
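-# Editor's note: a minimal usage sketch (illustrative only, not part of the
-# original module). The log list is shared, so tests can assert on events:
-#
-#   events = []
-#   result = LoggingResult(events)
-#   result.startTestRun()
-#   result.stop()
-#   events   # -> ['startTestRun', 'stop']
-
-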
-def is_stack_hidden():
-    return StackLinesContent.HIDE_INTERNAL_STACK
-
-
-def hide_testtools_stack(should_hide=True):
-    result = StackLinesContent.HIDE_INTERNAL_STACK
-    StackLinesContent.HIDE_INTERNAL_STACK = should_hide
-    return result
-
-
-def run_with_stack_hidden(should_hide, f, *args, **kwargs):
-    old_should_hide = hide_testtools_stack(should_hide)
-    try:
-        return f(*args, **kwargs)
-    finally:
-        hide_testtools_stack(old_should_hide)
-
-
-class FullStackRunTest(runtest.RunTest):
-
-    def _run_user(self, fn, *args, **kwargs):
-        return run_with_stack_hidden(
-            False,
-            super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)
diff --git a/lib/testtools/testtools/tests/matchers/__init__.py b/lib/testtools/testtools/tests/matchers/__init__.py
deleted file mode 100644
index ebab308..0000000
--- a/lib/testtools/testtools/tests/matchers/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
-
-
-from unittest import TestSuite
-
-
-def test_suite():
-    from testtools.tests.matchers import (
-        test_basic,
-        test_datastructures,
-        test_dict,
-        test_doctest,
-        test_exception,
-        test_filesystem,
-        test_higherorder,
-        test_impl,
-        )
-    modules = [
-        test_basic,
-        test_datastructures,
-        test_dict,
-        test_doctest,
-        test_exception,
-        test_filesystem,
-        test_higherorder,
-        test_impl,
-        ]
-    suites = map(lambda x: x.test_suite(), modules)
-    return TestSuite(suites)
diff --git a/lib/testtools/testtools/tests/matchers/helpers.py b/lib/testtools/testtools/tests/matchers/helpers.py
deleted file mode 100644
index 3ff8727..0000000
--- a/lib/testtools/testtools/tests/matchers/helpers.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-from testtools.tests.helpers import FullStackRunTest
-
-
-class TestMatchersInterface(object):
-
-    run_tests_with = FullStackRunTest
-
-    def test_matches_match(self):
-        matcher = self.matches_matcher
-        matches = self.matches_matches
-        mismatches = self.matches_mismatches
-        for candidate in matches:
-            self.assertEqual(None, matcher.match(candidate))
-        for candidate in mismatches:
-            mismatch = matcher.match(candidate)
-            self.assertNotEqual(None, mismatch)
-            self.assertNotEqual(None, getattr(mismatch, 'describe', None))
-
-    def test__str__(self):
-        # [(expected, object to __str__)].
-        from testtools.matchers._doctest import DocTestMatches
-        examples = self.str_examples
-        for expected, matcher in examples:
-            self.assertThat(matcher, DocTestMatches(expected))
-
-    def test_describe_difference(self):
-        # [(expected, matchee, matcher), ...]
-        examples = self.describe_examples
-        for difference, matchee, matcher in examples:
-            mismatch = matcher.match(matchee)
-            self.assertEqual(difference, mismatch.describe())
-
-    def test_mismatch_details(self):
-        # The mismatch object must provide get_details, which must return a
-        # dictionary mapping names to Content objects.
-        examples = self.describe_examples
-        for difference, matchee, matcher in examples:
-            mismatch = matcher.match(matchee)
-            details = mismatch.get_details()
-            self.assertEqual(dict(details), details)
diff --git a/lib/testtools/testtools/tests/matchers/test_basic.py b/lib/testtools/testtools/tests/matchers/test_basic.py
deleted file mode 100644
index c53bc9e..0000000
--- a/lib/testtools/testtools/tests/matchers/test_basic.py
+++ /dev/null
@@ -1,396 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-import re
-
-from testtools import TestCase
-from testtools.compat import (
-    text_repr,
-    _b,
-    _u,
-    )
-from testtools.matchers._basic import (
-    _BinaryMismatch,
-    Contains,
-    DoesNotEndWith,
-    DoesNotStartWith,
-    EndsWith,
-    Equals,
-    Is,
-    IsInstance,
-    LessThan,
-    GreaterThan,
-    HasLength,
-    MatchesRegex,
-    NotEquals,
-    SameMembers,
-    StartsWith,
-    )
-from testtools.tests.helpers import FullStackRunTest
-from testtools.tests.matchers.helpers import TestMatchersInterface
-
-
-class Test_BinaryMismatch(TestCase):
-    """Mismatches from binary comparisons need useful describe output"""
-
-    _long_string = "This is a longish multiline non-ascii string\n\xa7"
-    _long_b = _b(_long_string)
-    _long_u = _u(_long_string)
-
-    class CustomRepr(object):
-        def __init__(self, repr_string):
-            self._repr_string = repr_string
-        def __repr__(self):
-            return _u('<object ') + _u(self._repr_string) + _u('>')
-
-    def test_short_objects(self):
-        o1, o2 = self.CustomRepr('a'), self.CustomRepr('b')
-        mismatch = _BinaryMismatch(o1, "!~", o2)
-        self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
-
-    def test_short_mixed_strings(self):
-        b, u = _b("\xa7"), _u("\xa7")
-        mismatch = _BinaryMismatch(b, "!~", u)
-        self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
-
-    def test_long_bytes(self):
-        one_line_b = self._long_b.replace(_b("\n"), _b(" "))
-        mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
-        self.assertEqual(mismatch.describe(),
-            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
-                text_repr(one_line_b),
-                text_repr(self._long_b, multiline=True)))
-
-    def test_long_unicode(self):
-        one_line_u = self._long_u.replace("\n", " ")
-        mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
-        self.assertEqual(mismatch.describe(),
-            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
-                text_repr(one_line_u),
-                text_repr(self._long_u, multiline=True)))
-
-    def test_long_mixed_strings(self):
-        mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
-        self.assertEqual(mismatch.describe(),
-            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
-                text_repr(self._long_b, multiline=True),
-                text_repr(self._long_u, multiline=True)))
-
-    def test_long_bytes_and_object(self):
-        obj = object()
-        mismatch = _BinaryMismatch(self._long_b, "!~", obj)
-        self.assertEqual(mismatch.describe(),
-            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
-                text_repr(self._long_b, multiline=True),
-                repr(obj)))
-
-    def test_long_unicode_and_object(self):
-        obj = object()
-        mismatch = _BinaryMismatch(self._long_u, "!~", obj)
-        self.assertEqual(mismatch.describe(),
-            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
-                text_repr(self._long_u, multiline=True),
-                repr(obj)))
-
-
-class TestEqualsInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = Equals(1)
-    matches_matches = [1]
-    matches_mismatches = [2]
-
-    str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
-
-    describe_examples = [("1 != 2", 2, Equals(1))]
-
-
-class TestNotEqualsInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = NotEquals(1)
-    matches_matches = [2]
-    matches_mismatches = [1]
-
-    str_examples = [
-        ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
-
-    describe_examples = [("1 == 1", 1, NotEquals(1))]
-
-
-class TestIsInterface(TestCase, TestMatchersInterface):
-
-    foo = object()
-    bar = object()
-
-    matches_matcher = Is(foo)
-    matches_matches = [foo]
-    matches_mismatches = [bar, 1]
-
-    str_examples = [("Is(2)", Is(2))]
-
-    describe_examples = [("1 is not 2", 2, Is(1))]
-
-
-class TestIsInstanceInterface(TestCase, TestMatchersInterface):
-
-    class Foo:pass
-
-    matches_matcher = IsInstance(Foo)
-    matches_matches = [Foo()]
-    matches_mismatches = [object(), 1, Foo]
-
-    str_examples = [
-            ("IsInstance(str)", IsInstance(str)),
-            ("IsInstance(str, int)", IsInstance(str, int)),
-            ]
-
-    describe_examples = [
-            ("'foo' is not an instance of int", 'foo', IsInstance(int)),
-            ("'foo' is not an instance of any of (int, type)", 'foo',
-             IsInstance(int, type)),
-            ]
-
-
-class TestLessThanInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = LessThan(4)
-    matches_matches = [-5, 3]
-    matches_mismatches = [4, 5, 5000]
-
-    str_examples = [
-        ("LessThan(12)", LessThan(12)),
-        ]
-
-    describe_examples = [
-        ('4 is not > 5', 5, LessThan(4)),
-        ('4 is not > 4', 4, LessThan(4)),
-        ]
-
-
-class TestGreaterThanInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = GreaterThan(4)
-    matches_matches = [5, 8]
-    matches_mismatches = [-2, 0, 4]
-
-    str_examples = [
-        ("GreaterThan(12)", GreaterThan(12)),
-        ]
-
-    describe_examples = [
-        ('5 is not < 4', 4, GreaterThan(5)),
-        ('4 is not < 4', 4, GreaterThan(4)),
-        ]
-
-
-class TestContainsInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = Contains('foo')
-    matches_matches = ['foo', 'afoo', 'fooa']
-    matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
-
-    str_examples = [
-        ("Contains(1)", Contains(1)),
-        ("Contains('foo')", Contains('foo')),
-        ]
-
-    describe_examples = [("1 not in 2", 2, Contains(1))]
-
-
-class DoesNotStartWithTests(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_describe(self):
-        mismatch = DoesNotStartWith("fo", "bo")
-        self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
-
-    def test_describe_non_ascii_unicode(self):
-        string = _u("A\xA7")
-        suffix = _u("B\xA7")
-        mismatch = DoesNotStartWith(string, suffix)
-        self.assertEqual("%s does not start with %s." % (
-            text_repr(string), text_repr(suffix)),
-            mismatch.describe())
-
-    def test_describe_non_ascii_bytes(self):
-        string = _b("A\xA7")
-        suffix = _b("B\xA7")
-        mismatch = DoesNotStartWith(string, suffix)
-        self.assertEqual("%r does not start with %r." % (string, suffix),
-            mismatch.describe())
-
-
-class StartsWithTests(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_str(self):
-        matcher = StartsWith("bar")
-        self.assertEqual("StartsWith('bar')", str(matcher))
-
-    def test_str_with_bytes(self):
-        b = _b("\xA7")
-        matcher = StartsWith(b)
-        self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
-
-    def test_str_with_unicode(self):
-        u = _u("\xA7")
-        matcher = StartsWith(u)
-        self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
-
-    def test_match(self):
-        matcher = StartsWith("bar")
-        self.assertIs(None, matcher.match("barf"))
-
-    def test_mismatch_returns_does_not_start_with(self):
-        matcher = StartsWith("bar")
-        self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
-
-    def test_mismatch_sets_matchee(self):
-        matcher = StartsWith("bar")
-        mismatch = matcher.match("foo")
-        self.assertEqual("foo", mismatch.matchee)
-
-    def test_mismatch_sets_expected(self):
-        matcher = StartsWith("bar")
-        mismatch = matcher.match("foo")
-        self.assertEqual("bar", mismatch.expected)
-
-
-class DoesNotEndWithTests(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_describe(self):
-        mismatch = DoesNotEndWith("fo", "bo")
-        self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
-
-    def test_describe_non_ascii_unicode(self):
-        string = _u("A\xA7")
-        suffix = _u("B\xA7")
-        mismatch = DoesNotEndWith(string, suffix)
-        self.assertEqual("%s does not end with %s." % (
-            text_repr(string), text_repr(suffix)),
-            mismatch.describe())
-
-    def test_describe_non_ascii_bytes(self):
-        string = _b("A\xA7")
-        suffix = _b("B\xA7")
-        mismatch = DoesNotEndWith(string, suffix)
-        self.assertEqual("%r does not end with %r." % (string, suffix),
-            mismatch.describe())
-
-
-class EndsWithTests(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_str(self):
-        matcher = EndsWith("bar")
-        self.assertEqual("EndsWith('bar')", str(matcher))
-
-    def test_str_with_bytes(self):
-        b = _b("\xA7")
-        matcher = EndsWith(b)
-        self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
-
-    def test_str_with_unicode(self):
-        u = _u("\xA7")
-        matcher = EndsWith(u)
-        self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
-
-    def test_match(self):
-        matcher = EndsWith("arf")
-        self.assertIs(None, matcher.match("barf"))
-
-    def test_mismatch_returns_does_not_end_with(self):
-        matcher = EndsWith("bar")
-        self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
-
-    def test_mismatch_sets_matchee(self):
-        matcher = EndsWith("bar")
-        mismatch = matcher.match("foo")
-        self.assertEqual("foo", mismatch.matchee)
-
-    def test_mismatch_sets_expected(self):
-        matcher = EndsWith("bar")
-        mismatch = matcher.match("foo")
-        self.assertEqual("bar", mismatch.expected)
-
-
-class TestSameMembers(TestCase, TestMatchersInterface):
-
-    matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}])
-    matches_matches = [
-        [1, 1, 2, 3, {'foo': 'bar'}],
-        [3, {'foo': 'bar'}, 1, 2, 1],
-        [3, 2, 1, {'foo': 'bar'}, 1],
-        (2, {'foo': 'bar'}, 3, 1, 1),
-        ]
-    matches_mismatches = [
-        set([1, 2, 3]),
-        [1, 1, 2, 3, 5],
-        [1, 2, 3, {'foo': 'bar'}],
-        'foo',
-        ]
-
-    describe_examples = [
-        (("elements differ:\n"
-          "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n"
-          "actual    = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n"
-          ": \n"
-          "missing:    ['watermelon']\n"
-          "extra:      ['sparrow']"
-          ),
-         ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',],
-         SameMembers(
-             ['apple', 'orange', 'canteloupe', 'watermelon',
-              'lemon', 'banana',])),
-        ]
-
-    str_examples = [
-        ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])),
-        ]
-
-
-class TestMatchesRegex(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesRegex('a|b')
-    matches_matches = ['a', 'b']
-    matches_mismatches = ['c']
-
-    str_examples = [
-        ("MatchesRegex('a|b')", MatchesRegex('a|b')),
-        ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
-        ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
-        ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
-        ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
-        ]
-
-    describe_examples = [
-        ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
-        ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
-        ("%r does not match /\\s+\\xa7/" % (_b('c'),),
-            _b('c'), MatchesRegex(_b("\\s+\xA7"))),
-        ("%r does not match /\\s+\\xa7/" % (_u('c'),),
-            _u('c'), MatchesRegex(_u("\\s+\xA7"))),
-        ]
-
-
-class TestHasLength(TestCase, TestMatchersInterface):
-
-    matches_matcher = HasLength(2)
-    matches_matches = [[1, 2]]
-    matches_mismatches = [[], [1], [3, 2, 1]]
-
-    str_examples = [
-        ("HasLength(2)", HasLength(2)),
-        ]
-
-    describe_examples = [
-        ("len([]) != 1", [], HasLength(1)),
-        ]
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/matchers/test_datastructures.py b/lib/testtools/testtools/tests/matchers/test_datastructures.py
deleted file mode 100644
index f6d9d86..0000000
--- a/lib/testtools/testtools/tests/matchers/test_datastructures.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-import doctest
-import re
-import sys
-
-from testtools import TestCase
-from testtools.compat import StringIO
-from testtools.matchers import (
-    Annotate,
-    Equals,
-    LessThan,
-    MatchesRegex,
-    NotEquals,
-    )
-from testtools.matchers._datastructures import (
-    ContainsAll,
-    MatchesListwise,
-    MatchesStructure,
-    MatchesSetwise,
-    )
-from testtools.tests.helpers import FullStackRunTest
-from testtools.tests.matchers.helpers import TestMatchersInterface
-
-
-def run_doctest(obj, name):
-    p = doctest.DocTestParser()
-    t = p.get_doctest(
-        obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
-    r = doctest.DocTestRunner()
-    output = StringIO()
-    r.run(t, out=output.write)
-    return r.failures, output.getvalue()
-
-
-class TestMatchesListwise(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_docstring(self):
-        failure_count, output = run_doctest(
-            MatchesListwise, "MatchesListwise")
-        if failure_count:
-            self.fail("Doctest failed with %s" % output)
-
-
-class TestMatchesStructure(TestCase, TestMatchersInterface):
-
-    class SimpleClass:
-        def __init__(self, x, y):
-            self.x = x
-            self.y = y
-
-    matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
-    matches_matches = [SimpleClass(1, 2)]
-    matches_mismatches = [
-        SimpleClass(2, 2),
-        SimpleClass(1, 1),
-        SimpleClass(3, 3),
-        ]
-
-    str_examples = [
-        ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
-        ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
-        ("MatchesStructure(x=Equals(1), y=Equals(2))",
-         MatchesStructure(x=Equals(1), y=Equals(2))),
-        ]
-
-    describe_examples = [
-        ("""\
-Differences: [
-3 != 1: x
-]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
-        ("""\
-Differences: [
-3 != 2: y
-]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
-        ("""\
-Differences: [
-0 != 1: x
-0 != 2: y
-]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
-        ]
-
-    def test_fromExample(self):
-        self.assertThat(
-            self.SimpleClass(1, 2),
-            MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
-
-    def test_byEquality(self):
-        self.assertThat(
-            self.SimpleClass(1, 2),
-            MatchesStructure.byEquality(x=1))
-
-    def test_withStructure(self):
-        self.assertThat(
-            self.SimpleClass(1, 2),
-            MatchesStructure.byMatcher(LessThan, x=2))
-
-    def test_update(self):
-        self.assertThat(
-            self.SimpleClass(1, 2),
-            MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
-
-    def test_update_none(self):
-        self.assertThat(
-            self.SimpleClass(1, 2),
-            MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
-                z=None))
-
-
-class TestMatchesSetwise(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def assertMismatchWithDescriptionMatching(self, value, matcher,
-                                              description_matcher):
-        mismatch = matcher.match(value)
-        if mismatch is None:
-            self.fail("%s matched %s" % (matcher, value))
-        actual_description = mismatch.describe()
-        self.assertThat(
-            actual_description,
-            Annotate(
-                "%s matching %s" % (matcher, value),
-                description_matcher))
-
-    def test_matches(self):
-        self.assertIs(
-            None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
-
-    def test_mismatches(self):
-        self.assertMismatchWithDescriptionMatching(
-            [2, 3], MatchesSetwise(Equals(1), Equals(2)),
-            MatchesRegex('.*There was 1 mismatch$', re.S))
-
-    def test_too_many_matchers(self):
-        self.assertMismatchWithDescriptionMatching(
-            [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
-            Equals('There was 1 matcher left over: Equals(1)'))
-
-    def test_too_many_values(self):
-        self.assertMismatchWithDescriptionMatching(
-            [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
-            Equals('There was 1 value left over: [3]'))
-
-    def test_two_too_many_matchers(self):
-        self.assertMismatchWithDescriptionMatching(
-            [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
-            MatchesRegex(
-                'There were 2 matchers left over: Equals\([12]\), '
-                'Equals\([12]\)'))
-
-    def test_two_too_many_values(self):
-        self.assertMismatchWithDescriptionMatching(
-            [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
-            MatchesRegex(
-                'There were 2 values left over: \[[34], [34]\]'))
-
-    def test_mismatch_and_too_many_matchers(self):
-        self.assertMismatchWithDescriptionMatching(
-            [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
-            MatchesRegex(
-                '.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
-                re.S))
-
-    def test_mismatch_and_too_many_values(self):
-        self.assertMismatchWithDescriptionMatching(
-            [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
-            MatchesRegex(
-                '.*There was 1 mismatch and 1 extra value: \[[34]\]',
-                re.S))
-
-    def test_mismatch_and_two_too_many_matchers(self):
-        self.assertMismatchWithDescriptionMatching(
-            [3, 4], MatchesSetwise(
-                Equals(0), Equals(1), Equals(2), Equals(3)),
-            MatchesRegex(
-                '.*There was 1 mismatch and 2 extra matchers: '
-                'Equals\([012]\), Equals\([012]\)', re.S))
-
-    def test_mismatch_and_two_too_many_values(self):
-        self.assertMismatchWithDescriptionMatching(
-            [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
-            MatchesRegex(
-                '.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
-                re.S))
-
-
-class TestContainsAllInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = ContainsAll(['foo', 'bar'])
-    matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']]
-    matches_mismatches = [['f', 'g'], ['foo', 'baz'], []]
-
-    str_examples = [(
-        "MatchesAll(Contains('foo'), Contains('bar'))",
-        ContainsAll(['foo', 'bar'])),
-        ]
-
-    describe_examples = [("""Differences: [
-'baz' not in 'foo'
-]""",
-    'foo', ContainsAll(['foo', 'baz']))]
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/matchers/test_dict.py b/lib/testtools/testtools/tests/matchers/test_dict.py
deleted file mode 100644
index 00368dd..0000000
--- a/lib/testtools/testtools/tests/matchers/test_dict.py
+++ /dev/null
@@ -1,227 +0,0 @@
-from testtools import TestCase
-from testtools.matchers import (
-    Equals,
-    NotEquals,
-    Not,
-    )
-from testtools.matchers._dict import (
-    ContainedByDict,
-    ContainsDict,
-    KeysEqual,
-    MatchesAllDict,
-    MatchesDict,
-    _SubDictOf,
-    )
-from testtools.tests.matchers.helpers import TestMatchersInterface
-
-
-class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})
-    matches_matches = [3, 4]
-    matches_mismatches = [1, 2]
-
-    str_examples = [
-        ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})",
-         matches_matcher)]
-
-    describe_examples = [
-        ("""a: 1 == 1""", 1, matches_matcher),
-        ]
-
-
-class TestKeysEqualWithList(TestCase, TestMatchersInterface):
-
-    matches_matcher = KeysEqual('foo', 'bar')
-    matches_matches = [
-        {'foo': 0, 'bar': 1},
-        ]
-    matches_mismatches = [
-        {},
-        {'foo': 0},
-        {'bar': 1},
-        {'foo': 0, 'bar': 1, 'baz': 2},
-        {'a': None, 'b': None, 'c': None},
-        ]
-
-    str_examples = [
-        ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
-        ]
-
-    describe_examples = []
-
-    def test_description(self):
-        matchee = {'foo': 0, 'bar': 1, 'baz': 2}
-        mismatch = KeysEqual('foo', 'bar').match(matchee)
-        description = mismatch.describe()
-        self.assertThat(
-            description, Equals(
-                "['bar', 'foo'] does not match %r: Keys not equal"
-                % (matchee,)))
-
-
-class TestKeysEqualWithDict(TestKeysEqualWithList):
-
-    matches_matcher = KeysEqual({'foo': 3, 'bar': 4})
-
-
-class TestSubDictOf(TestCase, TestMatchersInterface):
-
-    matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
-
-    matches_matches = [
-        {'foo': 'bar', 'baz': 'qux'},
-        {'foo': 'bar'},
-        ]
-
-    matches_mismatches = [
-        {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'},
-        {'foo': 'bar', 'cat': 'dog'},
-        ]
-
-    str_examples = []
-    describe_examples = []
-
-
-class TestMatchesDict(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesDict(
-        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
-
-    matches_matches = [
-        {'foo': 'bar', 'baz': None},
-        {'foo': 'bar', 'baz': 'quux'},
-        ]
-    matches_mismatches = [
-        {},
-        {'foo': 'bar', 'baz': 'qux'},
-        {'foo': 'bop', 'baz': 'qux'},
-        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
-        {'foo': 'bar', 'cat': 'dog'},
-        ]
-
-    str_examples = [
-        ("MatchesDict({'baz': %s, 'foo': %s})" % (
-                Not(Equals('qux')), Equals('bar')),
-         matches_matcher),
-        ]
-
-    describe_examples = [
-        ("Missing: {\n"
-         "  'baz': Not(Equals('qux')),\n"
-         "  'foo': Equals('bar'),\n"
-         "}",
-         {}, matches_matcher),
-        ("Differences: {\n"
-         "  'baz': 'qux' matches Equals('qux'),\n"
-         "}",
-         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
-        ("Differences: {\n"
-         "  'baz': 'qux' matches Equals('qux'),\n"
-         "  'foo': 'bar' != 'bop',\n"
-         "}",
-         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
-        ("Extra: {\n"
-         "  'cat': 'dog',\n"
-         "}",
-         {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher),
-        ("Extra: {\n"
-         "  'cat': 'dog',\n"
-         "}\n"
-         "Missing: {\n"
-         "  'baz': Not(Equals('qux')),\n"
-         "}",
-         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
-        ]
-
-
-class TestContainsDict(TestCase, TestMatchersInterface):
-
-    matches_matcher = ContainsDict(
-        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
-
-    matches_matches = [
-        {'foo': 'bar', 'baz': None},
-        {'foo': 'bar', 'baz': 'quux'},
-        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
-        ]
-    matches_mismatches = [
-        {},
-        {'foo': 'bar', 'baz': 'qux'},
-        {'foo': 'bop', 'baz': 'qux'},
-        {'foo': 'bar', 'cat': 'dog'},
-        {'foo': 'bar'},
-        ]
-
-    str_examples = [
-        ("ContainsDict({'baz': %s, 'foo': %s})" % (
-                Not(Equals('qux')), Equals('bar')),
-         matches_matcher),
-        ]
-
-    describe_examples = [
-        ("Missing: {\n"
-         "  'baz': Not(Equals('qux')),\n"
-         "  'foo': Equals('bar'),\n"
-         "}",
-         {}, matches_matcher),
-        ("Differences: {\n"
-         "  'baz': 'qux' matches Equals('qux'),\n"
-         "}",
-         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
-        ("Differences: {\n"
-         "  'baz': 'qux' matches Equals('qux'),\n"
-         "  'foo': 'bar' != 'bop',\n"
-         "}",
-         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
-        ("Missing: {\n"
-         "  'baz': Not(Equals('qux')),\n"
-         "}",
-         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
-        ]
-
-
-class TestContainedByDict(TestCase, TestMatchersInterface):
-
-    matches_matcher = ContainedByDict(
-        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
-
-    matches_matches = [
-        {},
-        {'foo': 'bar'},
-        {'foo': 'bar', 'baz': 'quux'},
-        {'baz': 'quux'},
-        ]
-    matches_mismatches = [
-        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
-        {'foo': 'bar', 'baz': 'qux'},
-        {'foo': 'bop', 'baz': 'qux'},
-        {'foo': 'bar', 'cat': 'dog'},
-        ]
-
-    str_examples = [
-        ("ContainedByDict({'baz': %s, 'foo': %s})" % (
-                Not(Equals('qux')), Equals('bar')),
-         matches_matcher),
-        ]
-
-    describe_examples = [
-        ("Differences: {\n"
-         "  'baz': 'qux' matches Equals('qux'),\n"
-         "}",
-         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
-        ("Differences: {\n"
-         "  'baz': 'qux' matches Equals('qux'),\n"
-         "  'foo': 'bar' != 'bop',\n"
-         "}",
-         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
-        ("Extra: {\n"
-         "  'cat': 'dog',\n"
-         "}",
-         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
-        ]
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/matchers/test_doctest.py b/lib/testtools/testtools/tests/matchers/test_doctest.py
deleted file mode 100644
index 81b9579..0000000
--- a/lib/testtools/testtools/tests/matchers/test_doctest.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-import doctest
-
-from testtools import TestCase
-from testtools.compat import (
-    str_is_unicode,
-    _b,
-    _u,
-    )
-from testtools.matchers._doctest import DocTestMatches
-from testtools.tests.helpers import FullStackRunTest
-from testtools.tests.matchers.helpers import TestMatchersInterface
-
-
-class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
-    matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
-    matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
-
-    str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
-        DocTestMatches("Ran 1 test in ...s")),
-        ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
-        ]
-
-    describe_examples = [('Expected:\n    Ran 1 tests in ...s\nGot:\n'
-        '    Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
-        DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
-
-
-class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
-
-    matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
-    matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
-    matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
-
-    str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
-        DocTestMatches(_u("\xa7"))),
-        ]
-
-    describe_examples = [(
-        _u("Expected:\n    \xa7\nGot:\n    a\n"),
-        "a",
-        DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
-
-
-class TestDocTestMatchesSpecific(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test___init__simple(self):
-        matcher = DocTestMatches("foo")
-        self.assertEqual("foo\n", matcher.want)
-
-    def test___init__flags(self):
-        matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
-        self.assertEqual("bar\n", matcher.want)
-        self.assertEqual(doctest.ELLIPSIS, matcher.flags)
-
-    def test_describe_non_ascii_bytes(self):
-        """Even with bytestrings, the mismatch should be coercible to unicode
-
-        DocTestMatches is intended for text, but the Python 2 str type also
-        permits arbitrary binary inputs. This is a slightly bogus thing to do,
-        and under Python 3 using bytes objects will reasonably raise an error.
-        """
-        header = _b("\x89PNG\r\n\x1a\n...")
-        if str_is_unicode:
-            self.assertRaises(TypeError,
-                DocTestMatches, header, doctest.ELLIPSIS)
-            return
-        matcher = DocTestMatches(header, doctest.ELLIPSIS)
-        mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
-        # Must be treatable as unicode text, the exact output matters less
-        self.assertTrue(unicode(mismatch.describe()))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/matchers/test_exception.py b/lib/testtools/testtools/tests/matchers/test_exception.py
deleted file mode 100644
index a74043a..0000000
--- a/lib/testtools/testtools/tests/matchers/test_exception.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-import sys
-
-from testtools import TestCase
-from testtools.matchers import (
-    AfterPreprocessing,
-    Equals,
-    )
-from testtools.matchers._exception import (
-    MatchesException,
-    Raises,
-    raises,
-    )
-from testtools.tests.helpers import FullStackRunTest
-from testtools.tests.matchers.helpers import TestMatchersInterface
-
-
-def make_error(type, *args, **kwargs):
-    try:
-        raise type(*args, **kwargs)
-    except type:
-        return sys.exc_info()
-
-
-class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesException(ValueError("foo"))
-    error_foo = make_error(ValueError, 'foo')
-    error_bar = make_error(ValueError, 'bar')
-    error_base_foo = make_error(Exception, 'foo')
-    matches_matches = [error_foo]
-    matches_mismatches = [error_bar, error_base_foo]
-
-    str_examples = [
-        ("MatchesException(Exception('foo',))",
-         MatchesException(Exception('foo')))
-        ]
-    describe_examples = [
-        ("%r is not a %r" % (Exception, ValueError),
-         error_base_foo,
-         MatchesException(ValueError("foo"))),
-        ("ValueError('bar',) has different arguments to ValueError('foo',).",
-         error_bar,
-         MatchesException(ValueError("foo"))),
-        ]
-
-
-class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesException(ValueError)
-    error_foo = make_error(ValueError, 'foo')
-    error_sub = make_error(UnicodeError, 'bar')
-    error_base_foo = make_error(Exception, 'foo')
-    matches_matches = [error_foo, error_sub]
-    matches_mismatches = [error_base_foo]
-
-    str_examples = [
-        ("MatchesException(%r)" % Exception,
-         MatchesException(Exception))
-        ]
-    describe_examples = [
-        ("%r is not a %r" % (Exception, ValueError),
-         error_base_foo,
-         MatchesException(ValueError)),
-        ]
-
-
-class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesException(ValueError, 'fo.')
-    error_foo = make_error(ValueError, 'foo')
-    error_sub = make_error(UnicodeError, 'foo')
-    error_bar = make_error(ValueError, 'bar')
-    matches_matches = [error_foo, error_sub]
-    matches_mismatches = [error_bar]
-
-    str_examples = [
-        ("MatchesException(%r)" % Exception,
-         MatchesException(Exception, 'fo.'))
-        ]
-    describe_examples = [
-        ("'bar' does not match /fo./",
-         error_bar, MatchesException(ValueError, "fo.")),
-        ]
-
-
-class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesException(
-        ValueError, AfterPreprocessing(str, Equals('foo')))
-    error_foo = make_error(ValueError, 'foo')
-    error_sub = make_error(UnicodeError, 'foo')
-    error_bar = make_error(ValueError, 'bar')
-    matches_matches = [error_foo, error_sub]
-    matches_mismatches = [error_bar]
-
-    str_examples = [
-        ("MatchesException(%r)" % Exception,
-         MatchesException(Exception, Equals('foo')))
-        ]
-    describe_examples = [
-        ("5 != %r" % (error_bar[1],),
-         error_bar, MatchesException(ValueError, Equals(5))),
-        ]
-
-
-class TestRaisesInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = Raises()
-    def boom():
-        raise Exception('foo')
-    matches_matches = [boom]
-    matches_mismatches = [lambda:None]
-
-    # Tricky to get function objects to render consistently, and the
-    # interfaces helper uses assertEqual rather than (for instance)
-    # DocTestMatches.
-    str_examples = []
-
-    describe_examples = []
-
-
-class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = Raises(
-        exception_matcher=MatchesException(Exception('foo')))
-    def boom_bar():
-        raise Exception('bar')
-    def boom_foo():
-        raise Exception('foo')
-    matches_matches = [boom_foo]
-    matches_mismatches = [lambda:None, boom_bar]
-
-    # Tricky to get function objects to render consistently, and the
-    # interfaces helper uses assertEqual rather than (for instance)
-    # DocTestMatches.
-    str_examples = []
-
-    describe_examples = []
-
-
-class TestRaisesBaseTypes(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def raiser(self):
-        raise KeyboardInterrupt('foo')
-
-    def test_KeyboardInterrupt_matched(self):
-        # When KeyboardInterrupt is matched, it is swallowed.
-        matcher = Raises(MatchesException(KeyboardInterrupt))
-        self.assertThat(self.raiser, matcher)
-
-    def test_KeyboardInterrupt_propagates(self):
-        # The default 'it raised' propagates KeyboardInterrupt.
-        match_keyb = Raises(MatchesException(KeyboardInterrupt))
-        def raise_keyb_from_match():
-            matcher = Raises()
-            matcher.match(self.raiser)
-        self.assertThat(raise_keyb_from_match, match_keyb)
-
-    def test_KeyboardInterrupt_match_Exception_propagates(self):
-        # If the raised exception isn't matched, and it is not a subclass of
-        # Exception, it is propagated.
-        match_keyb = Raises(MatchesException(KeyboardInterrupt))
-        def raise_keyb_from_match():
-            matcher = Raises(MatchesException(Exception))
-            matcher.match(self.raiser)
-        self.assertThat(raise_keyb_from_match, match_keyb)
-
-
-class TestRaisesConvenience(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_exc_type(self):
-        self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
-
-    def test_exc_value(self):
-        e = RuntimeError("You lose!")
-        def raiser():
-            raise e
-        self.assertThat(raiser, raises(e))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
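[Note for reviewers: the deleted file above pins down testtools' exception-matching API. As a quick orientation, here is a minimal sketch of how those matchers are used, relying only on behaviour the deleted tests demonstrate; the DemoExceptionMatching class is illustrative, not part of the library.]

    from testtools import TestCase
    from testtools.matchers import MatchesException, Raises, raises


    class DemoExceptionMatching(TestCase):

        def test_type_match(self):
            # Raises adapts a zero-argument callable into the exc_info
            # tuple that MatchesException inspects.
            self.assertThat(
                lambda: int('nope'),
                Raises(MatchesException(ValueError)))

        def test_convenience_spelling(self):
            # raises(...) is shorthand for Raises(MatchesException(...)).
            self.assertThat(lambda: 1 / 0, raises(ZeroDivisionError))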
diff --git a/lib/testtools/testtools/tests/matchers/test_filesystem.py b/lib/testtools/testtools/tests/matchers/test_filesystem.py
deleted file mode 100644
index 917ff2e..0000000
--- a/lib/testtools/testtools/tests/matchers/test_filesystem.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-import os
-import shutil
-import tarfile
-import tempfile
-
-from testtools import TestCase
-from testtools.matchers import (
-    Contains,
-    DocTestMatches,
-    Equals,
-    )
-from testtools.matchers._filesystem import (
-    DirContains,
-    DirExists,
-    FileContains,
-    FileExists,
-    HasPermissions,
-    PathExists,
-    SamePath,
-    TarballContains,
-    )
-
-
-class PathHelpers(object):
-
-    def mkdtemp(self):
-        directory = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, directory)
-        return directory
-
-    def create_file(self, filename, contents=''):
-        fp = open(filename, 'w')
-        try:
-            fp.write(contents)
-        finally:
-            fp.close()
-
-    def touch(self, filename):
-        return self.create_file(filename)
-
-
-class TestPathExists(TestCase, PathHelpers):
-
-    def test_exists(self):
-        tempdir = self.mkdtemp()
-        self.assertThat(tempdir, PathExists())
-
-    def test_not_exists(self):
-        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
-        mismatch = PathExists().match(doesntexist)
-        self.assertThat(
-            "%s does not exist." % doesntexist, Equals(mismatch.describe()))
-
-
-class TestDirExists(TestCase, PathHelpers):
-
-    def test_exists(self):
-        tempdir = self.mkdtemp()
-        self.assertThat(tempdir, DirExists())
-
-    def test_not_exists(self):
-        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
-        mismatch = DirExists().match(doesntexist)
-        self.assertThat(
-            PathExists().match(doesntexist).describe(),
-            Equals(mismatch.describe()))
-
-    def test_not_a_directory(self):
-        filename = os.path.join(self.mkdtemp(), 'foo')
-        self.touch(filename)
-        mismatch = DirExists().match(filename)
-        self.assertThat(
-            "%s is not a directory." % filename, Equals(mismatch.describe()))
-
-
-class TestFileExists(TestCase, PathHelpers):
-
-    def test_exists(self):
-        tempdir = self.mkdtemp()
-        filename = os.path.join(tempdir, 'filename')
-        self.touch(filename)
-        self.assertThat(filename, FileExists())
-
-    def test_not_exists(self):
-        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
-        mismatch = FileExists().match(doesntexist)
-        self.assertThat(
-            PathExists().match(doesntexist).describe(),
-            Equals(mismatch.describe()))
-
-    def test_not_a_file(self):
-        tempdir = self.mkdtemp()
-        mismatch = FileExists().match(tempdir)
-        self.assertThat(
-            "%s is not a file." % tempdir, Equals(mismatch.describe()))
-
-
-class TestDirContains(TestCase, PathHelpers):
-
-    def test_empty(self):
-        tempdir = self.mkdtemp()
-        self.assertThat(tempdir, DirContains([]))
-
-    def test_not_exists(self):
-        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
-        mismatch = DirContains([]).match(doesntexist)
-        self.assertThat(
-            PathExists().match(doesntexist).describe(),
-            Equals(mismatch.describe()))
-
-    def test_contains_files(self):
-        tempdir = self.mkdtemp()
-        self.touch(os.path.join(tempdir, 'foo'))
-        self.touch(os.path.join(tempdir, 'bar'))
-        self.assertThat(tempdir, DirContains(['bar', 'foo']))
-
-    def test_matcher(self):
-        tempdir = self.mkdtemp()
-        self.touch(os.path.join(tempdir, 'foo'))
-        self.touch(os.path.join(tempdir, 'bar'))
-        self.assertThat(tempdir, DirContains(matcher=Contains('bar')))
-
-    def test_neither_specified(self):
-        self.assertRaises(AssertionError, DirContains)
-
-    def test_both_specified(self):
-        self.assertRaises(
-            AssertionError, DirContains, filenames=[], matcher=Contains('a'))
-
-    def test_does_not_contain_files(self):
-        tempdir = self.mkdtemp()
-        self.touch(os.path.join(tempdir, 'foo'))
-        mismatch = DirContains(['bar', 'foo']).match(tempdir)
-        self.assertThat(
-            Equals(['bar', 'foo']).match(['foo']).describe(),
-            Equals(mismatch.describe()))
-
-
-class TestFileContains(TestCase, PathHelpers):
-
-    def test_not_exists(self):
-        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
-        mismatch = FileContains('').match(doesntexist)
-        self.assertThat(
-            PathExists().match(doesntexist).describe(),
-            Equals(mismatch.describe()))
-
-    def test_contains(self):
-        tempdir = self.mkdtemp()
-        filename = os.path.join(tempdir, 'foo')
-        self.create_file(filename, 'Hello World!')
-        self.assertThat(filename, FileContains('Hello World!'))
-
-    def test_matcher(self):
-        tempdir = self.mkdtemp()
-        filename = os.path.join(tempdir, 'foo')
-        self.create_file(filename, 'Hello World!')
-        self.assertThat(
-            filename, FileContains(matcher=DocTestMatches('Hello World!')))
-
-    def test_neither_specified(self):
-        self.assertRaises(AssertionError, FileContains)
-
-    def test_both_specified(self):
-        self.assertRaises(
-            AssertionError, FileContains, contents=[], matcher=Contains('a'))
-
-    def test_does_not_contain(self):
-        tempdir = self.mkdtemp()
-        filename = os.path.join(tempdir, 'foo')
-        self.create_file(filename, 'Goodbye Cruel World!')
-        mismatch = FileContains('Hello World!').match(filename)
-        self.assertThat(
-            Equals('Hello World!').match('Goodbye Cruel World!').describe(),
-            Equals(mismatch.describe()))
-
-
-class TestTarballContains(TestCase, PathHelpers):
-
-    def test_match(self):
-        tempdir = self.mkdtemp()
-        in_temp_dir = lambda x: os.path.join(tempdir, x)
-        self.touch(in_temp_dir('a'))
-        self.touch(in_temp_dir('b'))
-        tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
-        tarball.add(in_temp_dir('a'), 'a')
-        tarball.add(in_temp_dir('b'), 'b')
-        tarball.close()
-        self.assertThat(
-            in_temp_dir('foo.tar.gz'), TarballContains(['b', 'a']))
-
-    def test_mismatch(self):
-        tempdir = self.mkdtemp()
-        in_temp_dir = lambda x: os.path.join(tempdir, x)
-        self.touch(in_temp_dir('a'))
-        self.touch(in_temp_dir('b'))
-        tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
-        tarball.add(in_temp_dir('a'), 'a')
-        tarball.add(in_temp_dir('b'), 'b')
-        tarball.close()
-        mismatch = TarballContains(['d', 'c']).match(in_temp_dir('foo.tar.gz'))
-        self.assertEqual(
-            mismatch.describe(),
-            Equals(['c', 'd']).match(['a', 'b']).describe())
-
-
-class TestSamePath(TestCase, PathHelpers):
-
-    def test_same_string(self):
-        self.assertThat('foo', SamePath('foo'))
-
-    def test_relative_and_absolute(self):
-        path = 'foo'
-        abspath = os.path.abspath(path)
-        self.assertThat(path, SamePath(abspath))
-        self.assertThat(abspath, SamePath(path))
-
-    def test_real_path(self):
-        tempdir = self.mkdtemp()
-        source = os.path.join(tempdir, 'source')
-        self.touch(source)
-        target = os.path.join(tempdir, 'target')
-        try:
-            os.symlink(source, target)
-        except (AttributeError, NotImplementedError):
-            self.skip("No symlink support")
-        self.assertThat(source, SamePath(target))
-        self.assertThat(target, SamePath(source))
-
-
-class TestHasPermissions(TestCase, PathHelpers):
-
-    def test_match(self):
-        tempdir = self.mkdtemp()
-        filename = os.path.join(tempdir, 'filename')
-        self.touch(filename)
-        permissions = oct(os.stat(filename).st_mode)[-4:]
-        self.assertThat(filename, HasPermissions(permissions))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
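[Note for reviewers: similarly, a small sketch of the filesystem matchers exercised above, assuming they are re-exported from testtools.matchers as in released testtools; the class name is illustrative.]

    import os
    import shutil
    import tempfile

    from testtools import TestCase
    from testtools.matchers import DirContains, FileContains, PathExists


    class DemoFilesystemMatchers(TestCase):

        def test_layout(self):
            tempdir = tempfile.mkdtemp()
            self.addCleanup(shutil.rmtree, tempdir)
            config = os.path.join(tempdir, 'config')
            with open(config, 'w') as f:
                f.write('verbose = true\n')
            self.assertThat(tempdir, PathExists())
            # DirContains asserts the exact directory listing...
            self.assertThat(tempdir, DirContains(['config']))
            # ...and FileContains the exact file contents.
            self.assertThat(config, FileContains('verbose = true\n'))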
diff --git a/lib/testtools/testtools/tests/matchers/test_higherorder.py b/lib/testtools/testtools/tests/matchers/test_higherorder.py
deleted file mode 100644
index fb86b7f..0000000
--- a/lib/testtools/testtools/tests/matchers/test_higherorder.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
-
-from testtools import TestCase
-from testtools.matchers import (
-    DocTestMatches,
-    Equals,
-    LessThan,
-    MatchesStructure,
-    Mismatch,
-    NotEquals,
-    )
-from testtools.matchers._higherorder import (
-    AfterPreprocessing,
-    AllMatch,
-    Annotate,
-    AnnotatedMismatch,
-    AnyMatch,
-    MatchesAny,
-    MatchesAll,
-    MatchesPredicate,
-    MatchesPredicateWithParams,
-    Not,
-    )
-from testtools.tests.helpers import FullStackRunTest
-from testtools.tests.matchers.helpers import TestMatchersInterface
-
-
-class TestAllMatch(TestCase, TestMatchersInterface):
-
-    matches_matcher = AllMatch(LessThan(10))
-    matches_matches = [
-        [9, 9, 9],
-        (9, 9),
-        iter([9, 9, 9, 9, 9]),
-        ]
-    matches_mismatches = [
-        [11, 9, 9],
-        iter([9, 12, 9, 11]),
-        ]
-
-    str_examples = [
-        ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
-        ]
-
-    describe_examples = [
-        ('Differences: [\n'
-         '10 is not > 11\n'
-         '10 is not > 10\n'
-         ']',
-         [11, 9, 10],
-         AllMatch(LessThan(10))),
-        ]
-
-
-class TestAnyMatch(TestCase, TestMatchersInterface):
-
-    matches_matcher = AnyMatch(Equals('elephant'))
-    matches_matches = [
-        ['grass', 'cow', 'steak', 'milk', 'elephant'],
-        (13, 'elephant'),
-        ['elephant', 'elephant', 'elephant'],
-        set(['hippo', 'rhino', 'elephant']),
-        ]
-    matches_mismatches = [
-        [],
-        ['grass', 'cow', 'steak', 'milk'],
-        (13, 12, 10),
-        ['element', 'hephalump', 'pachyderm'],
-        set(['hippo', 'rhino', 'diplodocus']),
-        ]
-
-    str_examples = [
-        ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))),
-        ]
-
-    describe_examples = [
-        ('Differences: [\n'
-         '7 != 11\n'
-         '7 != 9\n'
-         '7 != 10\n'
-         ']',
-         [11, 9, 10],
-         AnyMatch(Equals(7))),
-        ]
-
-
-class TestAfterPreprocessing(TestCase, TestMatchersInterface):
-
-    def parity(x):
-        return x % 2
-
-    matches_matcher = AfterPreprocessing(parity, Equals(1))
-    matches_matches = [3, 5]
-    matches_mismatches = [2]
-
-    str_examples = [
-        ("AfterPreprocessing(<function parity>, Equals(1))",
-         AfterPreprocessing(parity, Equals(1))),
-        ]
-
-    describe_examples = [
-        ("1 != 0: after <function parity> on 2", 2,
-         AfterPreprocessing(parity, Equals(1))),
-        ("1 != 0", 2,
-         AfterPreprocessing(parity, Equals(1), annotate=False)),
-        ]
-
-
-class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
-    matches_matches = ["1", "2"]
-    matches_mismatches = ["3"]
-
-    str_examples = [(
-        "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
-        MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
-        ]
-
-    describe_examples = [("""Differences: [
-Expected:
-    1
-Got:
-    3
-
-Expected:
-    2
-Got:
-    3
-
-]""",
-        "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
-
-
-class TestMatchesAllInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
-    matches_matches = [3, 4]
-    matches_mismatches = [1, 2]
-
-    str_examples = [
-        ("MatchesAll(NotEquals(1), NotEquals(2))",
-         MatchesAll(NotEquals(1), NotEquals(2)))]
-
-    describe_examples = [
-        ("""Differences: [
-1 == 1
-]""",
-         1, MatchesAll(NotEquals(1), NotEquals(2))),
-        ("1 == 1", 1,
-         MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)),
-        ]
-
-
-class TestAnnotate(TestCase, TestMatchersInterface):
-
-    matches_matcher = Annotate("foo", Equals(1))
-    matches_matches = [1]
-    matches_mismatches = [2]
-
-    str_examples = [
-        ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
-
-    describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
-
-    def test_if_message_no_message(self):
-        # Annotate.if_message returns the given matcher if there is no
-        # message.
-        matcher = Equals(1)
-        not_annotated = Annotate.if_message('', matcher)
-        self.assertIs(matcher, not_annotated)
-
-    def test_if_message_given_message(self):
-        # Annotate.if_message returns an annotated version of the matcher if a
-        # message is provided.
-        matcher = Equals(1)
-        expected = Annotate('foo', matcher)
-        annotated = Annotate.if_message('foo', matcher)
-        self.assertThat(
-            annotated,
-            MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
-
-
-class TestAnnotatedMismatch(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_forwards_details(self):
-        x = Mismatch('description', {'foo': 'bar'})
-        annotated = AnnotatedMismatch("annotation", x)
-        self.assertEqual(x.get_details(), annotated.get_details())
-
-
-class TestNotInterface(TestCase, TestMatchersInterface):
-
-    matches_matcher = Not(Equals(1))
-    matches_matches = [2]
-    matches_mismatches = [1]
-
-    str_examples = [
-        ("Not(Equals(1))", Not(Equals(1))),
-        ("Not(Equals('1'))", Not(Equals('1')))]
-
-    describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
-
-
-def is_even(x):
-    return x % 2 == 0
-
-
-class TestMatchesPredicate(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesPredicate(is_even, "%s is not even")
-    matches_matches = [2, 4, 6, 8]
-    matches_mismatches = [3, 5, 7, 9]
-
-    str_examples = [
-        ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"),
-         MatchesPredicate(is_even, "%s is not even")),
-        ]
-
-    describe_examples = [
-        ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")),
-        ]
-
-
-def between(x, low, high):
-    return low < x < high
-
-
-class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface):
-
-    matches_matcher = MatchesPredicateWithParams(
-        between, "{0} is not between {1} and {2}")(1, 9)
-    matches_matches = [2, 4, 6, 8]
-    matches_mismatches = [0, 1, 9, 10]
-
-    str_examples = [
-        ("MatchesPredicateWithParams(%r, %r)(%s)" % (
-            between, "{0} is not between {1} and {2}", "1, 2"),
-         MatchesPredicateWithParams(
-            between, "{0} is not between {1} and {2}")(1, 2)),
-        ("Between(1, 2)", MatchesPredicateWithParams(
-            between, "{0} is not between {1} and {2}", "Between")(1, 2)),
-        ]
-
-    describe_examples = [
-        ('1 is not between 2 and 3', 1, MatchesPredicateWithParams(
-            between, "{0} is not between {1} and {2}")(2, 3)),
-        ]
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
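[Note for reviewers: the higher-order matchers tested above are designed to compose. A minimal sketch; the expected mismatch string follows the describe_examples in the deleted tests.]

    from testtools import TestCase
    from testtools.matchers import (
        AfterPreprocessing,
        Annotate,
        Equals,
        LessThan,
        MatchesAll,
        Not,
        )


    class DemoComposition(TestCase):

        def test_composed(self):
            # Match a derived value (the length) rather than the object
            # itself, and require every sub-matcher to pass.
            self.assertThat('elephant', MatchesAll(
                Not(Equals('')),
                AfterPreprocessing(len, LessThan(20))))

        def test_annotated_mismatch(self):
            # Annotate appends ": <message>" to the inner mismatch.
            mismatch = Annotate('expected the answer', Equals(42)).match(41)
            self.assertEqual(
                '42 != 41: expected the answer', mismatch.describe())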
diff --git a/lib/testtools/testtools/tests/matchers/test_impl.py b/lib/testtools/testtools/tests/matchers/test_impl.py
deleted file mode 100644
index 10967ea..0000000
--- a/lib/testtools/testtools/tests/matchers/test_impl.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""Tests for matchers."""
-
-from testtools import (
-    Matcher, # check that Matcher is exposed at the top level for docs.
-    TestCase,
-    )
-from testtools.compat import (
-    str_is_unicode,
-    text_repr,
-    _u,
-    )
-from testtools.matchers import (
-    Equals,
-    MatchesException,
-    Raises,
-    )
-from testtools.matchers._impl import (
-    Mismatch,
-    MismatchDecorator,
-    MismatchError,
-    )
-from testtools.tests.helpers import FullStackRunTest
-
-# Silence pyflakes.
-Matcher
-
-
-class TestMismatch(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_constructor_arguments(self):
-        mismatch = Mismatch("some description", {'detail': "things"})
-        self.assertEqual("some description", mismatch.describe())
-        self.assertEqual({'detail': "things"}, mismatch.get_details())
-
-    def test_constructor_no_arguments(self):
-        mismatch = Mismatch()
-        self.assertThat(mismatch.describe,
-            Raises(MatchesException(NotImplementedError)))
-        self.assertEqual({}, mismatch.get_details())
-
-
-class TestMismatchError(TestCase):
-
-    def test_is_assertion_error(self):
-        # MismatchError is an AssertionError, so that most of the time, it
-        # looks like a test failure, rather than an error.
-        def raise_mismatch_error():
-            raise MismatchError(2, Equals(3), Equals(3).match(2))
-        self.assertRaises(AssertionError, raise_mismatch_error)
-
-    def test_default_description_is_mismatch(self):
-        mismatch = Equals(3).match(2)
-        e = MismatchError(2, Equals(3), mismatch)
-        self.assertEqual(mismatch.describe(), str(e))
-
-    def test_default_description_unicode(self):
-        matchee = _u('\xa7')
-        matcher = Equals(_u('a'))
-        mismatch = matcher.match(matchee)
-        e = MismatchError(matchee, matcher, mismatch)
-        self.assertEqual(mismatch.describe(), str(e))
-
-    def test_verbose_description(self):
-        matchee = 2
-        matcher = Equals(3)
-        mismatch = matcher.match(2)
-        e = MismatchError(matchee, matcher, mismatch, True)
-        expected = (
-            'Match failed. Matchee: %r\n'
-            'Matcher: %s\n'
-            'Difference: %s\n' % (
-                matchee,
-                matcher,
-                matcher.match(matchee).describe(),
-                ))
-        self.assertEqual(expected, str(e))
-
-    def test_verbose_unicode(self):
-        # When assertThat is given matchees or matchers that contain non-ASCII
-        # unicode strings, we can still provide a meaningful error.
-        matchee = _u('\xa7')
-        matcher = Equals(_u('a'))
-        mismatch = matcher.match(matchee)
-        expected = (
-            'Match failed. Matchee: %s\n'
-            'Matcher: %s\n'
-            'Difference: %s\n' % (
-                text_repr(matchee),
-                matcher,
-                mismatch.describe(),
-                ))
-        e = MismatchError(matchee, matcher, mismatch, True)
-        if str_is_unicode:
-            actual = str(e)
-        else:
-            actual = unicode(e)
-            # Using str() should still work, and return ascii only
-            self.assertEqual(
-                expected.replace(matchee, matchee.encode("unicode-escape")),
-                str(e).decode("ascii"))
-        self.assertEqual(expected, actual)
-
-
-class TestMismatchDecorator(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def test_forwards_description(self):
-        x = Mismatch("description", {'foo': 'bar'})
-        decorated = MismatchDecorator(x)
-        self.assertEqual(x.describe(), decorated.describe())
-
-    def test_forwards_details(self):
-        x = Mismatch("description", {'foo': 'bar'})
-        decorated = MismatchDecorator(x)
-        self.assertEqual(x.get_details(), decorated.get_details())
-
-    def test_repr(self):
-        x = Mismatch("description", {'foo': 'bar'})
-        decorated = MismatchDecorator(x)
-        self.assertEqual(
-            '<testtools.matchers.MismatchDecorator(%r)>' % (x,),
-            repr(decorated))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
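[Note for reviewers: test_impl.py above pins down the matcher protocol itself: match() returns None on success, or an object with describe() and get_details() on failure. A hand-rolled matcher is therefore small; the IsPositive class is illustrative.]

    from testtools import TestCase
    from testtools.matchers import Mismatch


    class IsPositive(object):
        """Illustrative custom matcher following the protocol above."""

        def __str__(self):
            return 'IsPositive()'

        def match(self, value):
            if value > 0:
                return None  # Success: no mismatch to report.
            return Mismatch('%r is not positive' % (value,), {})


    class DemoCustomMatcher(TestCase):

        def test_custom_matcher(self):
            self.assertThat(7, IsPositive())
            self.assertEqual(
                '-3 is not positive', IsPositive().match(-3).describe())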
diff --git a/lib/testtools/testtools/tests/test_assert_that.py b/lib/testtools/testtools/tests/test_assert_that.py
deleted file mode 100644
index 66b4166..0000000
--- a/lib/testtools/testtools/tests/test_assert_that.py
+++ /dev/null
@@ -1,152 +0,0 @@
-from doctest import ELLIPSIS
-
-from testtools import (
-    TestCase,
-    )
-from testtools.assertions import (
-    assert_that,
-    )
-from testtools.compat import (
-    _u,
-    )
-from testtools.content import (
-    TracebackContent,
-    )
-from testtools.matchers import (
-    Annotate,
-    DocTestMatches,
-    Equals,
-    )
-
-
-class AssertThatTests(object):
-    """A mixin containing shared tests for assertThat and assert_that."""
-
-    def assert_that_callable(self, *args, **kwargs):
-        raise NotImplementedError
-
-    def assertFails(self, message, function, *args, **kwargs):
-        """Assert that function raises a failure with the given message."""
-        failure = self.assertRaises(
-            self.failureException, function, *args, **kwargs)
-        self.assert_that_callable(failure, DocTestMatches(message, ELLIPSIS))
-
-    def test_assertThat_matches_clean(self):
-        class Matcher(object):
-            def match(self, foo):
-                return None
-        self.assert_that_callable("foo", Matcher())
-
-    def test_assertThat_mismatch_raises_description(self):
-        calls = []
-        class Mismatch(object):
-            def __init__(self, thing):
-                self.thing = thing
-            def describe(self):
-                calls.append(('describe_diff', self.thing))
-                return "object is not a thing"
-            def get_details(self):
-                return {}
-        class Matcher(object):
-            def match(self, thing):
-                calls.append(('match', thing))
-                return Mismatch(thing)
-            def __str__(self):
-                calls.append(('__str__',))
-                return "a description"
-        class Test(type(self)):
-            def test(self):
-                self.assert_that_callable("foo", Matcher())
-        result = Test("test").run()
-        self.assertEqual([
-            ('match', "foo"),
-            ('describe_diff', "foo"),
-            ], calls)
-        self.assertFalse(result.wasSuccessful())
-
-    def test_assertThat_output(self):
-        matchee = 'foo'
-        matcher = Equals('bar')
-        expected = matcher.match(matchee).describe()
-        self.assertFails(expected, self.assert_that_callable, matchee, matcher)
-
-    def test_assertThat_message_is_annotated(self):
-        matchee = 'foo'
-        matcher = Equals('bar')
-        expected = Annotate('woo', matcher).match(matchee).describe()
-        self.assertFails(expected,
-                         self.assert_that_callable, matchee, matcher, 'woo')
-
-    def test_assertThat_verbose_output(self):
-        matchee = 'foo'
-        matcher = Equals('bar')
-        expected = (
-            'Match failed. Matchee: %r\n'
-            'Matcher: %s\n'
-            'Difference: %s\n' % (
-                matchee,
-                matcher,
-                matcher.match(matchee).describe(),
-                ))
-        self.assertFails(
-            expected,
-            self.assert_that_callable, matchee, matcher, verbose=True)
-
-    def get_error_string(self, e):
-        """Get the string showing how 'e' would be formatted in test output.
-
-        This is a little bit hacky, since it's designed to give consistent
-        output regardless of Python version.
-
-        In testtools, TestResult._exc_info_to_unicode is the point of dispatch
-        between various different implementations of methods that format
-        exceptions, so that's what we have to call. However, that method cares
-        about stack traces and formats the exception class. We don't care
-        about either of these, so we take its output and parse it a little.
-        """
-        error = TracebackContent((e.__class__, e, None), self).as_text()
-        # We aren't at all interested in the traceback.
-        if error.startswith('Traceback (most recent call last):\n'):
-            lines = error.splitlines(True)[1:]
-            for i, line in enumerate(lines):
-                if not line.startswith(' '):
-                    break
-            error = ''.join(lines[i:])
-        # We aren't interested in how the exception type is formatted.
-        exc_class, error = error.split(': ', 1)
-        return error
-
-    def test_assertThat_verbose_unicode(self):
-        # When assertThat is given matchees or matchers that contain non-ASCII
-        # unicode strings, we can still provide a meaningful error.
-        matchee = _u('\xa7')
-        matcher = Equals(_u('a'))
-        expected = (
-            'Match failed. Matchee: %s\n'
-            'Matcher: %s\n'
-            'Difference: %s\n\n' % (
-                repr(matchee).replace("\\xa7", matchee),
-                matcher,
-                matcher.match(matchee).describe(),
-                ))
-        e = self.assertRaises(
-            self.failureException, self.assert_that_callable, matchee, matcher,
-            verbose=True)
-        self.assertEqual(expected, self.get_error_string(e))
-
-
-class TestAssertThatFunction(AssertThatTests, TestCase):
-
-    def assert_that_callable(self, *args, **kwargs):
-        return assert_that(*args, **kwargs)
-
-
-class TestAssertThatMethod(AssertThatTests, TestCase):
-
-    def assert_that_callable(self, *args, **kwargs):
-        return self.assertThat(*args, **kwargs)
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
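[Note for reviewers: as the mixin above exercises, assert_that is the function form of TestCase.assertThat and accepts the same message and verbose arguments; a minimal sketch.]

    from testtools.assertions import assert_that
    from testtools.matchers import Equals

    assert_that('foo', Equals('foo'))  # Passes silently.

    try:
        assert_that('foo', Equals('bar'), 'woo', verbose=True)
    except AssertionError as e:
        # A multi-line "Match failed. Matchee: ..." report, annotated
        # with the 'woo' message, as the deleted tests demonstrate.
        print(e)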
diff --git a/lib/testtools/testtools/tests/test_compat.py b/lib/testtools/testtools/tests/test_compat.py
deleted file mode 100644
index 84e57be..0000000
--- a/lib/testtools/testtools/tests/test_compat.py
+++ /dev/null
@@ -1,603 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Tests for miscellaneous compatibility functions"""
-
-import io
-import linecache
-import os
-import sys
-import tempfile
-import traceback
-
-import testtools
-
-from testtools.compat import (
-    _b,
-    _detect_encoding,
-    _format_exc_info,
-    _format_exception_only,
-    _format_stack_list,
-    _get_source_encoding,
-    _u,
-    reraise,
-    str_is_unicode,
-    text_repr,
-    unicode_output_stream,
-    )
-from testtools.matchers import (
-    Equals,
-    Is,
-    IsInstance,
-    MatchesException,
-    Not,
-    Raises,
-    )
-
-
-class TestDetectEncoding(testtools.TestCase):
-    """Test detection of Python source encodings"""
-
-    def _check_encoding(self, expected, lines, possibly_invalid=False):
-        """Check lines are valid Python and encoding is as expected"""
-        if not possibly_invalid:
-            compile(_b("".join(lines)), "<str>", "exec")
-        encoding = _detect_encoding(lines)
-        self.assertEqual(expected, encoding,
-            "Encoding %r expected but got %r from lines %r" %
-                (expected, encoding, lines))
-
-    def test_examples_from_pep(self):
-        """Check the examples given in PEP 263 all work as specified
-
-        See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
-        """
-        # With interpreter binary and using Emacs style file encoding comment:
-        self._check_encoding("latin-1", (
-            "#!/usr/bin/python\n",
-            "# -*- coding: latin-1 -*-\n",
-            "import os, sys\n"))
-        self._check_encoding("iso-8859-15", (
-            "#!/usr/bin/python\n",
-            "# -*- coding: iso-8859-15 -*-\n",
-            "import os, sys\n"))
-        self._check_encoding("ascii", (
-            "#!/usr/bin/python\n",
-            "# -*- coding: ascii -*-\n",
-            "import os, sys\n"))
-        # Without interpreter line, using plain text:
-        self._check_encoding("utf-8", (
-            "# This Python file uses the following encoding: utf-8\n",
-            "import os, sys\n"))
-        # Text editors might have different ways of defining the file's
-        # encoding, e.g.
-        self._check_encoding("latin-1", (
-            "#!/usr/local/bin/python\n",
-            "# coding: latin-1\n",
-            "import os, sys\n"))
-        # Without encoding comment, Python's parser will assume ASCII text:
-        self._check_encoding("ascii", (
-            "#!/usr/local/bin/python\n",
-            "import os, sys\n"))
-        # Encoding comments which don't work:
-        #   Missing "coding:" prefix:
-        self._check_encoding("ascii", (
-            "#!/usr/local/bin/python\n",
-            "# latin-1\n",
-            "import os, sys\n"))
-        #   Encoding comment not on line 1 or 2:
-        self._check_encoding("ascii", (
-            "#!/usr/local/bin/python\n",
-            "#\n",
-            "# -*- coding: latin-1 -*-\n",
-            "import os, sys\n"))
-        #   Unsupported encoding:
-        self._check_encoding("ascii", (
-            "#!/usr/local/bin/python\n",
-            "# -*- coding: utf-42 -*-\n",
-            "import os, sys\n"),
-            possibly_invalid=True)
-
-    def test_bom(self):
-        """Test the UTF-8 BOM counts as an encoding declaration"""
-        self._check_encoding("utf-8", (
-            "\xef\xbb\xbfimport sys\n",
-            ))
-        self._check_encoding("utf-8", (
-            "\xef\xbb\xbf# File encoding: utf-8\n",
-            ))
-        self._check_encoding("utf-8", (
-            '\xef\xbb\xbf"""Module docstring\n',
-            '\xef\xbb\xbfThat should just be a ZWNB"""\n'))
-        self._check_encoding("latin-1", (
-            '"""Is this coding: latin-1 or coding: utf-8 instead?\n',
-            '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
-        self._check_encoding("utf-8", (
-            "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
-            '"""Module docstring say \xe2\x98\x86"""\n'),
-            possibly_invalid=True)
-
-    def test_multiple_coding_comments(self):
-        """Test only the first of multiple coding declarations counts"""
-        self._check_encoding("iso-8859-1", (
-            "# Is the coding: iso-8859-1\n",
-            "# Or is it coding: iso-8859-2\n"),
-            possibly_invalid=True)
-        self._check_encoding("iso-8859-1", (
-            "#!/usr/bin/python\n",
-            "# Is the coding: iso-8859-1\n",
-            "# Or is it coding: iso-8859-2\n"))
-        self._check_encoding("iso-8859-1", (
-            "# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
-            "# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
-            possibly_invalid=True)
-        self._check_encoding("iso-8859-2", (
-            "# Is the coding iso-8859-1 or coding: iso-8859-2\n",
-            "# Spot the missing colon above\n"))
-
-
-class TestGetSourceEncoding(testtools.TestCase):
-    """Test reading and caching the encodings of source files"""
-
-    def setUp(self):
-        testtools.TestCase.setUp(self)
-        dir = tempfile.mkdtemp()
-        self.addCleanup(os.rmdir, dir)
-        self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
-        self._written = False
-
-    def put_source(self, text):
-        f = open(self.filename, "w")
-        try:
-            f.write(text)
-        finally:
-            f.close()
-            if not self._written:
-                self._written = True
-                self.addCleanup(os.remove, self.filename)
-                self.addCleanup(linecache.cache.pop, self.filename, None)
-
-    def test_nonexistent_file_as_ascii(self):
-        """When the file can't be found, the encoding should default to ascii"""
-        self.assertEquals("ascii", _get_source_encoding(self.filename))
-
-    def test_encoding_is_cached(self):
-        """The encoding should stay the same if the cache isn't invalidated"""
-        self.put_source(
-            "# coding: iso-8859-13\n"
-            "import os\n")
-        self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
-        self.put_source(
-            "# coding: rot-13\n"
-            "vzcbeg bf\n")
-        self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
-
-    def test_traceback_rechecks_encoding(self):
-        """A traceback function checks the cache and resets the encoding"""
-        self.put_source(
-            "# coding: iso-8859-8\n"
-            "import os\n")
-        self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
-        self.put_source(
-            "# coding: utf-8\n"
-            "import os\n")
-        try:
-            exec (compile("raise RuntimeError\n", self.filename, "exec"))
-        except RuntimeError:
-            traceback.extract_tb(sys.exc_info()[2])
-        else:
-            self.fail("RuntimeError not raised")
-        self.assertEquals("utf-8", _get_source_encoding(self.filename))
-
-
-class _FakeOutputStream(object):
-    """A simple file-like object for testing"""
-
-    def __init__(self):
-        self.writelog = []
-
-    def write(self, obj):
-        self.writelog.append(obj)
-
-
-class TestUnicodeOutputStream(testtools.TestCase):
-    """Test wrapping output streams so they work with arbitrary unicode"""
-
-    uni = _u("pa\u026a\u03b8\u0259n")
-
-    def setUp(self):
-        super(TestUnicodeOutputStream, self).setUp()
-        if sys.platform == "cli":
-            self.skip("IronPython shouldn't wrap streams to do encoding")
-
-    def test_no_encoding_becomes_ascii(self):
-        """A stream with no encoding attribute gets ascii/replace strings"""
-        sout = _FakeOutputStream()
-        unicode_output_stream(sout).write(self.uni)
-        self.assertEqual([_b("pa???n")], sout.writelog)
-
-    def test_encoding_as_none_becomes_ascii(self):
-        """A stream with encoding value of None gets ascii/replace strings"""
-        sout = _FakeOutputStream()
-        sout.encoding = None
-        unicode_output_stream(sout).write(self.uni)
-        self.assertEqual([_b("pa???n")], sout.writelog)
-
-    def test_bogus_encoding_becomes_ascii(self):
-        """A stream with a bogus encoding gets ascii/replace strings"""
-        sout = _FakeOutputStream()
-        sout.encoding = "bogus"
-        unicode_output_stream(sout).write(self.uni)
-        self.assertEqual([_b("pa???n")], sout.writelog)
-
-    def test_partial_encoding_replace(self):
-        """A string which can be partly encoded correctly should be"""
-        sout = _FakeOutputStream()
-        sout.encoding = "iso-8859-7"
-        unicode_output_stream(sout).write(self.uni)
-        self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
-
-    @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode")
-    def test_unicode_encodings_wrapped_when_str_is_not_unicode(self):
-        """A unicode encoding is wrapped but needs no error handler"""
-        sout = _FakeOutputStream()
-        sout.encoding = "utf-8"
-        uout = unicode_output_stream(sout)
-        self.assertEqual(uout.errors, "strict")
-        uout.write(self.uni)
-        self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog)
-
-    @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode")
-    def test_unicode_encodings_not_wrapped_when_str_is_unicode(self):
-        # No wrapping needed if native str type is unicode
-        sout = _FakeOutputStream()
-        sout.encoding = "utf-8"
-        uout = unicode_output_stream(sout)
-        self.assertIs(uout, sout)
-
-    def test_stringio(self):
-        """A StringIO object should maybe get an ascii native str type"""
-        try:
-            from cStringIO import StringIO
-            newio = False
-        except ImportError:
-            from io import StringIO
-            newio = True
-        sout = StringIO()
-        soutwrapper = unicode_output_stream(sout)
-        soutwrapper.write(self.uni)
-        if newio:
-            self.assertEqual(self.uni, sout.getvalue())
-        else:
-            self.assertEqual("pa???n", sout.getvalue())
-
-    def test_io_stringio(self):
-        # io.StringIO only accepts unicode so should be returned as itself.
-        s = io.StringIO()
-        self.assertEqual(s, unicode_output_stream(s))
-
-    def test_io_bytesio(self):
-        # io.BytesIO only accepts bytes so should be wrapped.
-        bytes_io = io.BytesIO()
-        self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io))))
-        # Will error if s was not wrapped properly.
-        unicode_output_stream(bytes_io).write(_u('foo'))
-
-    def test_io_textwrapper(self):
-        # io.TextIOWrapper is unicode, so it should be returned as itself.
-        text_io = io.TextIOWrapper(io.BytesIO())
-        self.assertThat(unicode_output_stream(text_io), Is(text_io))
-        # To be sure...
-        unicode_output_stream(text_io).write(_u('foo'))
-
-
-class TestTextRepr(testtools.TestCase):
-    """Ensure in extending repr, basic behaviours are not being broken"""
-
-    ascii_examples = (
-        # Single character examples
-        #  C0 control codes should be escaped except multiline \n
-        ("\x00", "'\\x00'", "'''\\\n\\x00'''"),
-        ("\b", "'\\x08'", "'''\\\n\\x08'''"),
-        ("\t", "'\\t'", "'''\\\n\\t'''"),
-        ("\n", "'\\n'", "'''\\\n\n'''"),
-        ("\r", "'\\r'", "'''\\\n\\r'''"),
-        #  Quotes and backslash should match normal repr behaviour
-        ('"', "'\"'", "'''\\\n\"'''"),
-        ("'", "\"'\"", "'''\\\n\\''''"),
-        ("\\", "'\\\\'", "'''\\\n\\\\'''"),
-        #  DEL is also unprintable and should be escaped
-        ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"),
-
-        # Character combinations that need double checking
-        ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"),
-        ("\"'", "'\"\\''", "'''\\\n\"\\''''"),
-        ("'\"", "'\\'\"'", "'''\\\n'\"'''"),
-        ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"),
-        ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"),
-        ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"),
-        ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"),
-        ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"),
-        ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"),
-        )
-
-    # Bytes with the high bit set should always be escaped
-    bytes_examples = (
-        (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
-        (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
-        (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"),
-        (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"),
-        (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"),
-        )
-
-    # Unicode doesn't escape printable characters as per the Python 3 model
-    unicode_examples = (
-        # C1 codes are unprintable
-        (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
-        (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"),
-        # No-break space is unprintable
-        (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
-        # Letters in Latin alphabets are printable
-        (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")),
-        (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")),
-        (_u("\u0100"), _u("'\u0100'"), _u("'''\\\n\u0100'''")),
-        # Line and paragraph separators are unprintable
-        (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"),
-        (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"),
-        # Unpaired surrogates are unprintable
-        (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"),
-        (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"),
-        # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs
-        )
-
-    b_prefix = repr(_b(""))[:-2]
-    u_prefix = repr(_u(""))[:-2]
-
-    def test_ascii_examples_oneline_bytes(self):
-        for s, expected, _ in self.ascii_examples:
-            b = _b(s)
-            actual = text_repr(b, multiline=False)
-            # Add self.assertIsInstance check?
-            self.assertEqual(actual, self.b_prefix + expected)
-            self.assertEqual(eval(actual), b)
-
-    def test_ascii_examples_oneline_unicode(self):
-        for s, expected, _ in self.ascii_examples:
-            u = _u(s)
-            actual = text_repr(u, multiline=False)
-            self.assertEqual(actual, self.u_prefix + expected)
-            self.assertEqual(eval(actual), u)
-
-    def test_ascii_examples_multiline_bytes(self):
-        for s, _, expected in self.ascii_examples:
-            b = _b(s)
-            actual = text_repr(b, multiline=True)
-            self.assertEqual(actual, self.b_prefix + expected)
-            self.assertEqual(eval(actual), b)
-
-    def test_ascii_examples_multiline_unicode(self):
-        for s, _, expected in self.ascii_examples:
-            u = _u(s)
-            actual = text_repr(u, multiline=True)
-            self.assertEqual(actual, self.u_prefix + expected)
-            self.assertEqual(eval(actual), u)
-
-    def test_ascii_examples_defaultline_bytes(self):
-        for s, one, multi in self.ascii_examples:
-            expected = "\n" in s and multi or one
-            self.assertEqual(text_repr(_b(s)), self.b_prefix + expected)
-
-    def test_ascii_examples_defaultline_unicode(self):
-        for s, one, multi in self.ascii_examples:
-            expected = "\n" in s and multi or one
-            self.assertEqual(text_repr(_u(s)), self.u_prefix + expected)
-
-    def test_bytes_examples_oneline(self):
-        for b, expected, _ in self.bytes_examples:
-            actual = text_repr(b, multiline=False)
-            self.assertEqual(actual, self.b_prefix + expected)
-            self.assertEqual(eval(actual), b)
-
-    def test_bytes_examples_multiline(self):
-        for b, _, expected in self.bytes_examples:
-            actual = text_repr(b, multiline=True)
-            self.assertEqual(actual, self.b_prefix + expected)
-            self.assertEqual(eval(actual), b)
-
-    def test_unicode_examples_oneline(self):
-        for u, expected, _ in self.unicode_examples:
-            actual = text_repr(u, multiline=False)
-            self.assertEqual(actual, self.u_prefix + expected)
-            self.assertEqual(eval(actual), u)
-
-    def test_unicode_examples_multiline(self):
-        for u, _, expected in self.unicode_examples:
-            actual = text_repr(u, multiline=True)
-            self.assertEqual(actual, self.u_prefix + expected)
-            self.assertEqual(eval(actual), u)
-
-
-class TestReraise(testtools.TestCase):
-    """Tests for trivial reraise wrapper needed for Python 2/3 changes"""
-
-    def test_exc_info(self):
-        """After reraise exc_info matches plus some extra traceback"""
-        try:
-            raise ValueError("Bad value")
-        except ValueError:
-            _exc_info = sys.exc_info()
-        try:
-            reraise(*_exc_info)
-        except ValueError:
-            _new_exc_info = sys.exc_info()
-        self.assertIs(_exc_info[0], _new_exc_info[0])
-        self.assertIs(_exc_info[1], _new_exc_info[1])
-        expected_tb = traceback.extract_tb(_exc_info[2])
-        self.assertEqual(expected_tb,
-            traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):])
-
-    def test_custom_exception_no_args(self):
-        """Reraising does not require args attribute to contain params"""
-
-        class CustomException(Exception):
-            """Exception that expects and sets attrs but not args"""
-
-            def __init__(self, value):
-                Exception.__init__(self)
-                self.value = value
-
-        try:
-            raise CustomException("Some value")
-        except CustomException:
-            _exc_info = sys.exc_info()
-        self.assertRaises(CustomException, reraise, *_exc_info)
-
-
-class Python2CompatibilityTests(testtools.TestCase):
-
-    def setUp(self):
-        super(Python2CompatibilityTests, self).setUp()
-        if sys.version_info[0] >= 3:
-            self.skip("These tests are only applicable to python 2.")
-
-
-class TestExceptionFormatting(Python2CompatibilityTests):
-    """Test the _format_exception_only function."""
-
-    def _assert_exception_format(self, eclass, evalue, expected):
-        actual = _format_exception_only(eclass, evalue)
-        self.assertThat(actual, Equals(expected))
-        self.assertThat(''.join(actual), IsInstance(unicode))
-
-    def test_supports_string_exception(self):
-        self._assert_exception_format(
-            "String_Exception",
-            None,
-            [_u("String_Exception\n")]
-        )
-
-    def test_supports_regular_exception(self):
-        self._assert_exception_format(
-            RuntimeError,
-            RuntimeError("Something went wrong"),
-            [_u("RuntimeError: Something went wrong\n")]
-        )
-
-    def test_supports_unprintable_exceptions(self):
-        """Verify support for exception classes that raise an exception when
-        __unicode__ or __str__ is called.
-        """
-        class UnprintableException(Exception):
-
-            def __str__(self):
-                raise Exception()
-
-            def __unicode__(self):
-                raise Exception()
-
-        self._assert_exception_format(
-            UnprintableException,
-            UnprintableException("Foo"),
-            [_u("UnprintableException: <unprintable UnprintableException object>\n")]
-        )
-
-    def test_supports_exceptions_with_no_string_value(self):
-        class NoStringException(Exception):
-
-            def __str__(self):
-                return ""
-
-            def __unicode__(self):
-                return _u("")
-
-        self._assert_exception_format(
-            NoStringException,
-            NoStringException("Foo"),
-            [_u("NoStringException\n")]
-        )
-
-    def test_supports_strange_syntax_error(self):
-        """Test support for syntax errors with unusual number of arguments"""
-        self._assert_exception_format(
-            SyntaxError,
-            SyntaxError("Message"),
-            [_u("SyntaxError: Message\n")]
-        )
-
-    def test_supports_syntax_error(self):
-        self._assert_exception_format(
-            SyntaxError,
-            SyntaxError(
-                "Some Syntax Message",
-                (
-                    "/path/to/file",
-                    12,
-                    2,
-                    "This is the line of code",
-                )
-            ),
-            [
-                _u('  File "/path/to/file", line 12\n'),
-                _u('    This is the line of code\n'),
-                _u('     ^\n'),
-                _u('SyntaxError: Some Syntax Message\n'),
-            ]
-        )
-
-
-class StackListFormattingTests(Python2CompatibilityTests):
-    """Test the _format_stack_list function."""
-
-    def _assert_stack_format(self, stack_lines, expected_output):
-        actual = _format_stack_list(stack_lines)
-        self.assertThat(actual, Equals([expected_output]))
-
-    def test_single_complete_stack_line(self):
-        stack_lines = [(
-            '/path/to/filename',
-            12,
-            'func_name',
-            'some_code()',
-        )]
-        expected = \
-            _u('  File "/path/to/filename", line 12, in func_name\n' \
-               '    some_code()\n')
-
-        self._assert_stack_format(stack_lines, expected)
-
-    def test_single_stack_line_no_code(self):
-        stack_lines = [(
-            '/path/to/filename',
-            12,
-            'func_name',
-            None
-        )]
-        expected = _u('  File "/path/to/filename", line 12, in func_name\n')
-        self._assert_stack_format(stack_lines, expected)
-
-
-class FormatExceptionInfoTests(Python2CompatibilityTests):
-
-    def test_individual_functions_called(self):
-        self.patch(
-            testtools.compat,
-            '_format_stack_list',
-            lambda stack_list: [_u("format stack list called\n")]
-        )
-        self.patch(
-            testtools.compat,
-            '_format_exception_only',
-            lambda etype, evalue: [_u("format exception only called\n")]
-        )
-        result = _format_exc_info(None, None, None)
-        expected = [
-            _u("Traceback (most recent call last):\n"),
-            _u("format stack list called\n"),
-            _u("format exception only called\n"),
-        ]
-        self.assertThat(expected, Equals(result))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
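[Note for reviewers: text_repr, tested exhaustively above, is a repr variant that keeps multi-line and non-ASCII data readable; a small sketch.]

    from testtools.compat import text_repr

    # The one-line form escapes the newline, much like plain repr().
    print(text_repr('line one\nline two', multiline=False))
    # The multi-line form switches to a triple-quoted literal containing
    # real newlines, per the ascii_examples table above.
    print(text_repr('line one\nline two', multiline=True))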
diff --git a/lib/testtools/testtools/tests/test_content.py b/lib/testtools/testtools/tests/test_content.py
deleted file mode 100644
index 09feebd..0000000
--- a/lib/testtools/testtools/tests/test_content.py
+++ /dev/null
@@ -1,366 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-import json
-import os
-import tempfile
-import unittest
-
-from testtools import TestCase, skipUnless
-from testtools.compat import (
-    _b,
-    _u,
-    BytesIO,
-    StringIO,
-    str_is_unicode,
-    )
-from testtools.content import (
-    attach_file,
-    Content,
-    content_from_file,
-    content_from_stream,
-    JSON,
-    json_content,
-    StackLinesContent,
-    StacktraceContent,
-    TracebackContent,
-    text_content,
-    )
-from testtools.content_type import (
-    ContentType,
-    UTF8_TEXT,
-    )
-from testtools.matchers import (
-    Equals,
-    MatchesException,
-    Raises,
-    raises,
-    )
-from testtools.tests.helpers import an_exc_info
-
-
-raises_value_error = Raises(MatchesException(ValueError))
-
-
-class TestContent(TestCase):
-
-    def test___init___None_errors(self):
-        self.assertThat(lambda: Content(None, None), raises_value_error)
-        self.assertThat(
-            lambda: Content(None, lambda: ["traceback"]), raises_value_error)
-        self.assertThat(
-            lambda: Content(ContentType("text", "traceback"), None),
-            raises_value_error)
-
-    def test___init___sets_ivars(self):
-        content_type = ContentType("foo", "bar")
-        content = Content(content_type, lambda: ["bytes"])
-        self.assertEqual(content_type, content.content_type)
-        self.assertEqual(["bytes"], list(content.iter_bytes()))
-
-    def test___eq__(self):
-        content_type = ContentType("foo", "bar")
-        one_chunk = lambda: [_b("bytes")]
-        two_chunk = lambda: [_b("by"), _b("tes")]
-        content1 = Content(content_type, one_chunk)
-        content2 = Content(content_type, one_chunk)
-        content3 = Content(content_type, two_chunk)
-        content4 = Content(content_type, lambda: [_b("by"), _b("te")])
-        content5 = Content(ContentType("f", "b"), two_chunk)
-        self.assertEqual(content1, content2)
-        self.assertEqual(content1, content3)
-        self.assertNotEqual(content1, content4)
-        self.assertNotEqual(content1, content5)
-
-    def test___repr__(self):
-        content = Content(ContentType("application", "octet-stream"),
-            lambda: [_b("\x00bin"), _b("ary\xff")])
-        self.assertIn("\\x00binary\\xff", repr(content))
-
-    def test_iter_text_not_text_errors(self):
-        content_type = ContentType("foo", "bar")
-        content = Content(content_type, lambda: ["bytes"])
-        self.assertThat(content.iter_text, raises_value_error)
-
-    def test_iter_text_decodes(self):
-        content_type = ContentType("text", "strange", {"charset": "utf8"})
-        content = Content(
-            content_type, lambda: [_u("bytes\xea").encode("utf8")])
-        self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
-
-    def test_iter_text_default_charset_iso_8859_1(self):
-        content_type = ContentType("text", "strange")
-        text = _u("bytes\xea")
-        iso_version = text.encode("ISO-8859-1")
-        content = Content(content_type, lambda: [iso_version])
-        self.assertEqual([text], list(content.iter_text()))
-
-    def test_as_text(self):
-        content_type = ContentType("text", "strange", {"charset": "utf8"})
-        content = Content(
-            content_type, lambda: [_u("bytes\xea").encode("utf8")])
-        self.assertEqual(_u("bytes\xea"), content.as_text())
-
-    def test_from_file(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        os.write(fd, _b('some data'))
-        os.close(fd)
-        content = content_from_file(path, UTF8_TEXT, chunk_size=2)
-        self.assertThat(
-            list(content.iter_bytes()),
-            Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')]))
-
-    def test_from_nonexistent_file(self):
-        directory = tempfile.mkdtemp()
-        nonexistent = os.path.join(directory, 'nonexistent-file')
-        content = content_from_file(nonexistent)
-        self.assertThat(content.iter_bytes, raises(IOError))
-
-    def test_from_file_default_type(self):
-        content = content_from_file('/nonexistent/path')
-        self.assertThat(content.content_type, Equals(UTF8_TEXT))
-
-    def test_from_file_eager_loading(self):
-        fd, path = tempfile.mkstemp()
-        os.write(fd, _b('some data'))
-        os.close(fd)
-        content = content_from_file(path, UTF8_TEXT, buffer_now=True)
-        os.remove(path)
-        self.assertThat(
-            ''.join(content.iter_text()), Equals('some data'))
-
-    def test_from_file_with_simple_seek(self):
-        f = tempfile.NamedTemporaryFile()
-        f.write(_b('some data'))
-        f.flush()
-        self.addCleanup(f.close)
-        content = content_from_file(
-            f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
-        self.assertThat(
-            list(content.iter_bytes()), Equals([_b('data')]))
-
-    def test_from_file_with_whence_seek(self):
-        f = tempfile.NamedTemporaryFile()
-        f.write(_b('some data'))
-        f.flush()
-        self.addCleanup(f.close)
-        content = content_from_file(
-            f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
-        self.assertThat(
-            list(content.iter_bytes()), Equals([_b('data')]))
-
-    def test_from_stream(self):
-        data = StringIO('some data')
-        content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
-        self.assertThat(
-            list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a']))
-
-    def test_from_stream_default_type(self):
-        data = StringIO('some data')
-        content = content_from_stream(data)
-        self.assertThat(content.content_type, Equals(UTF8_TEXT))
-
-    def test_from_stream_eager_loading(self):
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        self.addCleanup(os.close, fd)
-        os.write(fd, _b('some data'))
-        stream = open(path, 'rb')
-        self.addCleanup(stream.close)
-        content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
-        os.write(fd, _b('more data'))
-        self.assertThat(
-            ''.join(content.iter_text()), Equals('some data'))
-
-    def test_from_stream_with_simple_seek(self):
-        data = BytesIO(_b('some data'))
-        content = content_from_stream(
-            data, UTF8_TEXT, chunk_size=50, seek_offset=5)
-        self.assertThat(
-            list(content.iter_bytes()), Equals([_b('data')]))
-
-    def test_from_stream_with_whence_seek(self):
-        data = BytesIO(_b('some data'))
-        content = content_from_stream(
-            data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
-        self.assertThat(
-            list(content.iter_bytes()), Equals([_b('data')]))
-
-    def test_from_text(self):
-        data = _u("some data")
-        expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
-        self.assertEqual(expected, text_content(data))
-
-    @skipUnless(str_is_unicode, "Test only applies in python 3.")
-    def test_text_content_raises_TypeError_when_passed_bytes(self):
-        data = _b("Some Bytes")
-        self.assertRaises(TypeError, text_content, data)
-
-    def test_text_content_raises_TypeError_when_passed_non_text(self):
-        bad_values = (None, list(), dict(), 42, 1.23)
-        for value in bad_values:
-            self.assertThat(
-                lambda: text_content(value),
-                raises(
-                    TypeError("text_content must be given text, not '%s'." %
-                        type(value).__name__)
-                ),
-            )
-
-    def test_json_content(self):
-        data = {'foo': 'bar'}
-        expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
-        self.assertEqual(expected, json_content(data))
-
-
-class TestStackLinesContent(TestCase):
-
-    def _get_stack_line_and_expected_output(self):
-        stack_lines = [
-            ('/path/to/file', 42, 'some_function', 'print("Hello World")'),
-        ]
-        expected = '  File "/path/to/file", line 42, in some_function\n' \
-                   '    print("Hello World")\n'
-        return stack_lines, expected
-
-    def test_single_stack_line(self):
-        stack_lines, expected = self._get_stack_line_and_expected_output()
-        actual = StackLinesContent(stack_lines).as_text()
-
-        self.assertEqual(expected, actual)
-
-    def test_prefix_content(self):
-        stack_lines, expected = self._get_stack_line_and_expected_output()
-        prefix = self.getUniqueString() + '\n'
-        content = StackLinesContent(stack_lines, prefix_content=prefix)
-        actual = content.as_text()
-        expected = prefix + expected
-
-        self.assertEqual(expected, actual)
-
-    def test_postfix_content(self):
-        stack_lines, expected = self._get_stack_line_and_expected_output()
-        postfix = '\n' + self.getUniqueString()
-        content = StackLinesContent(stack_lines, postfix_content=postfix)
-        actual = content.as_text()
-        expected = expected + postfix
-
-        self.assertEqual(expected, actual)
-
-    def test___init___sets_content_type(self):
-        stack_lines, expected = self._get_stack_line_and_expected_output()
-        content = StackLinesContent(stack_lines)
-        expected_content_type = ContentType("text", "x-traceback",
-            {"language": "python", "charset": "utf8"})
-
-        self.assertEqual(expected_content_type, content.content_type)
-
-
-class TestTracebackContent(TestCase):
-
-    def test___init___None_errors(self):
-        self.assertThat(
-            lambda: TracebackContent(None, None), raises_value_error)
-
-    def test___init___sets_ivars(self):
-        content = TracebackContent(an_exc_info, self)
-        content_type = ContentType("text", "x-traceback",
-            {"language": "python", "charset": "utf8"})
-        self.assertEqual(content_type, content.content_type)
-        result = unittest.TestResult()
-        expected = result._exc_info_to_string(an_exc_info, self)
-        self.assertEqual(expected, ''.join(list(content.iter_text())))
-
-
-class TestStacktraceContent(TestCase):
-
-    def test___init___sets_ivars(self):
-        content = StacktraceContent()
-        content_type = ContentType("text", "x-traceback",
-            {"language": "python", "charset": "utf8"})
-
-        self.assertEqual(content_type, content.content_type)
-
-    def test_prefix_is_used(self):
-        prefix = self.getUniqueString()
-        actual = StacktraceContent(prefix_content=prefix).as_text()
-
-        self.assertTrue(actual.startswith(prefix))
-
-    def test_postfix_is_used(self):
-        postfix = self.getUniqueString()
-        actual = StacktraceContent(postfix_content=postfix).as_text()
-
-        self.assertTrue(actual.endswith(postfix))
-
-    def test_top_frame_is_skipped_when_no_stack_is_specified(self):
-        actual = StacktraceContent().as_text()
-
-        self.assertTrue('testtools/content.py' not in actual)
-
-
-class TestAttachFile(TestCase):
-
-    def make_file(self, data):
-        # GZ 2011-04-21: This helper could be useful for methods above trying
-        #                to use mkstemp, but should handle write failures and
-        #                always close the fd. There must be a better way.
-        fd, path = tempfile.mkstemp()
-        self.addCleanup(os.remove, path)
-        os.write(fd, _b(data))
-        os.close(fd)
-        return path
-
-    def test_simple(self):
-        class SomeTest(TestCase):
-            def test_foo(self):
-                pass
-        test = SomeTest('test_foo')
-        data = 'some data'
-        path = self.make_file(data)
-        my_content = text_content(data)
-        attach_file(test, path, name='foo')
-        self.assertEqual({'foo': my_content}, test.getDetails())
-
-    def test_optional_name(self):
-        # If no name is provided, attach_file just uses the base name of the
-        # file.
-        class SomeTest(TestCase):
-            def test_foo(self):
-                pass
-        test = SomeTest('test_foo')
-        path = self.make_file('some data')
-        base_path = os.path.basename(path)
-        attach_file(test, path)
-        self.assertEqual([base_path], list(test.getDetails()))
-
-    def test_lazy_read(self):
-        class SomeTest(TestCase):
-            def test_foo(self):
-                pass
-        test = SomeTest('test_foo')
-        path = self.make_file('some data')
-        attach_file(test, path, name='foo', buffer_now=False)
-        content = test.getDetails()['foo']
-        content_file = open(path, 'w')
-        content_file.write('new data')
-        content_file.close()
-        self.assertEqual(''.join(content.iter_text()), 'new data')
-
-    def test_eager_read_by_default(self):
-        class SomeTest(TestCase):
-            def test_foo(self):
-                pass
-        test = SomeTest('test_foo')
-        path = self.make_file('some data')
-        attach_file(test, path, name='foo')
-        content = test.getDetails()['foo']
-        content_file = open(path, 'w')
-        content_file.write('new data')
-        content_file.close()
-        self.assertEqual(''.join(content.iter_text()), 'some data')
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
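
For reviewers unfamiliar with the code being removed: the content helpers
exercised above are used roughly as follows. This is a minimal sketch based
on these tests; the log path is hypothetical.

    from testtools.content import content_from_file, text_content
    from testtools.content_type import UTF8_TEXT

    # text_content wraps a unicode string as UTF-8 text/plain Content.
    detail = text_content(u'some data')
    assert ''.join(detail.iter_text()) == 'some data'

    # content_from_file reads the file lazily on iteration unless
    # buffer_now=True is passed, in which case it buffers immediately.
    lazy = content_from_file('/tmp/example.log', UTF8_TEXT, chunk_size=2)
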
diff --git a/lib/testtools/testtools/tests/test_content_type.py b/lib/testtools/testtools/tests/test_content_type.py
deleted file mode 100644
index 2d34f95..0000000
--- a/lib/testtools/testtools/tests/test_content_type.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
-
-from testtools import TestCase
-from testtools.matchers import Equals, MatchesException, Raises
-from testtools.content_type import (
-    ContentType,
-    JSON,
-    UTF8_TEXT,
-    )
-
-
-class TestContentType(TestCase):
-
-    def test___init___None_errors(self):
-        raises_value_error = Raises(MatchesException(ValueError))
-        self.assertThat(lambda:ContentType(None, None), raises_value_error)
-        self.assertThat(lambda:ContentType(None, "traceback"),
-            raises_value_error)
-        self.assertThat(lambda:ContentType("text", None), raises_value_error)
-
-    def test___init___sets_ivars(self):
-        content_type = ContentType("foo", "bar")
-        self.assertEqual("foo", content_type.type)
-        self.assertEqual("bar", content_type.subtype)
-        self.assertEqual({}, content_type.parameters)
-
-    def test___init___with_parameters(self):
-        content_type = ContentType("foo", "bar", {"quux": "thing"})
-        self.assertEqual({"quux": "thing"}, content_type.parameters)
-
-    def test___eq__(self):
-        content_type1 = ContentType("foo", "bar", {"quux": "thing"})
-        content_type2 = ContentType("foo", "bar", {"quux": "thing"})
-        content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
-        self.assertTrue(content_type1.__eq__(content_type2))
-        self.assertFalse(content_type1.__eq__(content_type3))
-
-    def test_basic_repr(self):
-        content_type = ContentType('text', 'plain')
-        self.assertThat(repr(content_type), Equals('text/plain'))
-
-    def test_extended_repr(self):
-        content_type = ContentType(
-            'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
-        self.assertThat(
-            repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
-
-
-class TestBuiltinContentTypes(TestCase):
-
-    def test_plain_text(self):
-        # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
-        self.assertThat(UTF8_TEXT.type, Equals('text'))
-        self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
-        self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
-
-    def test_json_content(self):
-        # The JSON content type represents application/json, implicitly UTF-8.
-        self.assertThat(JSON.type, Equals('application'))
-        self.assertThat(JSON.subtype, Equals('json'))
-        self.assertThat(JSON.parameters, Equals({}))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
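
As the repr tests above show, ContentType renders like a MIME type. A
minimal sketch:

    from testtools.content_type import ContentType

    ct = ContentType('text', 'plain', {'charset': 'utf8'})
    repr(ct)  # -> 'text/plain; charset="utf8"'
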
diff --git a/lib/testtools/testtools/tests/test_deferredruntest.py b/lib/testtools/testtools/tests/test_deferredruntest.py
deleted file mode 100644
index 3310926..0000000
--- a/lib/testtools/testtools/tests/test_deferredruntest.py
+++ /dev/null
@@ -1,777 +0,0 @@
-# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
-
-"""Tests for the DeferredRunTest single test execution logic."""
-
-import os
-import signal
-
-from extras import try_import
-
-from testtools import (
-    skipIf,
-    TestCase,
-    TestResult,
-    )
-from testtools.content import (
-    text_content,
-    )
-from testtools.matchers import (
-    Equals,
-    KeysEqual,
-    MatchesException,
-    Raises,
-    )
-from testtools.runtest import RunTest
-from testtools.testresult.doubles import ExtendedTestResult
-from testtools.tests.test_spinner import NeedsTwistedTestCase
-
-assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
-AsynchronousDeferredRunTest = try_import(
-    'testtools.deferredruntest.AsynchronousDeferredRunTest')
-flush_logged_errors = try_import(
-    'testtools.deferredruntest.flush_logged_errors')
-SynchronousDeferredRunTest = try_import(
-    'testtools.deferredruntest.SynchronousDeferredRunTest')
-
-defer = try_import('twisted.internet.defer')
-failure = try_import('twisted.python.failure')
-log = try_import('twisted.python.log')
-DelayedCall = try_import('twisted.internet.base.DelayedCall')
-
-
-class X(object):
-    """Tests that we run as part of our tests, nested to avoid discovery."""
-
-    class Base(TestCase):
-        def setUp(self):
-            super(X.Base, self).setUp()
-            self.calls = ['setUp']
-            self.addCleanup(self.calls.append, 'clean-up')
-        def test_something(self):
-            self.calls.append('test')
-        def tearDown(self):
-            self.calls.append('tearDown')
-            super(X.Base, self).tearDown()
-
-    class BaseExceptionRaised(Base):
-        expected_calls = ['setUp', 'tearDown', 'clean-up']
-        expected_results = [('addError', SystemExit)]
-        def test_something(self):
-            raise SystemExit(0)
-
-    class ErrorInSetup(Base):
-        expected_calls = ['setUp', 'clean-up']
-        expected_results = [('addError', RuntimeError)]
-        def setUp(self):
-            super(X.ErrorInSetup, self).setUp()
-            raise RuntimeError("Error in setUp")
-
-    class ErrorInTest(Base):
-        expected_calls = ['setUp', 'tearDown', 'clean-up']
-        expected_results = [('addError', RuntimeError)]
-        def test_something(self):
-            raise RuntimeError("Error in test")
-
-    class FailureInTest(Base):
-        expected_calls = ['setUp', 'tearDown', 'clean-up']
-        expected_results = [('addFailure', AssertionError)]
-        def test_something(self):
-            self.fail("test failed")
-
-    class ErrorInTearDown(Base):
-        expected_calls = ['setUp', 'test', 'clean-up']
-        expected_results = [('addError', RuntimeError)]
-        def tearDown(self):
-            raise RuntimeError("Error in tearDown")
-
-    class ErrorInCleanup(Base):
-        expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
-        expected_results = [('addError', ZeroDivisionError)]
-        def test_something(self):
-            self.calls.append('test')
-            self.addCleanup(lambda: 1/0)
-
-    class TestIntegration(NeedsTwistedTestCase):
-
-        def assertResultsMatch(self, test, result):
-            events = list(result._events)
-            self.assertEqual(('startTest', test), events.pop(0))
-            for expected_result in test.expected_results:
-                result = events.pop(0)
-                if len(expected_result) == 1:
-                    self.assertEqual((expected_result[0], test), result)
-                else:
-                    self.assertEqual((expected_result[0], test), result[:2])
-                    error_type = expected_result[1]
-                    self.assertIn(error_type.__name__, str(result[2]))
-            self.assertEqual([('stopTest', test)], events)
-
-        def test_runner(self):
-            result = ExtendedTestResult()
-            test = self.test_factory('test_something', runTest=self.runner)
-            if self.test_factory is X.BaseExceptionRaised:
-                self.assertRaises(SystemExit, test.run, result)
-            else:
-                test.run(result)
-            self.assertEqual(test.calls, self.test_factory.expected_calls)
-            self.assertResultsMatch(test, result)
-
-
-def make_integration_tests():
-    from unittest import TestSuite
-    from testtools import clone_test_with_new_id
-    runners = [
-        ('RunTest', RunTest),
-        ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
-        ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
-        ]
-
-    tests = [
-        X.BaseExceptionRaised,
-        X.ErrorInSetup,
-        X.ErrorInTest,
-        X.ErrorInTearDown,
-        X.FailureInTest,
-        X.ErrorInCleanup,
-        ]
-    base_test = X.TestIntegration('test_runner')
-    integration_tests = []
-    for runner_name, runner in runners:
-        for test in tests:
-            new_test = clone_test_with_new_id(
-                base_test, '%s(%s, %s)' % (
-                    base_test.id(),
-                    runner_name,
-                    test.__name__))
-            new_test.test_factory = test
-            new_test.runner = runner
-            integration_tests.append(new_test)
-    return TestSuite(integration_tests)
-
-
-class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
-
-    def make_result(self):
-        return ExtendedTestResult()
-
-    def make_runner(self, test):
-        return SynchronousDeferredRunTest(test, test.exception_handlers)
-
-    def test_success(self):
-        class SomeCase(TestCase):
-            def test_success(self):
-                return defer.succeed(None)
-        test = SomeCase('test_success')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            result._events, Equals([
-                ('startTest', test),
-                ('addSuccess', test),
-                ('stopTest', test)]))
-
-    def test_failure(self):
-        class SomeCase(TestCase):
-            def test_failure(self):
-                return defer.maybeDeferred(self.fail, "Egads!")
-        test = SomeCase('test_failure')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events], Equals([
-                ('startTest', test),
-                ('addFailure', test),
-                ('stopTest', test)]))
-
-    def test_setUp_followed_by_test(self):
-        class SomeCase(TestCase):
-            def setUp(self):
-                super(SomeCase, self).setUp()
-                return defer.succeed(None)
-            def test_failure(self):
-                return defer.maybeDeferred(self.fail, "Egads!")
-        test = SomeCase('test_failure')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events], Equals([
-                ('startTest', test),
-                ('addFailure', test),
-                ('stopTest', test)]))
-
-
-class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
-
-    def make_reactor(self):
-        from twisted.internet import reactor
-        return reactor
-
-    def make_result(self):
-        return ExtendedTestResult()
-
-    def make_runner(self, test, timeout=None):
-        if timeout is None:
-            timeout = self.make_timeout()
-        return AsynchronousDeferredRunTest(
-            test, test.exception_handlers, timeout=timeout)
-
-    def make_timeout(self):
-        return 0.005
-
-    def test_setUp_returns_deferred_that_fires_later(self):
-        # setUp can return a Deferred that might fire at any time.
-        # AsynchronousDeferredRunTest will not go on to run the test until
-        # the Deferred returned by setUp actually fires.
-        call_log = []
-        marker = object()
-        d = defer.Deferred().addCallback(call_log.append)
-        class SomeCase(TestCase):
-            def setUp(self):
-                super(SomeCase, self).setUp()
-                call_log.append('setUp')
-                return d
-            def test_something(self):
-                call_log.append('test')
-        def fire_deferred():
-            self.assertThat(call_log, Equals(['setUp']))
-            d.callback(marker)
-        test = SomeCase('test_something')
-        timeout = self.make_timeout()
-        runner = self.make_runner(test, timeout=timeout)
-        result = self.make_result()
-        reactor = self.make_reactor()
-        reactor.callLater(timeout, fire_deferred)
-        runner.run(result)
-        self.assertThat(call_log, Equals(['setUp', marker, 'test']))
-
-    def test_calls_setUp_test_tearDown_in_sequence(self):
-        # setUp, the test method and tearDown can all return
-        # Deferreds. AsynchronousDeferredRunTest will make sure that each of
-        # these is run in turn, only going on to the next stage once the
-        # Deferred from the previous stage has fired.
-        call_log = []
-        a = defer.Deferred()
-        a.addCallback(lambda x: call_log.append('a'))
-        b = defer.Deferred()
-        b.addCallback(lambda x: call_log.append('b'))
-        c = defer.Deferred()
-        c.addCallback(lambda x: call_log.append('c'))
-        class SomeCase(TestCase):
-            def setUp(self):
-                super(SomeCase, self).setUp()
-                call_log.append('setUp')
-                return a
-            def test_success(self):
-                call_log.append('test')
-                return b
-            def tearDown(self):
-                super(SomeCase, self).tearDown()
-                call_log.append('tearDown')
-                return c
-        test = SomeCase('test_success')
-        timeout = self.make_timeout()
-        runner = self.make_runner(test, timeout)
-        result = self.make_result()
-        reactor = self.make_reactor()
-        def fire_a():
-            self.assertThat(call_log, Equals(['setUp']))
-            a.callback(None)
-        def fire_b():
-            self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
-            b.callback(None)
-        def fire_c():
-            self.assertThat(
-                call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
-            c.callback(None)
-        reactor.callLater(timeout * 0.25, fire_a)
-        reactor.callLater(timeout * 0.5, fire_b)
-        reactor.callLater(timeout * 0.75, fire_c)
-        runner.run(result)
-        self.assertThat(
-            call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))
-
-    def test_async_cleanups(self):
-        # Cleanups added with addCleanup can return
-        # Deferreds. AsynchronousDeferredRunTest will run each of them in
-        # turn.
-        class SomeCase(TestCase):
-            def test_whatever(self):
-                pass
-        test = SomeCase('test_whatever')
-        call_log = []
-        a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
-        b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
-        c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
-        test.addCleanup(lambda: a)
-        test.addCleanup(lambda: b)
-        test.addCleanup(lambda: c)
-        def fire_a():
-            self.assertThat(call_log, Equals([]))
-            a.callback(None)
-        def fire_b():
-            self.assertThat(call_log, Equals(['a']))
-            b.callback(None)
-        def fire_c():
-            self.assertThat(call_log, Equals(['a', 'b']))
-            c.callback(None)
-        timeout = self.make_timeout()
-        reactor = self.make_reactor()
-        reactor.callLater(timeout * 0.25, fire_a)
-        reactor.callLater(timeout * 0.5, fire_b)
-        reactor.callLater(timeout * 0.75, fire_c)
-        runner = self.make_runner(test, timeout)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(call_log, Equals(['a', 'b', 'c']))
-
-    def test_clean_reactor(self):
-        # If there's cruft left over in the reactor, the test fails.
-        reactor = self.make_reactor()
-        timeout = self.make_timeout()
-        class SomeCase(TestCase):
-            def test_cruft(self):
-                reactor.callLater(timeout * 10.0, lambda: None)
-        test = SomeCase('test_cruft')
-        runner = self.make_runner(test, timeout)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events],
-            Equals(
-                [('startTest', test),
-                 ('addError', test),
-                 ('stopTest', test)]))
-        error = result._events[1][2]
-        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
-
-    def test_exports_reactor(self):
-        # The reactor is set as an attribute on the test case.
-        reactor = self.make_reactor()
-        timeout = self.make_timeout()
-        class SomeCase(TestCase):
-            def test_cruft(self):
-                self.assertIs(reactor, self.reactor)
-        test = SomeCase('test_cruft')
-        runner = self.make_runner(test, timeout)
-        result = TestResult()
-        runner.run(result)
-        self.assertEqual([], result.errors)
-        self.assertEqual([], result.failures)
-
-    def test_unhandled_error_from_deferred(self):
-        # If there's a Deferred with an unhandled error, the test fails.  Each
-        # unhandled error is reported with a separate traceback.
-        class SomeCase(TestCase):
-            def test_cruft(self):
-                # Note we aren't returning the Deferred so that the error will
-                # be unhandled.
-                defer.maybeDeferred(lambda: 1/0)
-                defer.maybeDeferred(lambda: 2/0)
-        test = SomeCase('test_cruft')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        error = result._events[1][2]
-        result._events[1] = ('addError', test, None)
-        self.assertThat(result._events, Equals(
-            [('startTest', test),
-             ('addError', test, None),
-             ('stopTest', test)]))
-        self.assertThat(
-            error, KeysEqual(
-                'twisted-log',
-                'unhandled-error-in-deferred',
-                'unhandled-error-in-deferred-1',
-                ))
-
-    def test_unhandled_error_from_deferred_combined_with_error(self):
-        # If there's a Deferred with an unhandled error, the test fails.  Each
-        # unhandled error is reported with a separate traceback, and the error
-        # is still reported.
-        class SomeCase(TestCase):
-            def test_cruft(self):
-                # Note we aren't returning the Deferred so that the error will
-                # be unhandled.
-                defer.maybeDeferred(lambda: 1/0)
-                2 / 0
-        test = SomeCase('test_cruft')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        error = result._events[1][2]
-        result._events[1] = ('addError', test, None)
-        self.assertThat(result._events, Equals(
-            [('startTest', test),
-             ('addError', test, None),
-             ('stopTest', test)]))
-        self.assertThat(
-            error, KeysEqual(
-                'traceback',
-                'twisted-log',
-                'unhandled-error-in-deferred',
-                ))
-
-    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
-    def test_keyboard_interrupt_stops_test_run(self):
-        # If we get a SIGINT during a test run, the test stops and no more
-        # tests run.
-        SIGINT = getattr(signal, 'SIGINT', None)
-        if not SIGINT:
-            raise self.skipTest("SIGINT unavailable")
-        class SomeCase(TestCase):
-            def test_pause(self):
-                return defer.Deferred()
-        test = SomeCase('test_pause')
-        reactor = self.make_reactor()
-        timeout = self.make_timeout()
-        runner = self.make_runner(test, timeout * 5)
-        result = self.make_result()
-        reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
-        self.assertThat(lambda:runner.run(result),
-            Raises(MatchesException(KeyboardInterrupt)))
-
-    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
-    def test_fast_keyboard_interrupt_stops_test_run(self):
-        # If we get a SIGINT during a test run, the test stops and no more
-        # tests run.
-        SIGINT = getattr(signal, 'SIGINT', None)
-        if not SIGINT:
-            raise self.skipTest("SIGINT unavailable")
-        class SomeCase(TestCase):
-            def test_pause(self):
-                return defer.Deferred()
-        test = SomeCase('test_pause')
-        reactor = self.make_reactor()
-        timeout = self.make_timeout()
-        runner = self.make_runner(test, timeout * 5)
-        result = self.make_result()
-        reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
-        self.assertThat(lambda:runner.run(result),
-            Raises(MatchesException(KeyboardInterrupt)))
-
-    def test_timeout_causes_test_error(self):
-        # If a test times out, it reports itself as having failed with a
-        # TimeoutError.
-        class SomeCase(TestCase):
-            def test_pause(self):
-                return defer.Deferred()
-        test = SomeCase('test_pause')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        error = result._events[1][2]
-        self.assertThat(
-            [event[:2] for event in result._events], Equals(
-            [('startTest', test),
-             ('addError', test),
-             ('stopTest', test)]))
-        self.assertIn('TimeoutError', str(error['traceback']))
-
-    def test_convenient_construction(self):
-        # As a convenience method, AsynchronousDeferredRunTest has a
-        # classmethod that returns an AsynchronousDeferredRunTest
-        # factory. This factory has the same API as the RunTest constructor.
-        reactor = object()
-        timeout = object()
-        handler = object()
-        factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
-        runner = factory(self, [handler])
-        self.assertIs(reactor, runner._reactor)
-        self.assertIs(timeout, runner._timeout)
-        self.assertIs(self, runner.case)
-        self.assertEqual([handler], runner.handlers)
-
-    def test_use_convenient_factory(self):
-        # Make sure that the factory can actually be used.
-        factory = AsynchronousDeferredRunTest.make_factory()
-        class SomeCase(TestCase):
-            run_tests_with = factory
-            def test_something(self):
-                pass
-        case = SomeCase('test_something')
-        case.run()
-
-    def test_convenient_construction_default_reactor(self):
-        # As a convenience method, AsynchronousDeferredRunTest has a
-        # classmethod that returns an AsynchronousDeferredRunTest
-        # factory. This factory has the same API as the RunTest constructor.
-        reactor = object()
-        handler = object()
-        factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
-        runner = factory(self, [handler])
-        self.assertIs(reactor, runner._reactor)
-        self.assertIs(self, runner.case)
-        self.assertEqual([handler], runner.handlers)
-
-    def test_convenient_construction_default_timeout(self):
-        # As a convenience method, AsynchronousDeferredRunTest has a
-        # classmethod that returns an AsynchronousDeferredRunTest
-        # factory. This factory has the same API as the RunTest constructor.
-        timeout = object()
-        handler = object()
-        factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
-        runner = factory(self, [handler])
-        self.assertIs(timeout, runner._timeout)
-        self.assertIs(self, runner.case)
-        self.assertEqual([handler], runner.handlers)
-
-    def test_convenient_construction_default_debugging(self):
-        # As a convenience method, AsynchronousDeferredRunTest has a
-        # classmethod that returns an AsynchronousDeferredRunTest
-        # factory. This factory has the same API as the RunTest constructor.
-        handler = object()
-        factory = AsynchronousDeferredRunTest.make_factory(debug=True)
-        runner = factory(self, [handler])
-        self.assertIs(self, runner.case)
-        self.assertEqual([handler], runner.handlers)
-        self.assertEqual(True, runner._debug)
-
-    def test_deferred_error(self):
-        class SomeTest(TestCase):
-            def test_something(self):
-                return defer.maybeDeferred(lambda: 1/0)
-        test = SomeTest('test_something')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events],
-            Equals([
-                ('startTest', test),
-                ('addError', test),
-                ('stopTest', test)]))
-        error = result._events[1][2]
-        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
-
-    def test_only_addError_once(self):
-        # Even if the reactor is unclean and the test raises an error and the
-        # cleanups raise errors, we only call addError once per test.
-        reactor = self.make_reactor()
-        class WhenItRains(TestCase):
-            def it_pours(self):
-                # Add a dirty cleanup.
-                self.addCleanup(lambda: 3 / 0)
-                # Dirty the reactor.
-                from twisted.internet.protocol import ServerFactory
-                reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
-                # Unhandled error.
-                defer.maybeDeferred(lambda: 2 / 0)
-                # Actual error.
-                raise RuntimeError("Excess precipitation")
-        test = WhenItRains('it_pours')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events],
-            Equals([
-                ('startTest', test),
-                ('addError', test),
-                ('stopTest', test)]))
-        error = result._events[1][2]
-        self.assertThat(
-            error, KeysEqual(
-                'traceback',
-                'traceback-1',
-                'traceback-2',
-                'twisted-log',
-                'unhandled-error-in-deferred',
-                ))
-
-    def test_log_err_is_error(self):
-        # An error logged during the test run is recorded as an error in the
-        # tests.
-        class LogAnError(TestCase):
-            def test_something(self):
-                try:
-                    1/0
-                except ZeroDivisionError:
-                    f = failure.Failure()
-                log.err(f)
-        test = LogAnError('test_something')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events],
-            Equals([
-                ('startTest', test),
-                ('addError', test),
-                ('stopTest', test)]))
-        error = result._events[1][2]
-        self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))
-
-    def test_log_err_flushed_is_success(self):
-        # An error logged and then flushed with flush_logged_errors is not
-        # recorded as an error in the test results.
-        class LogAnError(TestCase):
-            def test_something(self):
-                try:
-                    1/0
-                except ZeroDivisionError:
-                    f = failure.Failure()
-                log.err(f)
-                flush_logged_errors(ZeroDivisionError)
-        test = LogAnError('test_something')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            result._events,
-            Equals([
-                ('startTest', test),
-                ('addSuccess', test, {'twisted-log': text_content('')}),
-                ('stopTest', test)]))
-
-    def test_log_in_details(self):
-        class LogAnError(TestCase):
-            def test_something(self):
-                log.msg("foo")
-                1/0
-        test = LogAnError('test_something')
-        runner = self.make_runner(test)
-        result = self.make_result()
-        runner.run(result)
-        self.assertThat(
-            [event[:2] for event in result._events],
-            Equals([
-                ('startTest', test),
-                ('addError', test),
-                ('stopTest', test)]))
-        error = result._events[1][2]
-        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
-
-    def test_debugging_unchanged_during_test_by_default(self):
-        debugging = [(defer.Deferred.debug, DelayedCall.debug)]
-        class SomeCase(TestCase):
-            def test_debugging_enabled(self):
-                debugging.append((defer.Deferred.debug, DelayedCall.debug))
-        test = SomeCase('test_debugging_enabled')
-        runner = AsynchronousDeferredRunTest(
-            test, handlers=test.exception_handlers,
-            reactor=self.make_reactor(), timeout=self.make_timeout())
-        runner.run(self.make_result())
-        self.assertEqual(debugging[0], debugging[1])
-
-    def test_debugging_enabled_during_test_with_debug_flag(self):
-        self.patch(defer.Deferred, 'debug', False)
-        self.patch(DelayedCall, 'debug', False)
-        debugging = []
-        class SomeCase(TestCase):
-            def test_debugging_enabled(self):
-                debugging.append((defer.Deferred.debug, DelayedCall.debug))
-        test = SomeCase('test_debugging_enabled')
-        runner = AsynchronousDeferredRunTest(
-            test, handlers=test.exception_handlers,
-            reactor=self.make_reactor(), timeout=self.make_timeout(),
-            debug=True)
-        runner.run(self.make_result())
-        self.assertEqual([(True, True)], debugging)
-        self.assertEqual(False, defer.Deferred.debug)
-        self.assertEqual(False, DelayedCall.debug)
-
-
-class TestAssertFailsWith(NeedsTwistedTestCase):
-    """Tests for `assert_fails_with`."""
-
-    if SynchronousDeferredRunTest is not None:
-        run_tests_with = SynchronousDeferredRunTest
-
-    def test_assert_fails_with_success(self):
-        # assert_fails_with fails the test if it's given a Deferred that
-        # succeeds.
-        marker = object()
-        d = assert_fails_with(defer.succeed(marker), RuntimeError)
-        def check_result(failure):
-            failure.trap(self.failureException)
-            self.assertThat(
-                str(failure.value),
-                Equals("RuntimeError not raised (%r returned)" % (marker,)))
-        d.addCallbacks(
-            lambda x: self.fail("Should not have succeeded"), check_result)
-        return d
-
-    def test_assert_fails_with_success_multiple_types(self):
-        # assert_fails_with fails the test if it's given a Deferred that
-        # succeeds.
-        marker = object()
-        d = assert_fails_with(
-            defer.succeed(marker), RuntimeError, ZeroDivisionError)
-        def check_result(failure):
-            failure.trap(self.failureException)
-            self.assertThat(
-                str(failure.value),
-                Equals("RuntimeError, ZeroDivisionError not raised "
-                       "(%r returned)" % (marker,)))
-        d.addCallbacks(
-            lambda x: self.fail("Should not have succeeded"), check_result)
-        return d
-
-    def test_assert_fails_with_wrong_exception(self):
-        # assert_fails_with fails the test if it's given a Deferred that
-        # fails with an unexpected type of exception.
-        d = assert_fails_with(
-            defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)
-        def check_result(failure):
-            failure.trap(self.failureException)
-            lines = str(failure.value).splitlines()
-            self.assertThat(
-                lines[:2],
-                Equals([
-                    ("ZeroDivisionError raised instead of RuntimeError, "
-                     "KeyboardInterrupt:"),
-                    " Traceback (most recent call last):",
-                    ]))
-        d.addCallbacks(
-            lambda x: self.fail("Should not have succeeded"), check_result)
-        return d
-
-    def test_assert_fails_with_expected_exception(self):
-        # assert_fails_with calls back with the value of the failure if it's
-        # one of the expected types of failures.
-        try:
-            1/0
-        except ZeroDivisionError:
-            f = failure.Failure()
-        d = assert_fails_with(defer.fail(f), ZeroDivisionError)
-        return d.addCallback(self.assertThat, Equals(f.value))
-
-    def test_custom_failure_exception(self):
-        # If assert_fails_with is passed a 'failureException' keyword
-        # argument, then it will raise that instead of `AssertionError`.
-        class CustomException(Exception):
-            pass
-        marker = object()
-        d = assert_fails_with(
-            defer.succeed(marker), RuntimeError,
-            failureException=CustomException)
-        def check_result(failure):
-            failure.trap(CustomException)
-            self.assertThat(
-                str(failure.value),
-                Equals("RuntimeError not raised (%r returned)" % (marker,)))
-        return d.addCallbacks(
-            lambda x: self.fail("Should not have succeeded"), check_result)
-
-
-class TestRunWithLogObservers(NeedsTwistedTestCase):
-
-    def test_restores_observers(self):
-        from testtools.deferredruntest import run_with_log_observers
-        from twisted.python import log
-        # Make sure there's at least one observer.  This reproduces bug
-        # #926189.
-        log.addObserver(lambda *args: None)
-        observers = list(log.theLogPublisher.observers)
-        run_with_log_observers([], lambda: None)
-        self.assertEqual(observers, log.theLogPublisher.observers)
-
-
-def test_suite():
-    from unittest import TestLoader, TestSuite
-    return TestSuite(
-        [TestLoader().loadTestsFromName(__name__),
-         make_integration_tests()])
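
For context, AsynchronousDeferredRunTest is normally wired into a test case
via the factory tested above. A minimal sketch (the timeout value is
illustrative):

    from testtools import TestCase
    from testtools.deferredruntest import AsynchronousDeferredRunTest
    from twisted.internet import defer

    class ExampleTwistedCase(TestCase):
        # make_factory returns a RunTest factory with the same API as the
        # RunTest constructor; timeout is in seconds.
        run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=2)

        def test_returns_deferred(self):
            # The runner waits for the returned Deferred (and for any
            # Deferreds from setUp, tearDown and cleanups) before finishing.
            return defer.succeed(None)
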
diff --git a/lib/testtools/testtools/tests/test_distutilscmd.py b/lib/testtools/testtools/tests/test_distutilscmd.py
deleted file mode 100644
index fd0dd90..0000000
--- a/lib/testtools/testtools/tests/test_distutilscmd.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details.
-
-"""Tests for the distutils test command logic."""
-
-from distutils.dist import Distribution
-
-from extras import try_import
-
-from testtools.compat import (
-    _b,
-    _u,
-    BytesIO,
-    )
-fixtures = try_import('fixtures')
-
-import testtools
-from testtools import TestCase
-from testtools.distutilscmd import TestCommand
-from testtools.matchers import MatchesRegex
-
-
-if fixtures:
-    class SampleTestFixture(fixtures.Fixture):
-        """Creates testtools.runexample temporarily."""
-
-        def __init__(self):
-            self.package = fixtures.PythonPackage(
-                'runexample', [('__init__.py', _b("""
-from testtools import TestCase
-
-class TestFoo(TestCase):
-    def test_bar(self):
-        pass
-    def test_quux(self):
-        pass
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
-"""))])
-
-        def setUp(self):
-            super(SampleTestFixture, self).setUp()
-            self.useFixture(self.package)
-            testtools.__path__.append(self.package.base)
-            self.addCleanup(testtools.__path__.remove, self.package.base)
-
-
-class TestCommandTest(TestCase):
-
-    def setUp(self):
-        super(TestCommandTest, self).setUp()
-        if fixtures is None:
-            self.skipTest("Need fixtures")
-
-    def test_test_module(self):
-        self.useFixture(SampleTestFixture())
-        stdout = self.useFixture(fixtures.StringStream('stdout'))
-        dist = Distribution()
-        dist.script_name = 'setup.py'
-        dist.script_args = ['test']
-        dist.cmdclass = {'test': TestCommand}
-        dist.command_options = {
-            'test': {'test_module': ('command line', 'testtools.runexample')}}
-        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
-            cmd = dist.reinitialize_command('test')
-            dist.run_command('test')
-        self.assertThat(
-            stdout.getDetails()['stdout'].as_text(),
-            MatchesRegex(_u("""Tests running...
-
-Ran 2 tests in \\d.\\d\\d\\ds
-OK
-""")))
-
-    def test_test_suite(self):
-        self.useFixture(SampleTestFixture())
-        stdout = self.useFixture(fixtures.StringStream('stdout'))
-        dist = Distribution()
-        dist.script_name = 'setup.py'
-        dist.script_args = ['test']
-        dist.cmdclass = {'test': TestCommand}
-        dist.command_options = {
-            'test': {
-                'test_suite': (
-                    'command line', 'testtools.runexample.test_suite')}}
-        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
-            cmd = dist.reinitialize_command('test')
-            dist.run_command('test')
-        self.assertThat(
-            stdout.getDetails()['stdout'].as_text(),
-            MatchesRegex(_u("""Tests running...
-
-Ran 2 tests in \\d.\\d\\d\\ds
-OK
-""")))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
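
The command exercised above is normally hooked into a project's setup.py.
A minimal sketch, using only what these tests demonstrate:

    from distutils.core import setup
    from testtools.distutilscmd import TestCommand

    setup(
        name='example',
        cmdclass={'test': TestCommand},
    )

"python setup.py test" with a test_module or test_suite option then emits
the "Tests running..." output matched by the regexes above.
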
diff --git a/lib/testtools/testtools/tests/test_fixturesupport.py b/lib/testtools/testtools/tests/test_fixturesupport.py
deleted file mode 100644
index e309045..0000000
--- a/lib/testtools/testtools/tests/test_fixturesupport.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
-
-import unittest
-
-from extras import try_import
-
-from testtools import (
-    TestCase,
-    content,
-    content_type,
-    )
-from testtools.compat import _b, _u
-from testtools.matchers import Contains
-from testtools.testresult.doubles import (
-    ExtendedTestResult,
-    )
-
-fixtures = try_import('fixtures')
-LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
-
-
-class TestFixtureSupport(TestCase):
-
-    def setUp(self):
-        super(TestFixtureSupport, self).setUp()
-        if fixtures is None or LoggingFixture is None:
-            self.skipTest("Need fixtures")
-
-    def test_useFixture(self):
-        fixture = LoggingFixture()
-        class SimpleTest(TestCase):
-            def test_foo(self):
-                self.useFixture(fixture)
-        result = unittest.TestResult()
-        SimpleTest('test_foo').run(result)
-        self.assertTrue(result.wasSuccessful())
-        self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
-
-    def test_useFixture_cleanups_raise_caught(self):
-        calls = []
-        def raiser(ignored):
-            calls.append('called')
-            raise Exception('foo')
-        fixture = fixtures.FunctionFixture(lambda:None, raiser)
-        class SimpleTest(TestCase):
-            def test_foo(self):
-                self.useFixture(fixture)
-        result = unittest.TestResult()
-        SimpleTest('test_foo').run(result)
-        self.assertFalse(result.wasSuccessful())
-        self.assertEqual(['called'], calls)
-
-    def test_useFixture_details_captured(self):
-        class DetailsFixture(fixtures.Fixture):
-            def setUp(self):
-                fixtures.Fixture.setUp(self)
-                self.addCleanup(delattr, self, 'content')
-                self.content = [_b('content available until cleanUp')]
-                self.addDetail('content',
-                    content.Content(content_type.UTF8_TEXT, self.get_content))
-            def get_content(self):
-                return self.content
-        fixture = DetailsFixture()
-        class SimpleTest(TestCase):
-            def test_foo(self):
-                self.useFixture(fixture)
-                # Add a colliding detail (both should show up)
-                self.addDetail('content',
-                    content.Content(content_type.UTF8_TEXT, lambda:[_b('foo')]))
-        result = ExtendedTestResult()
-        SimpleTest('test_foo').run(result)
-        self.assertEqual('addSuccess', result._events[-2][0])
-        details = result._events[-2][2]
-        self.assertEqual(['content', 'content-1'], sorted(details.keys()))
-        self.assertEqual('foo', details['content'].as_text())
-        self.assertEqual('content available until cleanUp',
-            details['content-1'].as_text())
-
-    def test_useFixture_multiple_details_captured(self):
-        class DetailsFixture(fixtures.Fixture):
-            def setUp(self):
-                fixtures.Fixture.setUp(self)
-                self.addDetail('aaa', content.text_content("foo"))
-                self.addDetail('bbb', content.text_content("bar"))
-        fixture = DetailsFixture()
-        class SimpleTest(TestCase):
-            def test_foo(self):
-                self.useFixture(fixture)
-        result = ExtendedTestResult()
-        SimpleTest('test_foo').run(result)
-        self.assertEqual('addSuccess', result._events[-2][0])
-        details = result._events[-2][2]
-        self.assertEqual(['aaa', 'bbb'], sorted(details))
-        self.assertEqual(_u('foo'), details['aaa'].as_text())
-        self.assertEqual(_u('bar'), details['bbb'].as_text())
-
-    def test_useFixture_details_captured_from_setUp(self):
-        # Details added during fixture set-up are gathered even if setUp()
-        # fails with an exception.
-        class BrokenFixture(fixtures.Fixture):
-            def setUp(self):
-                fixtures.Fixture.setUp(self)
-                self.addDetail('content', content.text_content("foobar"))
-                raise Exception()
-        fixture = BrokenFixture()
-        class SimpleTest(TestCase):
-            def test_foo(self):
-                self.useFixture(fixture)
-        result = ExtendedTestResult()
-        SimpleTest('test_foo').run(result)
-        self.assertEqual('addError', result._events[-2][0])
-        details = result._events[-2][2]
-        self.assertEqual(['content', 'traceback'], sorted(details))
-        self.assertEqual('foobar', ''.join(details['content'].iter_text()))
-
-    def test_useFixture_original_exception_raised_if_gather_details_fails(self):
-        # In bug #1368440 it was reported that when a fixture fails in setUp
-        # and gather_details then errors on it, the original setUp exception
-        # is not reported.
-        class BrokenFixture(fixtures.Fixture):
-            def getDetails(self):
-                raise AttributeError("getDetails broke")
-            def setUp(self):
-                fixtures.Fixture.setUp(self)
-                raise Exception("setUp broke")
-        fixture = BrokenFixture()
-        class SimpleTest(TestCase):
-            def test_foo(self):
-                self.useFixture(fixture)
-        result = ExtendedTestResult()
-        SimpleTest('test_foo').run(result)
-        self.assertEqual('addError', result._events[-2][0])
-        details = result._events[-2][2]
-        self.assertEqual(['traceback', 'traceback-1'], sorted(details))
-        self.assertThat(
-            ''.join(details['traceback'].iter_text()),
-            Contains('setUp broke'))
-        self.assertThat(
-            ''.join(details['traceback-1'].iter_text()),
-            Contains('getDetails broke'))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
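
For reference, the useFixture integration tested above looks like this in
an ordinary test (a minimal sketch; TempDir is a stock fixture shipped by
the fixtures package):

    import fixtures
    from testtools import TestCase

    class ExampleFixtureCase(TestCase):
        def test_with_tempdir(self):
            # useFixture calls setUp on the fixture, schedules its cleanUp,
            # and gathers any details the fixture adds into the test.
            tempdir = self.useFixture(fixtures.TempDir())
            self.assertNotEqual('', tempdir.path)
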
diff --git a/lib/testtools/testtools/tests/test_helpers.py b/lib/testtools/testtools/tests/test_helpers.py
deleted file mode 100644
index 848c2f0..0000000
--- a/lib/testtools/testtools/tests/test_helpers.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
-
-from testtools import TestCase
-from testtools.tests.helpers import (
-    FullStackRunTest,
-    hide_testtools_stack,
-    is_stack_hidden,
-    )
-
-
-class TestStackHiding(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def setUp(self):
-        super(TestStackHiding, self).setUp()
-        self.addCleanup(hide_testtools_stack, is_stack_hidden())
-
-    def test_is_stack_hidden_consistent_true(self):
-        hide_testtools_stack(True)
-        self.assertEqual(True, is_stack_hidden())
-
-    def test_is_stack_hidden_consistent_false(self):
-        hide_testtools_stack(False)
-        self.assertEqual(False, is_stack_hidden())
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_monkey.py b/lib/testtools/testtools/tests/test_monkey.py
deleted file mode 100644
index 540a2ee..0000000
--- a/lib/testtools/testtools/tests/test_monkey.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright (c) 2010 Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-"""Tests for testtools.monkey."""
-
-from testtools import TestCase
-from testtools.matchers import MatchesException, Raises
-from testtools.monkey import MonkeyPatcher, patch
-
-
-class TestObj:
-
-    def __init__(self):
-        self.foo = 'foo value'
-        self.bar = 'bar value'
-        self.baz = 'baz value'
-
-
-class MonkeyPatcherTest(TestCase):
-    """
-    Tests for 'MonkeyPatcher' monkey-patching class.
-    """
-
-    def setUp(self):
-        super(MonkeyPatcherTest, self).setUp()
-        self.test_object = TestObj()
-        self.original_object = TestObj()
-        self.monkey_patcher = MonkeyPatcher()
-
-    def test_empty(self):
-        # A monkey patcher without patches doesn't change a thing.
-        self.monkey_patcher.patch()
-
-        # We can't assert that all state is unchanged, but at least we can
-        # check our test object.
-        self.assertEquals(self.original_object.foo, self.test_object.foo)
-        self.assertEquals(self.original_object.bar, self.test_object.bar)
-        self.assertEquals(self.original_object.baz, self.test_object.baz)
-
-    def test_construct_with_patches(self):
-        # Constructing a 'MonkeyPatcher' with patches adds all of the given
-        # patches to the patch list.
-        patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'),
-                                (self.test_object, 'bar', 'hehe'))
-        patcher.patch()
-        self.assertEquals('haha', self.test_object.foo)
-        self.assertEquals('hehe', self.test_object.bar)
-        self.assertEquals(self.original_object.baz, self.test_object.baz)
-
-    def test_patch_existing(self):
-        # Patching an attribute that exists sets it to the value defined in the
-        # patch.
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
-        self.monkey_patcher.patch()
-        self.assertEquals(self.test_object.foo, 'haha')
-
-    def test_patch_non_existing(self):
-        # Patching a non-existing attribute sets it to the value defined in
-        # the patch.
-        self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
-        self.monkey_patcher.patch()
-        self.assertEquals(self.test_object.doesntexist, 'value')
-
-    def test_restore_non_existing(self):
-        # Restoring a value that didn't exist before the patch deletes the
-        # value.
-        self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
-        self.monkey_patcher.patch()
-        self.monkey_patcher.restore()
-        marker = object()
-        self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker))
-
-    def test_patch_already_patched(self):
-        # Adding a patch for an object and attribute that already have a patch
-        # overrides the existing patch.
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH')
-        self.monkey_patcher.patch()
-        self.assertEquals(self.test_object.foo, 'BLAH')
-        self.monkey_patcher.restore()
-        self.assertEquals(self.test_object.foo, self.original_object.foo)
-
-    def test_restore_twice_is_a_no_op(self):
-        # Restoring an already-restored monkey patch is a no-op.
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
-        self.monkey_patcher.patch()
-        self.monkey_patcher.restore()
-        self.assertEquals(self.test_object.foo, self.original_object.foo)
-        self.monkey_patcher.restore()
-        self.assertEquals(self.test_object.foo, self.original_object.foo)
-
-    def test_run_with_patches_decoration(self):
-        # run_with_patches runs the given callable, passing in all arguments
-        # and keyword arguments, and returns the return value of the callable.
-        log = []
-
-        def f(a, b, c=None):
-            log.append((a, b, c))
-            return 'foo'
-
-        result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10)
-        self.assertEquals('foo', result)
-        self.assertEquals([(1, 2, 10)], log)
-
-    def test_repeated_run_with_patches(self):
-        # We can call the same function with run_with_patches more than
-        # once. All patches apply for each call.
-        def f():
-            return (self.test_object.foo, self.test_object.bar,
-                    self.test_object.baz)
-
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
-        result = self.monkey_patcher.run_with_patches(f)
-        self.assertEquals(
-            ('haha', self.original_object.bar, self.original_object.baz),
-            result)
-        result = self.monkey_patcher.run_with_patches(f)
-        self.assertEquals(
-            ('haha', self.original_object.bar, self.original_object.baz),
-            result)
-
-    def test_run_with_patches_restores(self):
-        # run_with_patches restores the original values after the function has
-        # executed.
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
-        self.assertEquals(self.original_object.foo, self.test_object.foo)
-        self.monkey_patcher.run_with_patches(lambda: None)
-        self.assertEquals(self.original_object.foo, self.test_object.foo)
-
-    def test_run_with_patches_restores_on_exception(self):
-        # run_with_patches restores the original values even when the function
-        # raises an exception.
-        def _():
-            self.assertEquals(self.test_object.foo, 'haha')
-            self.assertEquals(self.test_object.bar, 'blahblah')
-            raise RuntimeError("Something went wrong!")
-
-        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
-        self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah')
-
-        self.assertThat(lambda: self.monkey_patcher.run_with_patches(_),
-            Raises(MatchesException(RuntimeError("Something went wrong!"))))
-        self.assertEquals(self.test_object.foo, self.original_object.foo)
-        self.assertEquals(self.test_object.bar, self.original_object.bar)
-
-
-class TestPatchHelper(TestCase):
-
-    def test_patch_patches(self):
-        # patch(obj, name, value) sets obj.name to value.
-        test_object = TestObj()
-        patch(test_object, 'foo', 42)
-        self.assertEqual(42, test_object.foo)
-
-    def test_patch_returns_cleanup(self):
-        # patch(obj, name, value) returns a nullary callable that restores obj
-        # to its original state when run.
-        test_object = TestObj()
-        original = test_object.foo
-        cleanup = patch(test_object, 'foo', 42)
-        cleanup()
-        self.assertEqual(original, test_object.foo)
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_run.py b/lib/testtools/testtools/tests/test_run.py
deleted file mode 100644
index ac4b9dd..0000000
--- a/lib/testtools/testtools/tests/test_run.py
+++ /dev/null
@@ -1,309 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Tests for the test runner logic."""
-
-from unittest import TestSuite
-import sys
-from textwrap import dedent
-
-from extras import try_import
-fixtures = try_import('fixtures')
-testresources = try_import('testresources')
-
-import testtools
-from testtools import TestCase, run, skipUnless
-from testtools.compat import (
-    _b,
-    _u,
-    StringIO,
-    )
-from testtools.matchers import (
-    Contains,
-    MatchesRegex,
-    )
-
-
-if fixtures:
-    class SampleTestFixture(fixtures.Fixture):
-        """Creates testtools.runexample temporarily."""
-
-        def __init__(self, broken=False):
-            """Create a SampleTestFixture.
-
-            :param broken: If True, the sample file will not be importable.
-            """
-            if not broken:
-                init_contents = _b("""\
-from testtools import TestCase
-
-class TestFoo(TestCase):
-    def test_bar(self):
-        pass
-    def test_quux(self):
-        pass
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
-""")
-            else:
-                init_contents = b"class not in\n"
-            self.package = fixtures.PythonPackage(
-            'runexample', [('__init__.py', init_contents)])
-
-        def setUp(self):
-            super(SampleTestFixture, self).setUp()
-            self.useFixture(self.package)
-            testtools.__path__.append(self.package.base)
-            self.addCleanup(testtools.__path__.remove, self.package.base)
-            self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
-
-
-if fixtures and testresources:
-    class SampleResourcedFixture(fixtures.Fixture):
-        """Creates a test suite that uses testresources."""
-
-        def __init__(self):
-            super(SampleResourcedFixture, self).__init__()
-            self.package = fixtures.PythonPackage(
-            'resourceexample', [('__init__.py', _b("""
-from fixtures import Fixture
-from testresources import (
-    FixtureResource,
-    OptimisingTestSuite,
-    ResourcedTestCase,
-    )
-from testtools import TestCase
-
-class Printer(Fixture):
-
-    def setUp(self):
-        super(Printer, self).setUp()
-        print('Setting up Printer')
-
-    def reset(self):
-        pass
-
-class TestFoo(TestCase, ResourcedTestCase):
-    # When run, this will print 'Setting up Printer' just once, unless the
-    # OptimisingTestSuite is not honoured, in which case it prints once per
-    # test case.
-    resources=[('res', FixtureResource(Printer()))]
-    def test_bar(self):
-        pass
-    def test_foo(self):
-        pass
-    def test_quux(self):
-        pass
-def test_suite():
-    from unittest import TestLoader
-    return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
-"""))])
-
-        def setUp(self):
-            super(SampleResourcedFixture, self).setUp()
-            self.useFixture(self.package)
-            self.addCleanup(testtools.__path__.remove, self.package.base)
-            testtools.__path__.append(self.package.base)
-
-
-if fixtures and run.have_discover:
-    class SampleLoadTestsPackage(fixtures.Fixture):
-        """Creates a test suite package using load_tests."""
-
-        def __init__(self):
-            super(SampleLoadTestsPackage, self).__init__()
-            self.package = fixtures.PythonPackage(
-            'discoverexample', [('__init__.py', _b("""
-from testtools import TestCase, clone_test_with_new_id
-
-class TestExample(TestCase):
-    def test_foo(self):
-        pass
-
-def load_tests(loader, tests, pattern):
-    tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
-    return tests
-"""))])
-
-        def setUp(self):
-            super(SampleLoadTestsPackage, self).setUp()
-            self.useFixture(self.package)
-            self.addCleanup(sys.path.remove, self.package.base)
-
-
-class TestRun(TestCase):
-
-    def setUp(self):
-        super(TestRun, self).setUp()
-        if fixtures is None:
-            self.skipTest("Need fixtures")
-
-    def test_run_custom_list(self):
-        self.useFixture(SampleTestFixture())
-        tests = []
-        class CaptureList(run.TestToolsTestRunner):
-            def list(self, test):
-                tests.append(set([case.id() for case
-                    in testtools.testsuite.iterate_tests(test)]))
-        out = StringIO()
-        try:
-            program = run.TestProgram(
-                argv=['prog', '-l', 'testtools.runexample.test_suite'],
-                stdout=out, testRunner=CaptureList)
-        except SystemExit:
-            exc_info = sys.exc_info()
-            raise AssertionError("-l tried to exit. %r" % exc_info[1])
-        self.assertEqual([set(['testtools.runexample.TestFoo.test_bar',
-            'testtools.runexample.TestFoo.test_quux'])], tests)
-
-    def test_run_list(self):
-        self.useFixture(SampleTestFixture())
-        out = StringIO()
-        try:
-            run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
-        except SystemExit:
-            exc_info = sys.exc_info()
-            raise AssertionError("-l tried to exit. %r" % exc_info[1])
-        self.assertEqual("""testtools.runexample.TestFoo.test_bar
-testtools.runexample.TestFoo.test_quux
-""", out.getvalue())
-
-    def test_run_list_failed_import(self):
-        if not run.have_discover:
-            self.skipTest("Need discover")
-        broken = self.useFixture(SampleTestFixture(broken=True))
-        out = StringIO()
-        exc = self.assertRaises(
-            SystemExit,
-            run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
-        self.assertEqual(2, exc.args[0])
-        self.assertEqual("""Failed to import
-runexample
-""", out.getvalue())
-
-    def test_run_orders_tests(self):
-        self.useFixture(SampleTestFixture())
-        out = StringIO()
-        # We load two tests - one that exists and one that doesn't, and we
-        # should get the one that exists and neither the one that doesn't nor
-        # the unmentioned one that does.
-        tempdir = self.useFixture(fixtures.TempDir())
-        tempname = tempdir.path + '/tests.list'
-        f = open(tempname, 'wb')
-        try:
-            f.write(_b("""
-testtools.runexample.TestFoo.test_bar
-testtools.runexample.missingtest
-"""))
-        finally:
-            f.close()
-        try:
-            run.main(['prog', '-l', '--load-list', tempname,
-                'testtools.runexample.test_suite'], out)
-        except SystemExit:
-            exc_info = sys.exc_info()
-            raise AssertionError("-l tried to exit. %r" % exc_info[1])
-        self.assertEqual("""testtools.runexample.TestFoo.test_bar
-""", out.getvalue())
-
-    def test_run_load_list(self):
-        self.useFixture(SampleTestFixture())
-        out = StringIO()
-        # We load two tests - one that exists and one that doesn't, and we
-        # should get the one that exists and neither the one that doesn't nor
-        # the unmentioned one that does.
-        tempdir = self.useFixture(fixtures.TempDir())
-        tempname = tempdir.path + '/tests.list'
-        f = open(tempname, 'wb')
-        try:
-            f.write(_b("""
-testtools.runexample.TestFoo.test_bar
-testtools.runexample.missingtest
-"""))
-        finally:
-            f.close()
-        try:
-            run.main(['prog', '-l', '--load-list', tempname,
-                'testtools.runexample.test_suite'], out)
-        except SystemExit:
-            exc_info = sys.exc_info()
-            raise AssertionError("-l tried to exit. %r" % exc_info[1])
-        self.assertEqual("""testtools.runexample.TestFoo.test_bar
-""", out.getvalue())
-
-    def test_load_list_preserves_custom_suites(self):
-        if testresources is None:
-            self.skipTest("Need testresources")
-        self.useFixture(SampleResourcedFixture())
-        # We load two of the three tests, leaving one out. Both loaded tests
-        # share a resource, so we should see just one resource setup occur.
-        tempdir = self.useFixture(fixtures.TempDir())
-        tempname = tempdir.path + '/tests.list'
-        f = open(tempname, 'wb')
-        try:
-            f.write(_b("""
-testtools.resourceexample.TestFoo.test_bar
-testtools.resourceexample.TestFoo.test_foo
-"""))
-        finally:
-            f.close()
-        stdout = self.useFixture(fixtures.StringStream('stdout'))
-        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
-            try:
-                run.main(['prog', '--load-list', tempname,
-                    'testtools.resourceexample.test_suite'], stdout.stream)
-            except SystemExit:
-                # Evil resides in TestProgram.
-                pass
-        out = stdout.getDetails()['stdout'].as_text()
-        self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
-
-    def test_run_failfast(self):
-        stdout = self.useFixture(fixtures.StringStream('stdout'))
-
-        class Failing(TestCase):
-            def test_a(self):
-                self.fail('a')
-            def test_b(self):
-                self.fail('b')
-        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
-            runner = run.TestToolsTestRunner(failfast=True)
-            runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
-        self.assertThat(
-            stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
-
-    def test_stdout_honoured(self):
-        self.useFixture(SampleTestFixture())
-        out = StringIO()
-        exc = self.assertRaises(SystemExit, run.main,
-            argv=['prog', 'testtools.runexample.test_suite'],
-            stdout=out)
-        self.assertEqual((0,), exc.args)
-        self.assertThat(
-            out.getvalue(),
-            MatchesRegex(_u("""Tests running...
-
-Ran 2 tests in \\d.\\d\\d\\ds
-OK
-""")))
-
-    @skipUnless(run.have_discover, "discovery not present")
-    @skipUnless(fixtures, "fixtures not present")
-    def test_issue_16662(self):
-        # unittest's discover implementation didn't handle load_tests on
-        # packages. That is fixed in a pending CPython commit, but we want to
-        # offer it to all testtools users regardless of Python version.
-        # See http://bugs.python.org/issue16662
-        pkg = self.useFixture(SampleLoadTestsPackage())
-        out = StringIO()
-        self.assertEqual(None, run.main(
-            ['prog', 'discover', '-l', pkg.package.base], out))
-        self.assertEqual(dedent("""\
-            discoverexample.TestExample.test_foo
-            fred
-            """), out.getvalue())
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_runtest.py b/lib/testtools/testtools/tests/test_runtest.py
deleted file mode 100644
index 3ae8b13..0000000
--- a/lib/testtools/testtools/tests/test_runtest.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
-
-"""Tests for the RunTest single test execution logic."""
-
-from testtools import (
-    ExtendedToOriginalDecorator,
-    run_test_with,
-    RunTest,
-    TestCase,
-    TestResult,
-    )
-from testtools.matchers import MatchesException, Is, Raises
-from testtools.testresult.doubles import ExtendedTestResult
-from testtools.tests.helpers import FullStackRunTest
-
-
-class TestRunTest(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def make_case(self):
-        class Case(TestCase):
-            def test(self):
-                pass
-        return Case('test')
-
-    def test___init___short(self):
-        run = RunTest("bar")
-        self.assertEqual("bar", run.case)
-        self.assertEqual([], run.handlers)
-
-    def test__init____handlers(self):
-        handlers = [("quux", "baz")]
-        run = RunTest("bar", handlers)
-        self.assertEqual(handlers, run.handlers)
-
-    def test__init____handlers_last_resort(self):
-        handlers = [("quux", "baz")]
-        last_resort = "foo"
-        run = RunTest("bar", handlers, last_resort)
-        self.assertEqual(last_resort, run.last_resort)
-
-    def test_run_with_result(self):
-        # test.run passes result down to _run_test_method.
-        log = []
-        class Case(TestCase):
-            def _run_test_method(self, result):
-                log.append(result)
-        case = Case('_run_test_method')
-        run = RunTest(case, lambda x: log.append(x))
-        result = TestResult()
-        run.run(result)
-        self.assertEqual(1, len(log))
-        self.assertEqual(result, log[0].decorated)
-
-    def test_run_no_result_manages_new_result(self):
-        log = []
-        run = RunTest(self.make_case(), lambda x: log.append(x) or x)
-        result = run.run()
-        self.assertIsInstance(result.decorated, TestResult)
-
-    def test__run_core_called(self):
-        case = self.make_case()
-        log = []
-        run = RunTest(case, lambda x: x)
-        run._run_core = lambda: log.append('foo')
-        run.run()
-        self.assertEqual(['foo'], log)
-
-    def test__run_prepared_result_does_not_mask_keyboard(self):
-        class Case(TestCase):
-            def test(self):
-                raise KeyboardInterrupt("go")
-        case = Case('test')
-        run = RunTest(case)
-        run.result = ExtendedTestResult()
-        self.assertThat(lambda: run._run_prepared_result(run.result),
-            Raises(MatchesException(KeyboardInterrupt)))
-        self.assertEqual(
-            [('startTest', case), ('stopTest', case)], run.result._events)
-        # tearDown is still run though!
-        self.assertEqual(True, getattr(case, '_TestCase__teardown_called'))
-
-    def test__run_user_calls_onException(self):
-        case = self.make_case()
-        log = []
-        def handler(exc_info):
-            log.append("got it")
-            self.assertEqual(3, len(exc_info))
-            self.assertIsInstance(exc_info[1], KeyError)
-            self.assertIs(KeyError, exc_info[0])
-        case.addOnException(handler)
-        e = KeyError('Yo')
-        def raises():
-            raise e
-        run = RunTest(case, [(KeyError, None)])
-        run.result = ExtendedTestResult()
-        status = run._run_user(raises)
-        self.assertEqual(run.exception_caught, status)
-        self.assertEqual([], run.result._events)
-        self.assertEqual(["got it"], log)
-
-    def test__run_user_can_catch_Exception(self):
-        case = self.make_case()
-        e = Exception('Yo')
-        def raises():
-            raise e
-        log = []
-        run = RunTest(case, [(Exception, None)])
-        run.result = ExtendedTestResult()
-        status = run._run_user(raises)
-        self.assertEqual(run.exception_caught, status)
-        self.assertEqual([], run.result._events)
-        self.assertEqual([], log)
-
-    def test__run_prepared_result_uncaught_Exception_raised(self):
-        e = KeyError('Yo')
-        class Case(TestCase):
-            def test(self):
-                raise e
-        case = Case('test')
-        log = []
-        def log_exc(self, result, err):
-            log.append((result, err))
-        run = RunTest(case, [(ValueError, log_exc)])
-        run.result = ExtendedTestResult()
-        self.assertThat(lambda: run._run_prepared_result(run.result),
-            Raises(MatchesException(KeyError)))
-        self.assertEqual(
-            [('startTest', case), ('stopTest', case)], run.result._events)
-        self.assertEqual([], log)
-
-    def test__run_prepared_result_uncaught_Exception_triggers_error(self):
-        # https://bugs.launchpad.net/testtools/+bug/1364188
-        # When something isn't handled, the test that was
-        # executing has errored, one way or another.
-        e = SystemExit(0)
-        class Case(TestCase):
-            def test(self):
-                raise e
-        case = Case('test')
-        log = []
-        def log_exc(self, result, err):
-            log.append((result, err))
-        run = RunTest(case, [], log_exc)
-        run.result = ExtendedTestResult()
-        self.assertThat(lambda: run._run_prepared_result(run.result),
-            Raises(MatchesException(SystemExit)))
-        self.assertEqual(
-            [('startTest', case), ('stopTest', case)], run.result._events)
-        self.assertEqual([(run.result, e)], log)
-
-    def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
-        case = self.make_case()
-        def broken_handler(exc_info):
-            # ValueError because that's the type we know how to catch - and
-            # must not catch here.
-            raise ValueError('boo')
-        case.addOnException(broken_handler)
-        e = KeyError('Yo')
-        def raises():
-            raise e
-        log = []
-        def log_exc(self, result, err):
-            log.append((result, err))
-        run = RunTest(case, [(ValueError, log_exc)])
-        run.result = ExtendedTestResult()
-        self.assertThat(lambda: run._run_user(raises),
-            Raises(MatchesException(ValueError)))
-        self.assertEqual([], run.result._events)
-        self.assertEqual([], log)
-
-    def test__run_user_returns_result(self):
-        case = self.make_case()
-        def returns():
-            return 1
-        run = RunTest(case)
-        run.result = ExtendedTestResult()
-        self.assertEqual(1, run._run_user(returns))
-        self.assertEqual([], run.result._events)
-
-    def test__run_one_decorates_result(self):
-        log = []
-        class Run(RunTest):
-            def _run_prepared_result(self, result):
-                log.append(result)
-                return result
-        run = Run(self.make_case(), lambda x: x)
-        result = run._run_one('foo')
-        self.assertEqual([result], log)
-        self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
-        self.assertEqual('foo', result.decorated)
-
-    def test__run_prepared_result_calls_start_and_stop_test(self):
-        result = ExtendedTestResult()
-        case = self.make_case()
-        run = RunTest(case, lambda x: x)
-        run.run(result)
-        self.assertEqual([
-            ('startTest', case),
-            ('addSuccess', case),
-            ('stopTest', case),
-            ], result._events)
-
-    def test__run_prepared_result_calls_stop_test_always(self):
-        result = ExtendedTestResult()
-        case = self.make_case()
-        def inner():
-            raise Exception("foo")
-        run = RunTest(case, lambda x: x)
-        run._run_core = inner
-        self.assertThat(lambda: run.run(result),
-            Raises(MatchesException(Exception("foo"))))
-        self.assertEqual([
-            ('startTest', case),
-            ('stopTest', case),
-            ], result._events)
-
-
-class CustomRunTest(RunTest):
-
-    marker = object()
-
-    def run(self, result=None):
-        return self.marker
-
-
-class TestTestCaseSupportForRunTest(TestCase):
-
-    def test_pass_custom_run_test(self):
-        class SomeCase(TestCase):
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo', runTest=CustomRunTest)
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(CustomRunTest.marker))
-
-    def test_default_is_runTest_class_variable(self):
-        class SomeCase(TestCase):
-            run_tests_with = CustomRunTest
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo')
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(CustomRunTest.marker))
-
-    def test_constructor_argument_overrides_class_variable(self):
-        # If a 'runTest' argument is passed to the test's constructor, that
-        # overrides the class variable.
-        marker = object()
-        class DifferentRunTest(RunTest):
-            def run(self, result=None):
-                return marker
-        class SomeCase(TestCase):
-            run_tests_with = CustomRunTest
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo', runTest=DifferentRunTest)
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(marker))
-
-    def test_decorator_for_run_test(self):
-        # Individual test methods can be marked as needing a special runner.
-        class SomeCase(TestCase):
-            @run_test_with(CustomRunTest)
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo')
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(CustomRunTest.marker))
-
-    def test_extended_decorator_for_run_test(self):
-        # Individual test methods can be marked as needing a special runner.
-        # Extra arguments can be passed to the decorator which will then be
-        # passed on to the RunTest object.
-        marker = object()
-        class FooRunTest(RunTest):
-            def __init__(self, case, handlers=None, bar=None):
-                super(FooRunTest, self).__init__(case, handlers)
-                self.bar = bar
-            def run(self, result=None):
-                return self.bar
-        class SomeCase(TestCase):
-            @run_test_with(FooRunTest, bar=marker)
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo')
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(marker))
-
-    def test_works_as_inner_decorator(self):
-        # Even if run_test_with is the innermost decorator, it will be
-        # respected.
-        def wrapped(function):
-            """Silly, trivial decorator."""
-            def decorated(*args, **kwargs):
-                return function(*args, **kwargs)
-            decorated.__name__ = function.__name__
-            decorated.__dict__.update(function.__dict__)
-            return decorated
-        class SomeCase(TestCase):
-            @wrapped
-            @run_test_with(CustomRunTest)
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo')
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(CustomRunTest.marker))
-
-    def test_constructor_overrides_decorator(self):
-        # If a 'runTest' argument is passed to the test's constructor, that
-        # overrides the decorator.
-        marker = object()
-        class DifferentRunTest(RunTest):
-            def run(self, result=None):
-                return marker
-        class SomeCase(TestCase):
-            @run_test_with(CustomRunTest)
-            def test_foo(self):
-                pass
-        result = TestResult()
-        case = SomeCase('test_foo', runTest=DifferentRunTest)
-        from_run_test = case.run(result)
-        self.assertThat(from_run_test, Is(marker))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_spinner.py b/lib/testtools/testtools/tests/test_spinner.py
deleted file mode 100644
index 31110ca..0000000
--- a/lib/testtools/testtools/tests/test_spinner.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# Copyright (c) 2010 testtools developers. See LICENSE for details.
-
-"""Tests for the evil Twisted reactor-spinning we do."""
-
-import os
-import signal
-
-from extras import try_import
-
-from testtools import (
-    skipIf,
-    TestCase,
-    )
-from testtools.matchers import (
-    Equals,
-    Is,
-    MatchesException,
-    Raises,
-    )
-
-_spinner = try_import('testtools._spinner')
-
-defer = try_import('twisted.internet.defer')
-Failure = try_import('twisted.python.failure.Failure')
-
-
-class NeedsTwistedTestCase(TestCase):
-
-    def setUp(self):
-        super(NeedsTwistedTestCase, self).setUp()
-        if defer is None or Failure is None:
-            self.skipTest("Need Twisted to run")
-
-
-class TestNotReentrant(NeedsTwistedTestCase):
-
-    def test_not_reentrant(self):
-        # A function decorated as not being re-entrant will raise a
-        # _spinner.ReentryError if it is called while it is running.
-        calls = []
-        @_spinner.not_reentrant
-        def log_something():
-            calls.append(None)
-            if len(calls) < 5:
-                log_something()
-        self.assertThat(
-            log_something, Raises(MatchesException(_spinner.ReentryError)))
-        self.assertEqual(1, len(calls))
-
-    def test_deeper_stack(self):
-        calls = []
-        @_spinner.not_reentrant
-        def g():
-            calls.append(None)
-            if len(calls) < 5:
-                f()
-        @_spinner.not_reentrant
-        def f():
-            calls.append(None)
-            if len(calls) < 5:
-                g()
-        self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
-        self.assertEqual(2, len(calls))
-
-
-class TestExtractResult(NeedsTwistedTestCase):
-
-    def test_not_fired(self):
-        # _spinner.extract_result raises _spinner.DeferredNotFired if it's
-        # given a Deferred that has not fired.
-        self.assertThat(lambda: _spinner.extract_result(defer.Deferred()),
-            Raises(MatchesException(_spinner.DeferredNotFired)))
-
-    def test_success(self):
-        # _spinner.extract_result returns the value of the Deferred if it has
-        # fired successfully.
-        marker = object()
-        d = defer.succeed(marker)
-        self.assertThat(_spinner.extract_result(d), Equals(marker))
-
-    def test_failure(self):
-        # _spinner.extract_result raises the failure's exception if it's given
-        # a Deferred that is failing.
-        try:
-            1/0
-        except ZeroDivisionError:
-            f = Failure()
-        d = defer.fail(f)
-        self.assertThat(lambda: _spinner.extract_result(d),
-            Raises(MatchesException(ZeroDivisionError)))
-
-
-class TestTrapUnhandledErrors(NeedsTwistedTestCase):
-
-    def test_no_deferreds(self):
-        marker = object()
-        result, errors = _spinner.trap_unhandled_errors(lambda: marker)
-        self.assertEqual([], errors)
-        self.assertIs(marker, result)
-
-    def test_unhandled_error(self):
-        failures = []
-        def make_deferred_but_dont_handle():
-            try:
-                1/0
-            except ZeroDivisionError:
-                f = Failure()
-                failures.append(f)
-                defer.fail(f)
-        result, errors = _spinner.trap_unhandled_errors(
-            make_deferred_but_dont_handle)
-        self.assertIs(None, result)
-        self.assertEqual(failures, [error.failResult for error in errors])
-
-
-class TestRunInReactor(NeedsTwistedTestCase):
-
-    def make_reactor(self):
-        from twisted.internet import reactor
-        return reactor
-
-    def make_spinner(self, reactor=None):
-        if reactor is None:
-            reactor = self.make_reactor()
-        return _spinner.Spinner(reactor)
-
-    def make_timeout(self):
-        return 0.01
-
-    def test_function_called(self):
-        # run_in_reactor actually calls the function given to it.
-        calls = []
-        marker = object()
-        self.make_spinner().run(self.make_timeout(), calls.append, marker)
-        self.assertThat(calls, Equals([marker]))
-
-    def test_return_value_returned(self):
-        # run_in_reactor returns the value returned by the function given to
-        # it.
-        marker = object()
-        result = self.make_spinner().run(self.make_timeout(), lambda: marker)
-        self.assertThat(result, Is(marker))
-
-    def test_exception_reraised(self):
-        # If the given function raises an error, run_in_reactor re-raises that
-        # error.
-        self.assertThat(
-            lambda: self.make_spinner().run(self.make_timeout(), lambda: 1/0),
-            Raises(MatchesException(ZeroDivisionError)))
-
-    def test_keyword_arguments(self):
-        # run_in_reactor passes keyword arguments on.
-        calls = []
-        function = lambda *a, **kw: calls.extend([a, kw])
-        self.make_spinner().run(self.make_timeout(), function, foo=42)
-        self.assertThat(calls, Equals([(), {'foo': 42}]))
-
-    def test_not_reentrant(self):
-        # run_in_reactor raises an error if it is called inside another call
-        # to run_in_reactor.
-        spinner = self.make_spinner()
-        self.assertThat(lambda: spinner.run(
-            self.make_timeout(), spinner.run, self.make_timeout(),
-            lambda: None), Raises(MatchesException(_spinner.ReentryError)))
-
-    def test_deferred_value_returned(self):
-        # If the given function returns a Deferred, run_in_reactor returns the
-        # value in the Deferred at the end of the callback chain.
-        marker = object()
-        result = self.make_spinner().run(
-            self.make_timeout(), lambda: defer.succeed(marker))
-        self.assertThat(result, Is(marker))
-
-    def test_preserve_signal_handler(self):
-        signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
-        signals = list(filter(
-            None, (getattr(signal, name, None) for name in signals)))
-        for sig in signals:
-            self.addCleanup(signal.signal, sig, signal.getsignal(sig))
-        new_hdlrs = list(lambda *a: None for _ in signals)
-        for sig, hdlr in zip(signals, new_hdlrs):
-            signal.signal(sig, hdlr)
-        spinner = self.make_spinner()
-        spinner.run(self.make_timeout(), lambda: None)
-        self.assertEqual(new_hdlrs, list(map(signal.getsignal, signals)))
-
-    def test_timeout(self):
-        # If the function takes too long to run, we raise a
-        # _spinner.TimeoutError.
-        timeout = self.make_timeout()
-        self.assertThat(
-            lambda: self.make_spinner().run(timeout, lambda: defer.Deferred()),
-            Raises(MatchesException(_spinner.TimeoutError)))
-
-    def test_no_junk_by_default(self):
-        # If the reactor hasn't spun yet, then there cannot be any junk.
-        spinner = self.make_spinner()
-        self.assertThat(spinner.get_junk(), Equals([]))
-
-    def test_clean_do_nothing(self):
-        # If there's nothing going on in the reactor, then clean does nothing
-        # and returns an empty list.
-        spinner = self.make_spinner()
-        result = spinner._clean()
-        self.assertThat(result, Equals([]))
-
-    def test_clean_delayed_call(self):
-        # If there's a delayed call in the reactor, then clean cancels it and
-        # returns an empty list.
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        call = reactor.callLater(10, lambda: None)
-        results = spinner._clean()
-        self.assertThat(results, Equals([call]))
-        self.assertThat(call.active(), Equals(False))
-
-    def test_clean_delayed_call_cancelled(self):
-        # If there's a delayed call that's just been cancelled, then it's no
-        # longer there.
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        call = reactor.callLater(10, lambda: None)
-        call.cancel()
-        results = spinner._clean()
-        self.assertThat(results, Equals([]))
-
-    def test_clean_selectables(self):
-        # If there's still a selectable (e.g. a listening socket), then
-        # clean() removes it from the reactor's registry.
-        #
-        # Note that the socket is left open. This emulates a bug in trial.
-        from twisted.internet.protocol import ServerFactory
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
-        spinner.run(self.make_timeout(), lambda: None)
-        results = spinner.get_junk()
-        self.assertThat(results, Equals([port]))
-
-    def test_clean_running_threads(self):
-        import threading
-        import time
-        current_threads = list(threading.enumerate())
-        reactor = self.make_reactor()
-        timeout = self.make_timeout()
-        spinner = self.make_spinner(reactor)
-        spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
-        self.assertThat(list(threading.enumerate()), Equals(current_threads))
-
-    def test_leftover_junk_available(self):
-        # If 'run' is given a function that leaves the reactor dirty in some
-        # way, 'run' will clean up the reactor and then store information
-        # about the junk. This information can be retrieved with get_junk.
-        from twisted.internet.protocol import ServerFactory
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        port = spinner.run(
-            self.make_timeout(), reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
-        self.assertThat(spinner.get_junk(), Equals([port]))
-
-    def test_will_not_run_with_previous_junk(self):
-        # If 'run' is called and there's still junk in the spinner's junk
-        # list, then the spinner will refuse to run.
-        from twisted.internet.protocol import ServerFactory
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        timeout = self.make_timeout()
-        spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
-        self.assertThat(lambda: spinner.run(timeout, lambda: None),
-            Raises(MatchesException(_spinner.StaleJunkError)))
-
-    def test_clear_junk_clears_previous_junk(self):
-        # clear_junk removes all junk from the spinner's junk list and
-        # returns it, allowing a subsequent 'run' to proceed.
-        from twisted.internet.protocol import ServerFactory
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        timeout = self.make_timeout()
-        port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory(), interface='127.0.0.1')
-        junk = spinner.clear_junk()
-        self.assertThat(junk, Equals([port]))
-        self.assertThat(spinner.get_junk(), Equals([]))
-
-    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
-    def test_sigint_raises_no_result_error(self):
-        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
-        SIGINT = getattr(signal, 'SIGINT', None)
-        if not SIGINT:
-            self.skipTest("SIGINT not available")
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        timeout = self.make_timeout()
-        reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
-        self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
-            Raises(MatchesException(_spinner.NoResultError)))
-        self.assertEqual([], spinner._clean())
-
-    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
-    def test_sigint_raises_no_result_error_second_time(self):
-        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
-        # This test is exactly the same as test_sigint_raises_no_result_error,
-        # and exists to make sure we haven't futzed with state.
-        self.test_sigint_raises_no_result_error()
-
-    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
-    def test_fast_sigint_raises_no_result_error(self):
-        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
-        SIGINT = getattr(signal, 'SIGINT', None)
-        if not SIGINT:
-            self.skipTest("SIGINT not available")
-        reactor = self.make_reactor()
-        spinner = self.make_spinner(reactor)
-        timeout = self.make_timeout()
-        reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
-        self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
-            Raises(MatchesException(_spinner.NoResultError)))
-        self.assertEqual([], spinner._clean())
-
-    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
-    def test_fast_sigint_raises_no_result_error_second_time(self):
-        self.test_fast_sigint_raises_no_result_error()
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_tags.py b/lib/testtools/testtools/tests/test_tags.py
deleted file mode 100644
index 5010f9a..0000000
--- a/lib/testtools/testtools/tests/test_tags.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) 2012 testtools developers. See LICENSE for details.
-
-"""Test tag support."""
-
-
-from testtools import TestCase
-from testtools.tags import TagContext
-
-
-class TestTags(TestCase):
-
-    def test_no_tags(self):
-        # A tag context has no tags initially.
-        tag_context = TagContext()
-        self.assertEqual(set(), tag_context.get_current_tags())
-
-    def test_add_tag(self):
-        # A tag added with change_tags appears in get_current_tags.
-        tag_context = TagContext()
-        tag_context.change_tags(set(['foo']), set())
-        self.assertEqual(set(['foo']), tag_context.get_current_tags())
-
-    def test_add_tag_twice(self):
-        # Calling change_tags twice to add tags adds both tags to the current
-        # tags.
-        tag_context = TagContext()
-        tag_context.change_tags(set(['foo']), set())
-        tag_context.change_tags(set(['bar']), set())
-        self.assertEqual(
-            set(['foo', 'bar']), tag_context.get_current_tags())
-
-    def test_change_tags_returns_tags(self):
-        # change_tags returns the current tags.  This is a convenience.
-        tag_context = TagContext()
-        tags = tag_context.change_tags(set(['foo']), set())
-        self.assertEqual(set(['foo']), tags)
-
-    def test_remove_tag(self):
-        # change_tags can remove tags from the context.
-        tag_context = TagContext()
-        tag_context.change_tags(set(['foo']), set())
-        tag_context.change_tags(set(), set(['foo']))
-        self.assertEqual(set(), tag_context.get_current_tags())
-
-    def test_child_context(self):
-        # A TagContext can have a parent.  If so, its tags are the tags of the
-        # parent at the moment of construction.
-        parent = TagContext()
-        parent.change_tags(set(['foo']), set())
-        child = TagContext(parent)
-        self.assertEqual(
-            parent.get_current_tags(), child.get_current_tags())
-
-    def test_add_to_child(self):
-        # Adding a tag to the child context doesn't affect the parent.
-        parent = TagContext()
-        parent.change_tags(set(['foo']), set())
-        child = TagContext(parent)
-        child.change_tags(set(['bar']), set())
-        self.assertEqual(set(['foo', 'bar']), child.get_current_tags())
-        self.assertEqual(set(['foo']), parent.get_current_tags())
-
-    def test_remove_in_child(self):
-        # A tag that was in the parent context can be removed from the child
-        # context without affecting the parent.
-        parent = TagContext()
-        parent.change_tags(set(['foo']), set())
-        child = TagContext(parent)
-        child.change_tags(set(), set(['foo']))
-        self.assertEqual(set(), child.get_current_tags())
-        self.assertEqual(set(['foo']), parent.get_current_tags())
-
-    def test_parent(self):
-        # The parent can be retrieved from a child context.
-        parent = TagContext()
-        parent.change_tags(set(['foo']), set())
-        child = TagContext(parent)
-        child.change_tags(set(), set(['foo']))
-        self.assertEqual(parent, child.parent)
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testcase.py b/lib/testtools/testtools/tests/test_testcase.py
deleted file mode 100644
index 4f3e146..0000000
--- a/lib/testtools/testtools/tests/test_testcase.py
+++ /dev/null
@@ -1,1730 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""Tests for extensions to the base test library."""
-
-from doctest import ELLIPSIS
-from pprint import pformat
-import sys
-import unittest
-
-from testtools import (
-    DecorateTestCaseResult,
-    ErrorHolder,
-    MultipleExceptions,
-    PlaceHolder,
-    TestCase,
-    clone_test_with_new_id,
-    content,
-    skip,
-    skipIf,
-    skipUnless,
-    testcase,
-    )
-from testtools.compat import (
-    _b,
-    _u,
-    )
-from testtools.content import (
-    text_content,
-    TracebackContent,
-    )
-from testtools.matchers import (
-    Annotate,
-    DocTestMatches,
-    Equals,
-    HasLength,
-    MatchesException,
-    Raises,
-    )
-from testtools.testcase import (
-    attr,
-    Nullary,
-    WithAttributes,
-    )
-from testtools.testresult.doubles import (
-    Python26TestResult,
-    Python27TestResult,
-    ExtendedTestResult,
-    )
-from testtools.tests.helpers import (
-    an_exc_info,
-    FullStackRunTest,
-    LoggingResult,
-    )
-# Import the 'with'-statement tests only where the syntax is supported.
-try:
-    exec('from __future__ import with_statement')
-except SyntaxError:
-    pass
-else:
-    from testtools.tests.test_with_with import *
-
-
-class TestPlaceHolder(TestCase):
-
-    run_tests_with = FullStackRunTest
-
-    def makePlaceHolder(self, test_id="foo", short_description=None):
-        return PlaceHolder(test_id, short_description)
-
-    def test_id_comes_from_constructor(self):
-        # The id() of a PlaceHolder is whatever you pass into the constructor.
-        test = PlaceHolder("test id")
-        self.assertEqual("test id", test.id())
-
-    def test_shortDescription_is_id(self):
-        # The shortDescription() of a PlaceHolder is the id, by default.
-        test = PlaceHolder("test id")
-        self.assertEqual(test.id(), test.shortDescription())
-
-    def test_shortDescription_specified(self):
-        # If a shortDescription is provided to the constructor, then
-        # shortDescription() returns that instead.
-        test = PlaceHolder("test id", "description")
-        self.assertEqual("description", test.shortDescription())
-
-    def test_repr_just_id(self):
-        # repr(placeholder) shows you how the object was constructed.
-        test = PlaceHolder("test id")
-        self.assertEqual(
-            "<testtools.testcase.PlaceHolder('addSuccess', %s, {})>" % repr(
-            test.id()), repr(test))
-
-    def test_repr_with_description(self):
-        # repr(placeholder) shows you how the object was constructed.
-        test = PlaceHolder("test id", "description")
-        self.assertEqual(
-            "<testtools.testcase.PlaceHolder('addSuccess', %r, {}, %r)>" % (
-            test.id(), test.shortDescription()), repr(test))
-
-    def test_repr_custom_outcome(self):
-        test = PlaceHolder("test id", outcome='addSkip')
-        self.assertEqual(
-            "<testtools.testcase.PlaceHolder('addSkip', %r, {})>" % (
-            test.id()), repr(test))
-
-    def test_counts_as_one_test(self):
-        # A placeholder test counts as one test.
-        test = self.makePlaceHolder()
-        self.assertEqual(1, test.countTestCases())
-
-    def test_str_is_id(self):
-        # str(placeholder) is always the id(). We are not barbarians.
-        test = self.makePlaceHolder()
-        self.assertEqual(test.id(), str(test))
-
-    def test_runs_as_success(self):
-        # When run, a PlaceHolder test records a success.
-        test = self.makePlaceHolder()
-        log = []
-        test.run(LoggingResult(log))
-        self.assertEqual(
-            [('tags', set(), set()), ('startTest', test), ('addSuccess', test),
-             ('stopTest', test), ('tags', set(), set()),],
-            log)
-
-    def test_supplies_details(self):
-        details = {'quux':None}
-        test = PlaceHolder('foo', details=details)
-        result = ExtendedTestResult()
-        test.run(result)
-        self.assertEqual(
-            [('tags', set(), set()),
-             ('startTest', test),
-             ('addSuccess', test, details),
-             ('stopTest', test),
-             ('tags', set(), set()),
-             ],
-            result._events)
-
-    def test_supplies_timestamps(self):
-        test = PlaceHolder('foo', details={}, timestamps=["A", "B"])
-        result = ExtendedTestResult()
-        test.run(result)
-        self.assertEqual(
-            [('time', "A"),
-             ('tags', set(), set()),
-             ('startTest', test),
-             ('time', "B"),
-             ('addSuccess', test),
-             ('stopTest', test),
-             ('tags', set(), set()),
-             ],
-            result._events)
-
-    def test_call_is_run(self):
-        # A PlaceHolder can be called, in which case it behaves like run.
-        test = self.makePlaceHolder()
-        run_log = []
-        test.run(LoggingResult(run_log))
-        call_log = []
-        test(LoggingResult(call_log))
-        self.assertEqual(run_log, call_log)
-
-    def test_runs_without_result(self):
-        # A PlaceHolder can be run without a result, in which case there's no
-        # way to actually get at the result.
-        self.makePlaceHolder().run()
-
-    def test_debug(self):
-        # A PlaceHolder can be debugged.
-        self.makePlaceHolder().debug()
-
-    def test_supports_tags(self):
-        result = ExtendedTestResult()
-        tags = set(['foo', 'bar'])
-        case = PlaceHolder("foo", tags=tags)
-        case.run(result)
-        self.assertEqual([
-            ('tags', tags, set()),
-            ('startTest', case),
-            ('addSuccess', case),
-            ('stopTest', case),
-            ('tags', set(), tags),
-            ], result._events)
-
-
-class TestErrorHolder(TestCase):
-    # Note that these tests exist because ErrorHolder exists - it could be
-    # deprecated and dropped at this point.
-
-    run_tests_with = FullStackRunTest
-
-    def makeException(self):
-        try:
-            raise RuntimeError("danger danger")
-        except:
-            return sys.exc_info()
-
-    def makePlaceHolder(self, test_id="foo", error=None,
-                        short_description=None):
-        if error is None:
-            error = self.makeException()
-        return ErrorHolder(test_id, error, short_description)
-
-    def test_id_comes_from_constructor(self):
-        # The id() of an ErrorHolder is whatever you pass into the constructor.
-        test = ErrorHolder("test id", self.makeException())
-        self.assertEqual("test id", test.id())
-
-    def test_shortDescription_is_id(self):
-        # The shortDescription() of an ErrorHolder is the id, by default.
-        test = ErrorHolder("test id", self.makeException())
-        self.assertEqual(test.id(), test.shortDescription())
-
-    def test_shortDescription_specified(self):
-        # If a shortDescription is provided to the constructor, then
-        # shortDescription() returns that instead.
-        test = ErrorHolder("test id", self.makeException(), "description")
-        self.assertEqual("description", test.shortDescription())
-
-    def test_counts_as_one_test(self):
-        # A placeholder test counts as one test.
-        test = self.makePlaceHolder()
-        self.assertEqual(1, test.countTestCases())
-
-    def test_str_is_id(self):
-        # str(placeholder) is always the id(). We are not barbarians.
-        test = self.makePlaceHolder()
-        self.assertEqual(test.id(), str(test))
-
-    def test_runs_as_error(self):
-        # When run, an ErrorHolder test records an error.
-        error = self.makeException()
-        test = self.makePlaceHolder(error=error)
-        result = ExtendedTestResult()
-        log = result._events
-        test.run(result)
-        self.assertEqual(
-            [('tags', set(), set()),
-             ('startTest', test),
-             ('addError', test, test._details),
-             ('stopTest', test),
-             ('tags', set(), set())], log)
-
-    def test_call_is_run(self):
-        # An ErrorHolder can be called, in which case it behaves like run.
-        test = self.makePlaceHolder()
-        run_log = []
-        test.run(LoggingResult(run_log))
-        call_log = []
-        test(LoggingResult(call_log))
-        self.assertEqual(run_log, call_log)
-
-    def test_runs_without_result(self):
-        # An ErrorHolder can be run without a result, in which case there's no
-        # way to actually get at the result.
-        self.makePlaceHolder().run()
-
-    def test_debug(self):
-        # An ErrorHolder can be debugged.
-        self.makePlaceHolder().debug()
-
-
-class TestEquality(TestCase):
-    """Test ``TestCase``'s equality implementation."""
-
-    run_tests_with = FullStackRunTest
-
-    def test_identicalIsEqual(self):
-        # TestCase instances are equal if they are identical.
-        self.assertEqual(self, self)
-
-    def test_nonIdenticalInUnequal(self):
-        # TestCase instances are not equal if they are not identical.
-        self.assertNotEqual(TestCase(methodName='run'),
-            TestCase(methodName='skip'))
-
-
-class TestAssertions(TestCase):
-    """Test assertions in TestCase."""
-
-    run_tests_with = FullStackRunTest
-
-    def raiseError(self, exceptionFactory, *args, **kwargs):
-        raise exceptionFactory(*args, **kwargs)
-
-    def test_formatTypes_single(self):
-        # Given a single class, _formatTypes returns the name.
-        class Foo(object):
-            pass
-        self.assertEqual('Foo', self._formatTypes(Foo))
-
-    def test_formatTypes_multiple(self):
-        # Given multiple types, _formatTypes returns the names joined by
-        # commas.
-        class Foo(object):
-            pass
-        class Bar(object):
-            pass
-        self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
-
-    def test_assertRaises(self):
-        # assertRaises asserts that a callable raises a particular exception.
-        self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
-
-    def test_assertRaises_exception_w_metaclass(self):
-        # assertRaises works when called for exceptions with custom metaclasses
-        class MyExMeta(type):
-            def __init__(cls, name, bases, dct):
-                """ Do some dummy metaclass stuff """
-                dct.update({'answer': 42})
-                type.__init__(cls, name, bases, dct)
-
-        class MyEx(Exception):
-            __metaclass__ = MyExMeta
-
-        self.assertRaises(MyEx, self.raiseError, MyEx)
-
-    def test_assertRaises_fails_when_no_error_raised(self):
-        # assertRaises raises self.failureException when it's passed a
-        # callable that raises no error.
-        ret = ('orange', 42)
-        self.assertFails(
-            "<function ...<lambda> at ...> returned ('orange', 42)",
-            self.assertRaises, RuntimeError, lambda: ret)
-
-    def test_assertRaises_fails_when_different_error_raised(self):
-        # assertRaises re-raises an exception that it didn't expect.
-        self.assertThat(lambda: self.assertRaises(RuntimeError,
-            self.raiseError, ZeroDivisionError),
-            Raises(MatchesException(ZeroDivisionError)))
-
-    def test_assertRaises_returns_the_raised_exception(self):
-        # assertRaises returns the exception object that was raised. This is
-        # useful for testing that exceptions have the right message.
-
-        # This contraption stores the raised exception, so we can compare it
-        # to the return value of assertRaises.
-        raisedExceptions = []
-        def raiseError():
-            try:
-                raise RuntimeError('Deliberate error')
-            except RuntimeError:
-                raisedExceptions.append(sys.exc_info()[1])
-                raise
-
-        exception = self.assertRaises(RuntimeError, raiseError)
-        self.assertEqual(1, len(raisedExceptions))
-        self.assertTrue(
-            exception is raisedExceptions[0],
-            "%r is not %r" % (exception, raisedExceptions[0]))
-
-    def test_assertRaises_with_multiple_exceptions(self):
-        # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
-        # function raises one of ExceptionOne or ExceptionTwo.
-        expectedExceptions = (RuntimeError, ZeroDivisionError)
-        self.assertRaises(
-            expectedExceptions, self.raiseError, expectedExceptions[0])
-        self.assertRaises(
-            expectedExceptions, self.raiseError, expectedExceptions[1])
-
-    def test_assertRaises_with_multiple_exceptions_failure_mode(self):
-        # If assertRaises is called expecting one of a group of exceptions and
-        # a callable that doesn't raise an exception, then fail with an
-        # appropriate error message.
-        expectedExceptions = (RuntimeError, ZeroDivisionError)
-        self.assertRaises(
-            self.failureException,
-            self.assertRaises, expectedExceptions, lambda: None)
-        self.assertFails('<function ...<lambda> at ...> returned None',
-            self.assertRaises, expectedExceptions, lambda: None)
-
-    def test_assertRaises_function_repr_in_exception(self):
-        # When assertRaises fails, it includes the repr of the invoked
-        # function in the error message, so it's easy to locate the problem.
-        def foo():
-            """An arbitrary function."""
-            pass
-        self.assertThat(
-            lambda: self.assertRaises(Exception, foo),
-            Raises(
-                MatchesException(self.failureException, '.*%r.*' % (foo,))))
-
-    def assertFails(self, message, function, *args, **kwargs):
-        """Assert that function raises a failure with the given message."""
-        failure = self.assertRaises(
-            self.failureException, function, *args, **kwargs)
-        self.assertThat(failure, DocTestMatches(message, ELLIPSIS))
-
-    def test_assertIn_success(self):
-        # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
-        self.assertIn(3, range(10))
-        self.assertIn('foo', 'foo bar baz')
-        self.assertIn('foo', 'foo bar baz'.split())
-
-    def test_assertIn_failure(self):
-        # assertIn(needle, haystack) fails the test when 'needle' is not in
-        # 'haystack'.
-        self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
-        self.assertFails(
-            '%r not in %r' % ('qux', 'foo bar baz'),
-            self.assertIn, 'qux', 'foo bar baz')
-
-    def test_assertIn_failure_with_message(self):
-        # assertIn(needle, haystack) fails the test when 'needle' is not in
-        # 'haystack'.
-        self.assertFails('3 not in [0, 1, 2]: foo bar', self.assertIn, 3,
-                         [0, 1, 2], 'foo bar')
-        self.assertFails(
-            '%r not in %r: foo bar' % ('qux', 'foo bar baz'),
-            self.assertIn, 'qux', 'foo bar baz', 'foo bar')
-
-    def test_assertNotIn_success(self):
-        # assertNotIn(needle, haystack) asserts that 'needle' is not in
-        # 'haystack'.
-        self.assertNotIn(3, [0, 1, 2])
-        self.assertNotIn('qux', 'foo bar baz')
-
-    def test_assertNotIn_failure(self):
-        # assertNotIn(needle, haystack) fails the test when 'needle' is in
-        # 'haystack'.
-        self.assertFails('[1, 2, 3] matches Contains(3)', self.assertNotIn,
-            3, [1, 2, 3])
-        self.assertFails(
-            "'foo bar baz' matches Contains('foo')",
-            self.assertNotIn, 'foo', 'foo bar baz')
-
-    def test_assertNotIn_failure_with_message(self):
-        # assertNotIn(needle, haystack) fails the test when 'needle' is in
-        # 'haystack'.
-        self.assertFails('[1, 2, 3] matches Contains(3): foo bar', self.assertNotIn,
-            3, [1, 2, 3], 'foo bar')
-        self.assertFails(
-            "'foo bar baz' matches Contains('foo'): foo bar",
-            self.assertNotIn, 'foo', 'foo bar baz', "foo bar")
-
-    def test_assertIsInstance(self):
-        # assertIsInstance asserts that an object is an instance of a class.
-
-        class Foo(object):
-            """Simple class for testing assertIsInstance."""
-
-        foo = Foo()
-        self.assertIsInstance(foo, Foo)
-
-    def test_assertIsInstance_multiple_classes(self):
-        # assertIsInstance asserts that an object is an instance of one of a
-        # group of classes.
-
-        class Foo(object):
-            """Simple class for testing assertIsInstance."""
-
-        class Bar(object):
-            """Another simple class for testing assertIsInstance."""
-
-        foo = Foo()
-        self.assertIsInstance(foo, (Foo, Bar))
-        self.assertIsInstance(Bar(), (Foo, Bar))
-
-    def test_assertIsInstance_failure(self):
-        # assertIsInstance(obj, klass) fails the test when obj is not an
-        # instance of klass.
-
-        class Foo(object):
-            """Simple class for testing assertIsInstance."""
-
-        self.assertFails(
-            "'42' is not an instance of %s" % self._formatTypes(Foo),
-            self.assertIsInstance, 42, Foo)
-
-    def test_assertIsInstance_failure_multiple_classes(self):
-        # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
-        # not an instance of klass1 or klass2.
-
-        class Foo(object):
-            """Simple class for testing assertIsInstance."""
-
-        class Bar(object):
-            """Another simple class for testing assertIsInstance."""
-
-        self.assertFails(
-            "'42' is not an instance of any of (%s)" % self._formatTypes([Foo, Bar]),
-            self.assertIsInstance, 42, (Foo, Bar))
-
-    def test_assertIsInstance_overridden_message(self):
-        # assertIsInstance(obj, klass, msg) permits a custom message.
-        self.assertFails("'42' is not an instance of str: foo",
-            self.assertIsInstance, 42, str, "foo")
-
-    def test_assertIs(self):
-        # assertIs asserts that an object is identical to another object.
-        self.assertIs(None, None)
-        some_list = [42]
-        self.assertIs(some_list, some_list)
-        some_object = object()
-        self.assertIs(some_object, some_object)
-
-    def test_assertIs_fails(self):
-        # assertIs raises assertion errors if one object is not identical to
-        # another.
-        self.assertFails('None is not 42', self.assertIs, None, 42)
-        self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
-
-    def test_assertIs_fails_with_message(self):
-        # assertIs raises assertion errors if one object is not identical to
-        # another, and includes a user-supplied message, if it's provided.
-        self.assertFails(
-            'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
-
-    def test_assertIsNot(self):
-        # assertIsNot asserts that an object is not identical to another
-        # object.
-        self.assertIsNot(None, 42)
-        self.assertIsNot([42], [42])
-        self.assertIsNot(object(), object())
-
-    def test_assertIsNot_fails(self):
-        # assertIsNot raises assertion errors if one object is identical to
-        # another.
-        self.assertFails('None matches Is(None)', self.assertIsNot, None, None)
-        some_list = [42]
-        self.assertFails(
-            '[42] matches Is([42])', self.assertIsNot, some_list, some_list)
-
-    def test_assertIsNot_fails_with_message(self):
-        # assertIsNot raises assertion errors if one object is identical to
-        # another, and includes a user-supplied message if it's provided.
-        self.assertFails(
-            'None matches Is(None): foo bar', self.assertIsNot, None, None,
-            "foo bar")
-
-    def test_assertThat_matches_clean(self):
-        class Matcher(object):
-            def match(self, foo):
-                return None
-        self.assertThat("foo", Matcher())
-
-    def test_assertThat_mismatch_raises_description(self):
-        calls = []
-        class Mismatch(object):
-            def __init__(self, thing):
-                self.thing = thing
-            def describe(self):
-                calls.append(('describe_diff', self.thing))
-                return "object is not a thing"
-            def get_details(self):
-                return {}
-        class Matcher(object):
-            def match(self, thing):
-                calls.append(('match', thing))
-                return Mismatch(thing)
-            def __str__(self):
-                calls.append(('__str__',))
-                return "a description"
-        class Test(TestCase):
-            def test(self):
-                self.assertThat("foo", Matcher())
-        result = Test("test").run()
-        self.assertEqual([
-            ('match', "foo"),
-            ('describe_diff', "foo"),
-            ], calls)
-        self.assertFalse(result.wasSuccessful())
-
-    def test_assertThat_output(self):
-        matchee = 'foo'
-        matcher = Equals('bar')
-        expected = matcher.match(matchee).describe()
-        self.assertFails(expected, self.assertThat, matchee, matcher)
-
-    def test_assertThat_message_is_annotated(self):
-        matchee = 'foo'
-        matcher = Equals('bar')
-        expected = Annotate('woo', matcher).match(matchee).describe()
-        self.assertFails(expected, self.assertThat, matchee, matcher, 'woo')
-
-    def test_assertThat_verbose_output(self):
-        matchee = 'foo'
-        matcher = Equals('bar')
-        expected = (
-            'Match failed. Matchee: %r\n'
-            'Matcher: %s\n'
-            'Difference: %s\n' % (
-                matchee,
-                matcher,
-                matcher.match(matchee).describe(),
-                ))
-        self.assertFails(
-            expected, self.assertThat, matchee, matcher, verbose=True)
-
-    def test_expectThat_matches_clean(self):
-        class Matcher(object):
-            def match(self, foo):
-                return None
-        self.expectThat("foo", Matcher())
-
-    def test_expectThat_mismatch_fails_test(self):
-        class Test(TestCase):
-            def test(self):
-                self.expectThat("foo", Equals("bar"))
-        result = Test("test").run()
-        self.assertFalse(result.wasSuccessful())
-
-    def test_expectThat_does_not_exit_test(self):
-        class Test(TestCase):
-            marker = False
-            def test(self):
-                self.expectThat("foo", Equals("bar"))
-                Test.marker = True
-        result = Test("test").run()
-        self.assertFalse(result.wasSuccessful())
-        self.assertTrue(Test.marker)
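-
-    # Editor's sketch, not from the original file: unlike assertThat,
-    # expectThat records the mismatch and lets the test keep running, so a
-    # single run can report several failed expectations.
-    def _sketch_expectThat_collects_mismatches(self):
-        self.expectThat('foo', Equals('bar'))  # recorded; does not raise
-        self.expectThat('baz', Equals('qux'))  # also recorded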
-
-    def test_expectThat_adds_detail(self):
-        class Test(TestCase):
-            def test(self):
-                self.expectThat("foo", Equals("bar"))
-        test = Test("test")
-        result = test.run()
-        details = test.getDetails()
-        self.assertTrue("Failed expectation" in details)
-
-    def test__force_failure_fails_test(self):
-        class Test(TestCase):
-            def test_foo(self):
-                self.force_failure = True
-                self.remaining_code_run = True
-        test = Test('test_foo')
-        result = test.run()
-        self.assertFalse(result.wasSuccessful())
-        self.assertTrue(test.remaining_code_run)
-
-    def get_error_string(self, e):
-        """Get the string showing how 'e' would be formatted in test output.
-
-        This is a little bit hacky, since it's designed to give consistent
-        output regardless of Python version.
-
-        In testtools, TestResult._exc_info_to_unicode is the point of dispatch
-        between various implementations of methods that format
-        exceptions, so that's what we have to call. However, that method cares
-        about stack traces and formats the exception class. We don't care
-        about either of these, so we take its output and parse it a little.
-        """
-        error = TracebackContent((e.__class__, e, None), self).as_text()
-        # We aren't at all interested in the traceback.
-        if error.startswith('Traceback (most recent call last):\n'):
-            lines = error.splitlines(True)[1:]
-            for i, line in enumerate(lines):
-                if not line.startswith(' '):
-                    break
-            error = ''.join(lines[i:])
-        # We aren't interested in how the exception type is formatted.
-        exc_class, error = error.split(': ', 1)
-        return error
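-
-    # Editor's note, not from the original file: for example, given
-    # e = ValueError('x'), get_error_string(e) returns just 'x\n' -- the
-    # 'Traceback' header and the exception class name are both stripped.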
-
-    def test_assertThat_verbose_unicode(self):
-        # When assertThat is given matchees or matchers that contain non-ASCII
-        # unicode strings, we can still provide a meaningful error.
-        matchee = _u('\xa7')
-        matcher = Equals(_u('a'))
-        expected = (
-            'Match failed. Matchee: %s\n'
-            'Matcher: %s\n'
-            'Difference: %s\n\n' % (
-                repr(matchee).replace("\\xa7", matchee),
-                matcher,
-                matcher.match(matchee).describe(),
-                ))
-        e = self.assertRaises(
-            self.failureException, self.assertThat, matchee, matcher,
-            verbose=True)
-        self.assertEqual(expected, self.get_error_string(e))
-
-    def test_assertEqual_nice_formatting(self):
-        message = "These things ought not be equal."
-        a = ['apple', 'banana', 'cherry']
-        b = {'Thatcher': 'One who mends roofs of straw',
-             'Major': 'A military officer, ranked below colonel',
-             'Blair': 'To shout loudly',
-             'Brown': 'The colour of healthy human faeces'}
-        expected_error = '\n'.join([
-            '!=:',
-            'reference = %s' % pformat(a),
-            'actual    = %s' % pformat(b),
-            ': ' + message,
-            ])
-        self.assertFails(expected_error, self.assertEqual, a, b, message)
-        self.assertFails(expected_error, self.assertEquals, a, b, message)
-        self.assertFails(expected_error, self.failUnlessEqual, a, b, message)
-
-    def test_assertEqual_formatting_no_message(self):
-        a = "cat"
-        b = "dog"
-        expected_error = "'cat' != 'dog'"
-        self.assertFails(expected_error, self.assertEqual, a, b)
-        self.assertFails(expected_error, self.assertEquals, a, b)
-        self.assertFails(expected_error, self.failUnlessEqual, a, b)
-
-    def test_assertEqual_non_ascii_str_with_newlines(self):
-        message = _u("Be careful mixing unicode and bytes")
-        a = "a\n\xa7\n"
-        b = "Just a longish string so the more verbose output form is used."
-        expected_error = '\n'.join([
-            '!=:',
-            "reference = '''\\",
-            'a',
-            repr('\xa7')[1:-1],
-            "'''",
-            'actual    = %r' % (b,),
-            ': ' + message,
-            ])
-        self.assertFails(expected_error, self.assertEqual, a, b, message)
-
-    def test_assertIsNone(self):
-        self.assertIsNone(None)
-
-        expected_error = 'None is not 0'
-        self.assertFails(expected_error, self.assertIsNone, 0)
-
-    def test_assertIsNotNone(self):
-        self.assertIsNotNone(0)
-        self.assertIsNotNone("0")
-
-        expected_error = 'None matches Is(None)'
-        self.assertFails(expected_error, self.assertIsNotNone, None)
-
-    def test_fail_preserves_traceback_detail(self):
-        class Test(TestCase):
-            def test(self):
-                self.addDetail('traceback', text_content('foo'))
-                self.fail('bar')
-        test = Test('test')
-        result = ExtendedTestResult()
-        test.run(result)
-        self.assertEqual(set(['traceback', 'traceback-1']),
-            set(result._events[1][2].keys()))
-
-
-class TestAddCleanup(TestCase):
-    """Tests for TestCase.addCleanup."""
-
-    run_test_with = FullStackRunTest
-
-    class LoggingTest(TestCase):
-        """A test that logs calls to setUp, runTest and tearDown."""
-
-        def setUp(self):
-            TestCase.setUp(self)
-            self._calls = ['setUp']
-
-        def brokenSetUp(self):
-            # A setUp that deliberately fails.
-            self._calls = ['brokenSetUp']
-            raise RuntimeError('Deliberate Failure')
-
-        def runTest(self):
-            self._calls.append('runTest')
-
-        def brokenTest(self):
-            raise RuntimeError('Deliberate broken test')
-
-        def tearDown(self):
-            self._calls.append('tearDown')
-            TestCase.tearDown(self)
-
-    def setUp(self):
-        TestCase.setUp(self)
-        self._result_calls = []
-        self.test = TestAddCleanup.LoggingTest('runTest')
-        self.logging_result = LoggingResult(self._result_calls)
-
-    def assertErrorLogEqual(self, messages):
-        self.assertEqual(messages, [call[0] for call in self._result_calls])
-
-    def assertTestLogEqual(self, messages):
-        """Assert that the call log equals 'messages'."""
-        case = self._result_calls[0][1]
-        self.assertEqual(messages, case._calls)
-
-    def logAppender(self, message):
-        """A cleanup that appends 'message' to the tests log.
-
-        Cleanups are callables that are added to a test by addCleanup. To
-        verify that our cleanups run in the right order, we add strings to a
-        list that acts as a log. This method returns a cleanup that will add
-        the given message to that log when run.
-        """
-        self.test._calls.append(message)
-
-    def test_fixture(self):
-        # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
-        # This test doesn't test addCleanup itself; it just sanity-checks
-        # the fixture.
-        self.test.run(self.logging_result)
-        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
-
-    def test_cleanup_run_after_tearDown(self):
-        # Cleanup functions added with 'addCleanup' are called after tearDown
-        # runs.
-        self.test.addCleanup(self.logAppender, 'cleanup')
-        self.test.run(self.logging_result)
-        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
-
-    def test_add_cleanup_called_if_setUp_fails(self):
-        # Cleanup functions added with 'addCleanup' are called even if setUp
-        # fails. Note that tearDown has a different behavior: it is only
-        # called when setUp succeeds.
-        self.test.setUp = self.test.brokenSetUp
-        self.test.addCleanup(self.logAppender, 'cleanup')
-        self.test.run(self.logging_result)
-        self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
-
-    def test_addCleanup_called_in_reverse_order(self):
-        # Cleanup functions added with 'addCleanup' are called in reverse
-        # order.
-        #
-        # One of the main uses of addCleanup is to dynamically create
-        # resources that need some sort of explicit tearDown. Often one
-        # resource will be created in terms of another, e.g.,
-        #     self.first = self.makeFirst()
-        #     self.second = self.makeSecond(self.first)
-        #
-        # When this happens, we generally want to clean up the second resource
-        # before the first one, since the second depends on the first.
-        self.test.addCleanup(self.logAppender, 'first')
-        self.test.addCleanup(self.logAppender, 'second')
-        self.test.run(self.logging_result)
-        self.assertTestLogEqual(
-            ['setUp', 'runTest', 'tearDown', 'second', 'first'])
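-
-    # Editor's sketch, not part of the original suite: cleanups run LIFO,
-    # so adding them in creation order tears dependent resources down first.
-    def _sketch_dependent_resource_cleanups(self):
-        server = {'open': True}                     # stands in for a resource
-        client = {'open': True}                     # depends on the server
-        self.addCleanup(server.update, open=False)  # added first, runs last
-        self.addCleanup(client.update, open=False)  # added last, runs first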
-
-    def test_tearDown_runs_after_cleanup_failure(self):
-        # tearDown runs even if a cleanup function fails.
-        self.test.addCleanup(lambda: 1/0)
-        self.test.run(self.logging_result)
-        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
-
-    def test_cleanups_continue_running_after_error(self):
-        # All cleanups are always run, even if one or two of them fail.
-        self.test.addCleanup(self.logAppender, 'first')
-        self.test.addCleanup(lambda: 1/0)
-        self.test.addCleanup(self.logAppender, 'second')
-        self.test.run(self.logging_result)
-        self.assertTestLogEqual(
-            ['setUp', 'runTest', 'tearDown', 'second', 'first'])
-
-    def test_error_in_cleanups_are_captured(self):
-        # If a cleanup raises an error, we want to record it and fail the
-        # test, even though we go on to run other cleanups.
-        self.test.addCleanup(lambda: 1/0)
-        self.test.run(self.logging_result)
-        self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
-
-    def test_keyboard_interrupt_not_caught(self):
-        # If a cleanup raises KeyboardInterrupt, it gets reraised.
-        def raiseKeyboardInterrupt():
-            raise KeyboardInterrupt()
-        self.test.addCleanup(raiseKeyboardInterrupt)
-        self.assertThat(lambda: self.test.run(self.logging_result),
-            Raises(MatchesException(KeyboardInterrupt)))
-
-    def test_all_errors_from_MultipleExceptions_reported(self):
-        # When a MultipleExceptions exception is caught, all the errors are
-        # reported.
-        def raiseMany():
-            try:
-                1/0
-            except Exception:
-                exc_info1 = sys.exc_info()
-            try:
-                1/0
-            except Exception:
-                exc_info2 = sys.exc_info()
-            raise MultipleExceptions(exc_info1, exc_info2)
-        self.test.addCleanup(raiseMany)
-        self.logging_result = ExtendedTestResult()
-        self.test.run(self.logging_result)
-        self.assertEqual(['startTest', 'addError', 'stopTest'],
-            [event[0] for event in self.logging_result._events])
-        self.assertEqual(set(['traceback', 'traceback-1']),
-            set(self.logging_result._events[1][2].keys()))
-
-    def test_multipleCleanupErrorsReported(self):
-        # Errors from all failing cleanups are reported as separate backtraces.
-        self.test.addCleanup(lambda: 1/0)
-        self.test.addCleanup(lambda: 1/0)
-        self.logging_result = ExtendedTestResult()
-        self.test.run(self.logging_result)
-        self.assertEqual(['startTest', 'addError', 'stopTest'],
-            [event[0] for event in self.logging_result._events])
-        self.assertEqual(set(['traceback', 'traceback-1']),
-            set(self.logging_result._events[1][2].keys()))
-
-    def test_multipleErrorsCoreAndCleanupReported(self):
-        # Errors from the failing test itself and from all failing cleanups
-        # are reported as separate tracebacks.
-        self.test = TestAddCleanup.LoggingTest('brokenTest')
-        self.test.addCleanup(lambda: 1/0)
-        self.test.addCleanup(lambda: 1/0)
-        self.logging_result = ExtendedTestResult()
-        self.test.run(self.logging_result)
-        self.assertEqual(['startTest', 'addError', 'stopTest'],
-            [event[0] for event in self.logging_result._events])
-        self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
-            set(self.logging_result._events[1][2].keys()))
-
-
-class TestRunTestUsage(TestCase):
-
-    def test_last_resort_in_place(self):
-        class TestBase(TestCase):
-            def test_base_exception(self):
-                raise SystemExit(0)
-        result = ExtendedTestResult()
-        test = TestBase("test_base_exception")
-        self.assertRaises(SystemExit, test.run, result)
-        self.assertFalse(result.wasSuccessful())
-
-
-class TestWithDetails(TestCase):
-
-    run_test_with = FullStackRunTest
-
-    def assertDetailsProvided(self, case, expected_outcome, expected_keys):
-        """Assert that when case is run, details are provided to the result.
-
-        :param case: A TestCase to run.
-        :param expected_outcome: The call that should be made.
-        :param expected_keys: The keys to look for.
-        """
-        result = ExtendedTestResult()
-        case.run(result)
-        case = result._events[0][1]
-        expected = [
-            ('startTest', case),
-            (expected_outcome, case),
-            ('stopTest', case),
-            ]
-        self.assertEqual(3, len(result._events))
-        self.assertEqual(expected[0], result._events[0])
-        self.assertEqual(expected[1], result._events[1][0:2])
-        # Checking the TB is right is rather tricky. doctest line matching
-        # would help, but 'meh'.
-        self.assertEqual(sorted(expected_keys),
-            sorted(result._events[1][2].keys()))
-        self.assertEqual(expected[-1], result._events[-1])
-
-    def get_content(self):
-        return content.Content(
-            content.ContentType("text", "foo"), lambda: [_b('foo')])
-
-
-class TestExpectedFailure(TestWithDetails):
-    """Tests for expected failures and unexpected successess."""
-
-    run_test_with = FullStackRunTest
-
-    def make_unexpected_case(self):
-        class Case(TestCase):
-            def test(self):
-                raise testcase._UnexpectedSuccess
-        case = Case('test')
-        return case
-
-    def test_raising__UnexpectedSuccess_py27(self):
-        case = self.make_unexpected_case()
-        result = Python27TestResult()
-        case.run(result)
-        case = result._events[0][1]
-        self.assertEqual([
-            ('startTest', case),
-            ('addUnexpectedSuccess', case),
-            ('stopTest', case),
-            ], result._events)
-
-    def test_raising__UnexpectedSuccess_extended(self):
-        case = self.make_unexpected_case()
-        result = ExtendedTestResult()
-        case.run(result)
-        case = result._events[0][1]
-        self.assertEqual([
-            ('startTest', case),
-            ('addUnexpectedSuccess', case, {}),
-            ('stopTest', case),
-            ], result._events)
-
-    def make_xfail_case_xfails(self):
-        content = self.get_content()
-        class Case(TestCase):
-            def test(self):
-                self.addDetail("foo", content)
-                self.expectFailure("we are sad", self.assertEqual,
-                    1, 0)
-        case = Case('test')
-        return case
-
-    def make_xfail_case_succeeds(self):
-        content = self.get_content()
-        class Case(TestCase):
-            def test(self):
-                self.addDetail("foo", content)
-                self.expectFailure("we are sad", self.assertEqual,
-                    1, 1)
-        case = Case('test')
-        return case
-
-    def test_expectFailure_KnownFailure_extended(self):
-        case = self.make_xfail_case_xfails()
-        self.assertDetailsProvided(case, "addExpectedFailure",
-            ["foo", "traceback", "reason"])
-
-    def test_expectFailure_KnownFailure_unexpected_success(self):
-        case = self.make_xfail_case_succeeds()
-        self.assertDetailsProvided(case, "addUnexpectedSuccess",
-            ["foo", "reason"])
-
-    @skipIf(not hasattr(unittest, 'expectedFailure'), 'Need py27+')
-    def test_unittest_expectedFailure_decorator_works_with_failure(self):
-        class ReferenceTest(TestCase):
-            @unittest.expectedFailure
-            def test_fails_expectedly(self):
-                self.assertEquals(1, 0)
-
-        test = ReferenceTest('test_fails_expectedly')
-        result = test.run()
-        self.assertEqual(True, result.wasSuccessful())
-
-    @skipIf(not hasattr(unittest, 'expectedFailure'), 'Need py27+')
-    def test_unittest_expectedFailure_decorator_works_with_success(self):
-        class ReferenceTest(TestCase):
-            @unittest.expectedFailure
-            def test_passes_unexpectedly(self):
-                self.assertEquals(1, 1)
-
-        test = ReferenceTest('test_passes_unexpectedly')
-        result = test.run()
-        self.assertEqual(False, result.wasSuccessful())
-
-
-class TestUniqueFactories(TestCase):
-    """Tests for getUniqueString and getUniqueInteger."""
-
-    run_test_with = FullStackRunTest
-
-    def test_getUniqueInteger(self):
-        # getUniqueInteger returns an integer that increments each time you
-        # call it.
-        one = self.getUniqueInteger()
-        self.assertEqual(1, one)
-        two = self.getUniqueInteger()
-        self.assertEqual(2, two)
-
-    def test_getUniqueString(self):
-        # getUniqueString returns the current test id followed by a unique
-        # integer.
-        name_one = self.getUniqueString()
-        self.assertEqual('%s-%d' % (self.id(), 1), name_one)
-        name_two = self.getUniqueString()
-        self.assertEqual('%s-%d' % (self.id(), 2), name_two)
-
-    def test_getUniqueString_prefix(self):
-        # If getUniqueString is given an argument, it uses that argument as
-        # the prefix of the unique string, rather than the test id.
-        name_one = self.getUniqueString('foo')
-        self.assertThat(name_one, Equals('foo-1'))
-        name_two = self.getUniqueString('bar')
-        self.assertThat(name_two, Equals('bar-2'))
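-
-    # Editor's sketch, not from the original file: the string and integer
-    # factories share one counter per test, so generated names never collide.
-    def _sketch_unique_factories(self):
-        first = self.getUniqueString('user')   # e.g. 'user-1'
-        second = self.getUniqueString('user')  # e.g. 'user-2'
-        self.assertNotEqual(first, second)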
-
-
-class TestCloneTestWithNewId(TestCase):
-    """Tests for clone_test_with_new_id."""
-
-    run_test_with = FullStackRunTest
-
-    def test_clone_test_with_new_id(self):
-        class FooTestCase(TestCase):
-            def test_foo(self):
-                pass
-        test = FooTestCase('test_foo')
-        oldName = test.id()
-        newName = self.getUniqueString()
-        newTest = clone_test_with_new_id(test, newName)
-        self.assertEqual(newName, newTest.id())
-        self.assertEqual(oldName, test.id(),
-            "the original test instance should be unchanged.")
-
-    def test_cloned_testcase_does_not_share_details(self):
-        """A cloned TestCase does not share the details dict."""
-        class Test(TestCase):
-            def test_foo(self):
-                self.addDetail(
-                    'foo', content.Content('text/plain', lambda: 'foo'))
-        orig_test = Test('test_foo')
-        cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString())
-        orig_test.run(unittest.TestResult())
-        self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
-        self.assertEqual(None, cloned_test.getDetails().get('foo'))
-
-
-class TestDetailsProvided(TestWithDetails):
-
-    run_test_with = FullStackRunTest
-
-    def test_addDetail(self):
-        mycontent = self.get_content()
-        self.addDetail("foo", mycontent)
-        details = self.getDetails()
-        self.assertEqual({"foo": mycontent}, details)
-
-    def test_addError(self):
-        class Case(TestCase):
-            def test(this):
-                this.addDetail("foo", self.get_content())
-                1/0
-        self.assertDetailsProvided(Case("test"), "addError",
-            ["foo", "traceback"])
-
-    def test_addFailure(self):
-        class Case(TestCase):
-            def test(this):
-                this.addDetail("foo", self.get_content())
-                self.fail('yo')
-        self.assertDetailsProvided(Case("test"), "addFailure",
-            ["foo", "traceback"])
-
-    def test_addSkip(self):
-        class Case(TestCase):
-            def test(this):
-                this.addDetail("foo", self.get_content())
-                self.skip('yo')
-        self.assertDetailsProvided(Case("test"), "addSkip",
-            ["foo", "reason"])
-
-    def test_addSuccess(self):
-        class Case(TestCase):
-            def test(this):
-                this.addDetail("foo", self.get_content())
-        self.assertDetailsProvided(Case("test"), "addSuccess",
-            ["foo"])
-
-    def test_addUnexpectedSuccess(self):
-        class Case(TestCase):
-            def test(this):
-                this.addDetail("foo", self.get_content())
-                raise testcase._UnexpectedSuccess()
-        self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
-            ["foo"])
-
-    def test_addDetails_from_Mismatch(self):
-        content = self.get_content()
-        class Mismatch(object):
-            def describe(self):
-                return "Mismatch"
-            def get_details(self):
-                return {"foo": content}
-        class Matcher(object):
-            def match(self, thing):
-                return Mismatch()
-            def __str__(self):
-                return "a description"
-        class Case(TestCase):
-            def test(self):
-                self.assertThat("foo", Matcher())
-        self.assertDetailsProvided(Case("test"), "addFailure",
-            ["foo", "traceback"])
-
-    def test_multiple_addDetails_from_Mismatch(self):
-        content = self.get_content()
-        class Mismatch(object):
-            def describe(self):
-                return "Mismatch"
-            def get_details(self):
-                return {"foo": content, "bar": content}
-        class Matcher(object):
-            def match(self, thing):
-                return Mismatch()
-            def __str__(self):
-                return "a description"
-        class Case(TestCase):
-            def test(self):
-                self.assertThat("foo", Matcher())
-        self.assertDetailsProvided(Case("test"), "addFailure",
-            ["bar", "foo", "traceback"])
-
-    def test_addDetails_with_same_name_as_key_from_get_details(self):
-        content = self.get_content()
-        class Mismatch(object):
-            def describe(self):
-                return "Mismatch"
-            def get_details(self):
-                return {"foo": content}
-        class Matcher(object):
-            def match(self, thing):
-                return Mismatch()
-            def __str__(self):
-                return "a description"
-        class Case(TestCase):
-            def test(self):
-                self.addDetail("foo", content)
-                self.assertThat("foo", Matcher())
-        self.assertDetailsProvided(Case("test"), "addFailure",
-            ["foo", "foo-1", "traceback"])
-
-    def test_addDetailUniqueName_works(self):
-        content = self.get_content()
-        class Case(TestCase):
-            def test(self):
-                self.addDetailUniqueName("foo", content)
-                self.addDetailUniqueName("foo", content)
-        self.assertDetailsProvided(Case("test"), "addSuccess",
-            ["foo", "foo-1"])
-
-
-class TestSetupTearDown(TestCase):
-
-    run_test_with = FullStackRunTest
-
-    def test_setUpCalledTwice(self):
-        class CallsTooMuch(TestCase):
-            def test_method(self):
-                self.setUp()
-        result = unittest.TestResult()
-        CallsTooMuch('test_method').run(result)
-        self.assertThat(result.errors, HasLength(1))
-        self.assertThat(result.errors[0][1],
-            DocTestMatches(
-                "...ValueError...File...testtools/tests/test_testcase.py...",
-                ELLIPSIS))
-
-    def test_setUpNotCalled(self):
-        class DoesnotcallsetUp(TestCase):
-            def setUp(self):
-                pass
-            def test_method(self):
-                pass
-        result = unittest.TestResult()
-        DoesnotcallsetUp('test_method').run(result)
-        self.assertThat(result.errors, HasLength(1))
-        self.assertThat(result.errors[0][1],
-            DocTestMatches(
-                "...ValueError...File...testtools/tests/test_testcase.py...",
-                ELLIPSIS))
-
-    def test_tearDownCalledTwice(self):
-        class CallsTooMuch(TestCase):
-            def test_method(self):
-                self.tearDown()
-        result = unittest.TestResult()
-        CallsTooMuch('test_method').run(result)
-        self.assertThat(result.errors, HasLength(1))
-        self.assertThat(result.errors[0][1],
-            DocTestMatches(
-                "...ValueError...File...testtools/tests/test_testcase.py...",
-                ELLIPSIS))
-
-    def test_tearDownNotCalled(self):
-        class DoesnotcalltearDown(TestCase):
-            def test_method(self):
-                pass
-            def tearDown(self):
-                pass
-        result = unittest.TestResult()
-        DoesnotcalltearDown('test_method').run(result)
-        self.assertThat(result.errors, HasLength(1))
-        self.assertThat(result.errors[0][1],
-            DocTestMatches(
-                "...ValueError...File...testtools/tests/test_testcase.py...",
-                ELLIPSIS))
-
-
-require_py27_minimum = skipIf(
-    sys.version < '2.7',
-    "Requires python 2.7 or greater"
-)
-
-
-class TestSkipping(TestCase):
-    """Tests for skipping of tests functionality."""
-
-    run_test_with = FullStackRunTest
-
-    def test_skip_causes_skipException(self):
-        self.assertThat(lambda:self.skip("Skip this test"),
-            Raises(MatchesException(self.skipException)))
-
-    def test_can_use_skipTest(self):
-        self.assertThat(lambda: self.skipTest("Skip this test"),
-            Raises(MatchesException(self.skipException)))
-
-    def test_skip_without_reason_works(self):
-        class Test(TestCase):
-            def test(self):
-                raise self.skipException()
-        case = Test("test")
-        result = ExtendedTestResult()
-        case.run(result)
-        self.assertEqual('addSkip', result._events[1][0])
-        self.assertEqual('no reason given.',
-            result._events[1][2]['reason'].as_text())
-
-    def test_skipException_in_setup_calls_result_addSkip(self):
-        class TestThatRaisesInSetUp(TestCase):
-            def setUp(self):
-                TestCase.setUp(self)
-                self.skip("skipping this test")
-            def test_that_passes(self):
-                pass
-        calls = []
-        result = LoggingResult(calls)
-        test = TestThatRaisesInSetUp("test_that_passes")
-        test.run(result)
-        case = result._events[0][1]
-        self.assertEqual([('startTest', case),
-            ('addSkip', case, "skipping this test"), ('stopTest', case)],
-            calls)
-
-    def test_skipException_in_test_method_calls_result_addSkip(self):
-        class SkippingTest(TestCase):
-            def test_that_raises_skipException(self):
-                self.skip("skipping this test")
-        result = Python27TestResult()
-        test = SkippingTest("test_that_raises_skipException")
-        test.run(result)
-        case = result._events[0][1]
-        self.assertEqual([('startTest', case),
-            ('addSkip', case, "skipping this test"), ('stopTest', case)],
-            result._events)
-
-    def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
-        class SkippingTest(TestCase):
-            def setUp(self):
-                TestCase.setUp(self)
-                raise self.skipException("skipping this test")
-            def test_that_raises_skipException(self):
-                pass
-        result = Python26TestResult()
-        test = SkippingTest("test_that_raises_skipException")
-        test.run(result)
-        self.assertEqual('addSuccess', result._events[1][0])
-
-    def test_skip_with_old_result_object_calls_addSuccess(self):
-        class SkippingTest(TestCase):
-            def test_that_raises_skipException(self):
-                raise self.skipException("skipping this test")
-        result = Python26TestResult()
-        test = SkippingTest("test_that_raises_skipException")
-        test.run(result)
-        self.assertEqual('addSuccess', result._events[1][0])
-
-    def test_skip_decorator(self):
-        class SkippingTest(TestCase):
-            @skip("skipping this test")
-            def test_that_is_decorated_with_skip(self):
-                self.fail()
-        result = Python26TestResult()
-        test = SkippingTest("test_that_is_decorated_with_skip")
-        test.run(result)
-        self.assertEqual('addSuccess', result._events[1][0])
-
-    def test_skipIf_decorator(self):
-        class SkippingTest(TestCase):
-            @skipIf(True, "skipping this test")
-            def test_that_is_decorated_with_skipIf(self):
-                self.fail()
-        result = Python26TestResult()
-        test = SkippingTest("test_that_is_decorated_with_skipIf")
-        test.run(result)
-        self.assertEqual('addSuccess', result._events[1][0])
-
-    def test_skipUnless_decorator(self):
-        class SkippingTest(TestCase):
-            @skipUnless(False, "skipping this test")
-            def test_that_is_decorated_with_skipUnless(self):
-                self.fail()
-        result = Python26TestResult()
-        test = SkippingTest("test_that_is_decorated_with_skipUnless")
-        test.run(result)
-        self.assertEqual('addSuccess', result._events[1][0])
-
-    def check_skip_decorator_does_not_run_setup(self, decorator, reason):
-        class SkippingTest(TestCase):
-
-            setup_ran = False
-
-            def setUp(self):
-                super(SkippingTest, self).setUp()
-                self.setup_ran = True
-
-            # Use the decorator passed to us:
-            @decorator
-            def test_skipped(self):
-                self.fail()
-
-        test = SkippingTest('test_skipped')
-        result = test.run()
-        self.assertTrue(result.wasSuccessful())
-        self.assertTrue(reason in result.skip_reasons, result.skip_reasons)
-        self.assertFalse(test.setup_ran)
-
-    def test_testtools_skip_decorator_does_not_run_setUp(self):
-        reason = self.getUniqueString()
-        self.check_skip_decorator_does_not_run_setup(
-            skip(reason),
-            reason
-        )
-
-    def test_testtools_skipIf_decorator_does_not_run_setUp(self):
-        reason = self.getUniqueString()
-        self.check_skip_decorator_does_not_run_setup(
-            skipIf(True, reason),
-            reason
-        )
-
-    def test_testtools_skipUnless_decorator_does_not_run_setUp(self):
-        reason = self.getUniqueString()
-        self.check_skip_decorator_does_not_run_setup(
-            skipUnless(False, reason),
-            reason
-        )
-
-    @require_py27_minimum
-    def test_unittest_skip_decorator_does_not_run_setUp(self):
-        reason = self.getUniqueString()
-        self.check_skip_decorator_does_not_run_setup(
-            unittest.skip(reason),
-            reason
-        )
-
-    @require_py27_minimum
-    def test_unittest_skipIf_decorator_does_not_run_setUp(self):
-        reason = self.getUniqueString()
-        self.check_skip_decorator_does_not_run_setup(
-            unittest.skipIf(True, reason),
-            reason
-        )
-
-    @require_py27_minimum
-    def test_unittest_skipUnless_decorator_does_not_run_setUp(self):
-        reason = self.getUniqueString()
-        self.check_skip_decorator_does_not_run_setup(
-            unittest.skipUnless(False, reason),
-            reason
-        )
-
-
-class TestOnException(TestCase):
-
-    run_test_with = FullStackRunTest
-
-    def test_default_works(self):
-        events = []
-        class Case(TestCase):
-            def method(self):
-                self.onException(an_exc_info)
-                events.append(True)
-        case = Case("method")
-        case.run()
-        self.assertThat(events, Equals([True]))
-
-    def test_added_handler_works(self):
-        events = []
-        class Case(TestCase):
-            def method(self):
-                self.addOnException(events.append)
-                self.onException(an_exc_info)
-        case = Case("method")
-        case.run()
-        self.assertThat(events, Equals([an_exc_info]))
-
-    def test_handler_that_raises_is_not_caught(self):
-        events = []
-        class Case(TestCase):
-            def method(self):
-                self.addOnException(events.index)
-                self.assertThat(lambda: self.onException(an_exc_info),
-                    Raises(MatchesException(ValueError)))
-        case = Case("method")
-        case.run()
-        self.assertThat(events, Equals([]))
-
-
-class TestPatchSupport(TestCase):
-
-    run_test_with = FullStackRunTest
-
-    class Case(TestCase):
-        def test(self):
-            pass
-
-    def test_patch(self):
-        # TestCase.patch masks obj.attribute with the new value.
-        self.foo = 'original'
-        test = self.Case('test')
-        test.patch(self, 'foo', 'patched')
-        self.assertEqual('patched', self.foo)
-
-    def test_patch_restored_after_run(self):
-        # TestCase.patch masks obj.attribute with the new value, but restores
-        # the original value after the test is finished.
-        self.foo = 'original'
-        test = self.Case('test')
-        test.patch(self, 'foo', 'patched')
-        test.run()
-        self.assertEqual('original', self.foo)
-
-    def test_successive_patches_apply(self):
-        # TestCase.patch can be called multiple times per test. Each time you
-        # call it, it overrides the original value.
-        self.foo = 'original'
-        test = self.Case('test')
-        test.patch(self, 'foo', 'patched')
-        test.patch(self, 'foo', 'second')
-        self.assertEqual('second', self.foo)
-
-    def test_successive_patches_restored_after_run(self):
-        # TestCase.patch restores the original value, no matter how many times
-        # it was called.
-        self.foo = 'original'
-        test = self.Case('test')
-        test.patch(self, 'foo', 'patched')
-        test.patch(self, 'foo', 'second')
-        test.run()
-        self.assertEqual('original', self.foo)
-
-    def test_patch_nonexistent_attribute(self):
-        # TestCase.patch can be used to patch a non-existent attribute.
-        test = self.Case('test')
-        test.patch(self, 'doesntexist', 'patched')
-        self.assertEqual('patched', self.doesntexist)
-
-    def test_restore_nonexistent_attribute(self):
-        # TestCase.patch can be used to patch a non-existent attribute; after
-        # the test run, the attribute is removed from the object.
-        test = self.Case('test')
-        test.patch(self, 'doesntexist', 'patched')
-        test.run()
-        marker = object()
-        value = getattr(self, 'doesntexist', marker)
-        self.assertIs(marker, value)
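-
-    # Editor's sketch, not from the original file: a typical use is masking
-    # an attribute for one test; the original value (or its absence) is
-    # restored automatically when the test finishes.
-    def _sketch_patch_usage(self):
-        self.patch(self, 'answer', 42)  # undone by a cleanup at test end
-        self.assertEqual(42, self.answer)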
-
-
-class TestTestCaseSuper(TestCase):
-
-    run_test_with = FullStackRunTest
-
-    def test_setup_uses_super(self):
-        class OtherBaseCase(unittest.TestCase):
-            setup_called = False
-            def setUp(self):
-                self.setup_called = True
-                super(OtherBaseCase, self).setUp()
-        class OurCase(TestCase, OtherBaseCase):
-            def runTest(self):
-                pass
-        test = OurCase()
-        test.setUp()
-        test.tearDown()
-        self.assertTrue(test.setup_called)
-
-    def test_teardown_uses_super(self):
-        class OtherBaseCase(unittest.TestCase):
-            teardown_called = False
-            def tearDown(self):
-                self.teardown_called = True
-                super(OtherBaseCase, self).tearDown()
-        class OurCase(TestCase, OtherBaseCase):
-            def runTest(self):
-                pass
-        test = OurCase()
-        test.setUp()
-        test.tearDown()
-        self.assertTrue(test.teardown_called)
-
-
-class TestNullary(TestCase):
-
-    def test_repr(self):
-        # The repr() of nullary is the same as the repr() of the wrapped
-        # function.
-        def foo():
-            pass
-        wrapped = Nullary(foo)
-        self.assertEqual(repr(wrapped), repr(foo))
-
-    def test_called_with_arguments(self):
-        # The function is called with the arguments given to Nullary's
-        # constructor.
-        l = []
-        def foo(*args, **kwargs):
-            l.append((args, kwargs))
-        wrapped = Nullary(foo, 1, 2, a="b")
-        wrapped()
-        self.assertEqual(l, [((1, 2), {'a': 'b'})])
-
-    def test_returns_wrapped(self):
-        # Calling Nullary returns whatever the function returns.
-        ret = object()
-        wrapped = Nullary(lambda: ret)
-        self.assertIs(ret, wrapped())
-
-    def test_raises(self):
-        # If the function raises, so does Nullary when called.
-        wrapped = Nullary(lambda: 1/0)
-        self.assertRaises(ZeroDivisionError, wrapped)
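-
-    # Editor's sketch, not part of the original suite: Nullary packages a
-    # callable and its arguments into a zero-argument callable.
-    def _sketch_nullary_usage(self):
-        wrapped = Nullary(max, 1, 2)
-        self.assertEqual(2, wrapped())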
-
-
-class TestAttributes(TestCase):
-
-    def test_simple_attr(self):
-        # Adding an attr to a test changes its id().
-        class MyTest(WithAttributes, TestCase):
-            @attr('foo')
-            def test_bar(self):
-                pass
-        case = MyTest('test_bar')
-        self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]',
-            case.id())
-
-    def test_multiple_attributes(self):
-        class MyTest(WithAttributes, TestCase):
-            # Not sorted here, forward or backwards.
-            @attr('foo', 'quux', 'bar')
-            def test_bar(self):
-                pass
-        case = MyTest('test_bar')
-        self.assertEqual(
-            'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
-            case.id())
-
-    def test_multiple_attr_decorators(self):
-        class MyTest(WithAttributes, TestCase):
-            # Not sorted here, forward or backwards.
-            @attr('bar')
-            @attr('quux')
-            @attr('foo')
-            def test_bar(self):
-                pass
-        case = MyTest('test_bar')
-        self.assertEqual(
-            'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
-            case.id())
-
-
-class TestDecorateTestCaseResult(TestCase):
-
-    def setUp(self):
-        super(TestDecorateTestCaseResult, self).setUp()
-        self.log = []
-
-    def make_result(self, result):
-        self.log.append(('result', result))
-        return LoggingResult(self.log)
-
-    def test___call__(self):
-        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
-        case(None)
-        case('something')
-        self.assertEqual([('result', None),
-            ('tags', set(), set()),
-            ('startTest', case.decorated),
-            ('addSuccess', case.decorated),
-            ('stopTest', case.decorated),
-            ('tags', set(), set()),
-            ('result', 'something'),
-            ('tags', set(), set()),
-            ('startTest', case.decorated),
-            ('addSuccess', case.decorated),
-            ('stopTest', case.decorated),
-            ('tags', set(), set())
-            ], self.log)
-
-    def test_run(self):
-        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
-        case.run(None)
-        case.run('something')
-        self.assertEqual([('result', None),
-            ('tags', set(), set()),
-            ('startTest', case.decorated),
-            ('addSuccess', case.decorated),
-            ('stopTest', case.decorated),
-            ('tags', set(), set()),
-            ('result', 'something'),
-            ('tags', set(), set()),
-            ('startTest', case.decorated),
-            ('addSuccess', case.decorated),
-            ('stopTest', case.decorated),
-            ('tags', set(), set())
-            ], self.log)
-
-    def test_before_after_hooks(self):
-        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result,
-            before_run=lambda result: self.log.append('before'),
-            after_run=lambda result: self.log.append('after'))
-        case.run(None)
-        case(None)
-        self.assertEqual([
-            ('result', None),
-            'before',
-            ('tags', set(), set()),
-            ('startTest', case.decorated),
-            ('addSuccess', case.decorated),
-            ('stopTest', case.decorated),
-            ('tags', set(), set()),
-            'after',
-            ('result', None),
-            'before',
-            ('tags', set(), set()),
-            ('startTest', case.decorated),
-            ('addSuccess', case.decorated),
-            ('stopTest', case.decorated),
-            ('tags', set(), set()),
-            'after',
-            ], self.log)
-
-    def test_other_attribute(self):
-        orig = PlaceHolder('foo')
-        orig.thing = 'fred'
-        case = DecorateTestCaseResult(orig, self.make_result)
-        self.assertEqual('fred', case.thing)
-        self.assertRaises(AttributeError, getattr, case, 'other')
-        case.other = 'barbara'
-        self.assertEqual('barbara', orig.other)
-        del case.thing
-        self.assertRaises(AttributeError, getattr, orig, 'thing')
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testresult.py b/lib/testtools/testtools/tests/test_testresult.py
deleted file mode 100644
index a8034b2..0000000
--- a/lib/testtools/testtools/tests/test_testresult.py
+++ /dev/null
@@ -1,2913 +0,0 @@
-# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
-
-"""Test TestResults and related things."""
-
-__metaclass__ = type
-
-import codecs
-import datetime
-import doctest
-from itertools import chain, combinations
-import os
-import re
-import shutil
-import sys
-import tempfile
-import threading
-from unittest import TestSuite
-import warnings
-
-from extras import safe_hasattr, try_imports
-
-Queue = try_imports(['Queue.Queue', 'queue.Queue'])
-
-from testtools import (
-    CopyStreamResult,
-    ExtendedToOriginalDecorator,
-    ExtendedToStreamDecorator,
-    MultiTestResult,
-    PlaceHolder,
-    StreamFailFast,
-    StreamResult,
-    StreamResultRouter,
-    StreamSummary,
-    StreamTagger,
-    StreamToDict,
-    StreamToExtendedDecorator,
-    StreamToQueue,
-    Tagger,
-    TestCase,
-    TestControl,
-    TestResult,
-    TestResultDecorator,
-    TestByTestResult,
-    TextTestResult,
-    ThreadsafeForwardingResult,
-    TimestampingStreamResult,
-    testresult,
-    )
-from testtools.compat import (
-    _b,
-    _get_exception_encoding,
-    _r,
-    _u,
-    advance_iterator,
-    str_is_unicode,
-    StringIO,
-    )
-from testtools.content import (
-    Content,
-    content_from_stream,
-    text_content,
-    TracebackContent,
-    )
-from testtools.content_type import ContentType, UTF8_TEXT
-from testtools.matchers import (
-    AllMatch,
-    Contains,
-    DocTestMatches,
-    Equals,
-    HasLength,
-    MatchesAny,
-    MatchesException,
-    MatchesRegex,
-    Raises,
-    )
-from testtools.tests.helpers import (
-    an_exc_info,
-    FullStackRunTest,
-    LoggingResult,
-    run_with_stack_hidden,
-    )
-from testtools.testresult.doubles import (
-    Python26TestResult,
-    Python27TestResult,
-    ExtendedTestResult,
-    StreamResult as LoggingStreamResult,
-    )
-from testtools.testresult.real import (
-    _details_to_str,
-    _merge_tags,
-    utc,
-    )
-
-
-def make_erroring_test():
-    class Test(TestCase):
-        def error(self):
-            1/0
-    return Test("error")
-
-
-def make_failing_test():
-    class Test(TestCase):
-        def failed(self):
-            self.fail("yo!")
-    return Test("failed")
-
-
-def make_mismatching_test():
-    class Test(TestCase):
-        def mismatch(self):
-            self.assertEqual(1, 2)
-    return Test("mismatch")
-
-
-def make_unexpectedly_successful_test():
-    class Test(TestCase):
-        def succeeded(self):
-            self.expectFailure("yo!", lambda: None)
-    return Test("succeeded")
-
-
-def make_test():
-    class Test(TestCase):
-        def test(self):
-            pass
-    return Test("test")
-
-
-def make_exception_info(exceptionFactory, *args, **kwargs):
-    try:
-        raise exceptionFactory(*args, **kwargs)
-    except:
-        return sys.exc_info()
-
-
-class Python26Contract(object):
-
-    def test_fresh_result_is_successful(self):
-        # A result is considered successful before any tests are run.
-        result = self.makeResult()
-        self.assertTrue(result.wasSuccessful())
-
-    def test_addError_is_failure(self):
-        # addError fails the test run.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addError(self, an_exc_info)
-        result.stopTest(self)
-        self.assertFalse(result.wasSuccessful())
-
-    def test_addFailure_is_failure(self):
-        # addFailure fails the test run.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addFailure(self, an_exc_info)
-        result.stopTest(self)
-        self.assertFalse(result.wasSuccessful())
-
-    def test_addSuccess_is_success(self):
-        # addSuccess does not fail the test run.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addSuccess(self)
-        result.stopTest(self)
-        self.assertTrue(result.wasSuccessful())
-
-    def test_stop_sets_shouldStop(self):
-        result = self.makeResult()
-        result.stop()
-        self.assertTrue(result.shouldStop)
-
-
-class Python27Contract(Python26Contract):
-
-    def test_addExpectedFailure(self):
-        # Calling addExpectedFailure(test, exc_info) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addExpectedFailure(self, an_exc_info)
-
-    def test_addExpectedFailure_is_success(self):
-        # addExpectedFailure does not fail the test run.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addExpectedFailure(self, an_exc_info)
-        result.stopTest(self)
-        self.assertTrue(result.wasSuccessful())
-
-    def test_addSkipped(self):
-        # Calling addSkip(test, reason) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addSkip(self, _u("Skipped for some reason"))
-
-    def test_addSkip_is_success(self):
-        # addSkip does not fail the test run.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addSkip(self, _u("Skipped for some reason"))
-        result.stopTest(self)
-        self.assertTrue(result.wasSuccessful())
-
-    def test_addUnexpectedSuccess(self):
-        # Calling addUnexpectedSuccess(test) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addUnexpectedSuccess(self)
-
-    def test_addUnexpectedSuccess_was_successful(self):
-        # addUnexpectedSuccess does not fail the test run in Python 2.7.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addUnexpectedSuccess(self)
-        result.stopTest(self)
-        self.assertTrue(result.wasSuccessful())
-
-    def test_startStopTestRun(self):
-        # Calling startTestRun then stopTestRun completes ok.
-        result = self.makeResult()
-        result.startTestRun()
-        result.stopTestRun()
-
-    def test_failfast(self):
-        result = self.makeResult()
-        result.failfast = True
-        class Failing(TestCase):
-            def test_a(self):
-                self.fail('a')
-            def test_b(self):
-                self.fail('b')
-        TestSuite([Failing('test_a'), Failing('test_b')]).run(result)
-        self.assertEqual(1, result.testsRun)
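-
-    # Editor's note, not part of the original contract: failfast takes
-    # effect between tests -- the failing test itself still completes, but
-    # no further tests are started.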
-
-
-class TagsContract(Python27Contract):
-    """Tests to ensure correct tagging behaviour.
-
-    See the subunit docs for guidelines on how this is supposed to work.
-    """
-
-    def test_no_tags_by_default(self):
-        # Results initially have no tags.
-        result = self.makeResult()
-        result.startTestRun()
-        self.assertEqual(frozenset(), result.current_tags)
-
-    def test_adding_tags(self):
-        # Tags are added using 'tags' and thus become visible in
-        # 'current_tags'.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        self.assertEqual(set(['foo']), result.current_tags)
-
-    def test_removing_tags(self):
-        # Tags are removed using 'tags'.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        result.tags(set(), set(['foo']))
-        self.assertEqual(set(), result.current_tags)
-
-    def test_startTestRun_resets_tags(self):
-        # startTestRun makes a new test run, and thus clears all the tags.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        result.startTestRun()
-        self.assertEqual(set(), result.current_tags)
-
-    def test_add_tags_within_test(self):
-        # Tags can be added while a test is running.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        result.startTest(self)
-        result.tags(set(['bar']), set())
-        self.assertEqual(set(['foo', 'bar']), result.current_tags)
-
-    def test_tags_added_in_test_are_reverted(self):
-        # Tags added during a test are reverted once that test has
-        # finished.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        result.startTest(self)
-        result.tags(set(['bar']), set())
-        result.addSuccess(self)
-        result.stopTest(self)
-        self.assertEqual(set(['foo']), result.current_tags)
-
-    def test_tags_removed_in_test(self):
-        # Tags can be removed during tests.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        result.startTest(self)
-        result.tags(set(), set(['foo']))
-        self.assertEqual(set(), result.current_tags)
-
-    def test_tags_removed_in_test_are_restored(self):
-        # Tags removed during a test are restored once that test has
-        # finished.
-        result = self.makeResult()
-        result.startTestRun()
-        result.tags(set(['foo']), set())
-        result.startTest(self)
-        result.tags(set(), set(['foo']))
-        result.addSuccess(self)
-        result.stopTest(self)
-        self.assertEqual(set(['foo']), result.current_tags)
-
-
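-# A minimal sketch, not part of the original suite: the tag-scoping rule
-# that TagsContract above encodes, shown against the concrete TestResult
-# class using the module-level TestResult and PlaceHolder imports.
-def _example_tag_scoping():
-    result = TestResult()
-    result.startTestRun()
-    result.tags(set(['global']), set())
-    test = PlaceHolder('example')
-    result.startTest(test)
-    result.tags(set(['local']), set())
-    assert set(['global', 'local']) == result.current_tags
-    result.addSuccess(test)
-    result.stopTest(test)
-    # Tags added during the test are reverted once it finishes.
-    assert set(['global']) == result.current_tags
-
-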
-class DetailsContract(TagsContract):
-    """Tests for the details API of TestResults."""
-
-    def test_addExpectedFailure_details(self):
-        # Calling addExpectedFailure(test, details=xxx) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addExpectedFailure(self, details={})
-
-    def test_addError_details(self):
-        # Calling addError(test, details=xxx) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addError(self, details={})
-
-    def test_addFailure_details(self):
-        # Calling addFailure(test, details=xxx) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addFailure(self, details={})
-
-    def test_addSkipped_details(self):
-        # Calling addSkip(test, details=xxx) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addSkip(self, details={})
-
-    def test_addUnexpectedSuccess_details(self):
-        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addUnexpectedSuccess(self, details={})
-
-    def test_addSuccess_details(self):
-        # Calling addSuccess(test, details=xxx) completes ok.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addSuccess(self, details={})
-
-
-class FallbackContract(DetailsContract):
-    """When we fallback we take our policy choice to map calls.
-
-    For instance, we map unexpectedSuccess to an error code, not to success.
-    """
-
-    def test_addUnexpectedSuccess_was_successful(self):
-        # addUnexpectedSuccess fails test run in testtools.
-        result = self.makeResult()
-        result.startTest(self)
-        result.addUnexpectedSuccess(self)
-        result.stopTest(self)
-        self.assertFalse(result.wasSuccessful())
-
-
-class StartTestRunContract(FallbackContract):
-    """Defines the contract for testtools policy choices.
-
-    That is, things which are not simply extensions to unittest, but
-    choices we have made differently.
-    """
-
-    def test_startTestRun_resets_unexpected_success(self):
-        result = self.makeResult()
-        result.startTest(self)
-        result.addUnexpectedSuccess(self)
-        result.stopTest(self)
-        result.startTestRun()
-        self.assertTrue(result.wasSuccessful())
-
-    def test_startTestRun_resets_failure(self):
-        result = self.makeResult()
-        result.startTest(self)
-        result.addFailure(self, an_exc_info)
-        result.stopTest(self)
-        result.startTestRun()
-        self.assertTrue(result.wasSuccessful())
-
-    def test_startTestRun_resets_errors(self):
-        result = self.makeResult()
-        result.startTest(self)
-        result.addError(self, an_exc_info)
-        result.stopTest(self)
-        result.startTestRun()
-        self.assertTrue(result.wasSuccessful())
-
-
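-# A minimal sketch, not part of the original suite: the two policy
-# choices pinned down by FallbackContract and StartTestRunContract
-# above, using the module-level TestResult and PlaceHolder imports.
-def _example_testtools_policy():
-    result = TestResult()
-    test = PlaceHolder('example')
-    result.startTest(test)
-    result.addUnexpectedSuccess(test)
-    result.stopTest(test)
-    # testtools policy: an unexpected success counts against the run.
-    assert not result.wasSuccessful()
-    result.startTestRun()
-    # testtools policy: starting a new run resets the verdict.
-    assert result.wasSuccessful()
-
-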
-class TestTestResultContract(TestCase, StartTestRunContract):
-
-    run_tests_with = FullStackRunTest
-
-    def makeResult(self):
-        return TestResult()
-
-
-class TestMultiTestResultContract(TestCase, StartTestRunContract):
-
-    run_tests_with = FullStackRunTest
-
-    def makeResult(self):
-        return MultiTestResult(TestResult(), TestResult())
-
-
-class TestTextTestResultContract(TestCase, StartTestRunContract):
-
-    run_tests_with = FullStackRunTest
-
-    def makeResult(self):
-        return TextTestResult(StringIO())
-
-
-class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
-
-    run_tests_with = FullStackRunTest
-
-    def makeResult(self):
-        result_semaphore = threading.Semaphore(1)
-        target = TestResult()
-        return ThreadsafeForwardingResult(target, result_semaphore)
-
-
-class TestExtendedTestResultContract(TestCase, StartTestRunContract):
-
-    def makeResult(self):
-        return ExtendedTestResult()
-
-
-class TestPython26TestResultContract(TestCase, Python26Contract):
-
-    def makeResult(self):
-        return Python26TestResult()
-
-
-class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
-
-    def makeResult(self):
-        return ExtendedToOriginalDecorator(Python26TestResult())
-
-
-class TestPython27TestResultContract(TestCase, Python27Contract):
-
-    def makeResult(self):
-        return Python27TestResult()
-
-
-class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
-
-    def makeResult(self):
-        return ExtendedToOriginalDecorator(Python27TestResult())
-
-
-class TestAdaptedStreamResult(TestCase, DetailsContract):
-
-    def makeResult(self):
-        return ExtendedToStreamDecorator(StreamResult())
-
-
-class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
-
-    run_tests_with = FullStackRunTest
-
-    def makeResult(self):
-        return TestResultDecorator(TestResult())
-
-
-# DetailsContract is used (rather than FallbackContract) because
-# ExtendedToStreamDecorator follows Python's policy for uxsuccess
-# handling.
-class TestStreamToExtendedContract(TestCase, DetailsContract):
-
-    def makeResult(self):
-        return ExtendedToStreamDecorator(
-            StreamToExtendedDecorator(ExtendedTestResult()))
-
-
-class TestStreamResultContract(object):
-
-    def _make_result(self):
-        raise NotImplementedError(self._make_result)
-
-    def test_startTestRun(self):
-        result = self._make_result()
-        result.startTestRun()
-        result.stopTestRun()
-
-    def test_files(self):
-        # Test parameter combinations when files are being emitted.
-        result = self._make_result()
-        result.startTestRun()
-        self.addCleanup(result.stopTestRun)
-        now = datetime.datetime.now(utc)
-        inputs = list(dict(
-            eof=True,
-            mime_type="text/plain",
-            route_code=_u("1234"),
-            test_id=_u("foo"),
-            timestamp=now,
-            ).items())
-        param_dicts = self._power_set(inputs)
-        for kwargs in param_dicts:
-            result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
-            result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)
-
-    def test_test_status(self):
-        # Tests non-file attachment parameter combinations.
-        result = self._make_result()
-        result.startTestRun()
-        self.addCleanup(result.stopTestRun)
-        now = datetime.datetime.now(utc)
-        args = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
-            'uxsuccess', 'success', 'fail', 'skip']]
-        inputs = list(dict(
-            runnable=False,
-            test_tags=set(['quux']),
-            route_code=_u("1234"),
-            timestamp=now,
-            ).items())
-        param_dicts = self._power_set(inputs)
-        for kwargs in param_dicts:
-            for arg in args:
-                result.status(test_id=arg[0], test_status=arg[1], **kwargs)
-
-    def _power_set(self, iterable):
-        "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
-        s = list(iterable)
-        param_dicts = []
-        for ss in chain.from_iterable(
-                combinations(s, r) for r in range(len(s) + 1)):
-            param_dicts.append(dict(ss))
-        return param_dicts
-
-
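-# A minimal sketch, not part of the original suite: what _power_set
-# above yields for a small input - it is how test_files and
-# test_test_status enumerate every keyword-argument combination.
-def _example_power_set():
-    from itertools import chain, combinations
-    inputs = list(dict(eof=True, route_code=_u("1234")).items())
-    param_dicts = [dict(ss) for ss in chain.from_iterable(
-        combinations(inputs, r) for r in range(len(inputs) + 1))]
-    # Two parameters yield four combinations: neither, each alone, both.
-    assert 4 == len(param_dicts)
-
-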
-class TestBaseStreamResultContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamResult()
-
-
-class TestCopyStreamResultContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return CopyStreamResult([StreamResult(), StreamResult()])
-
-
-class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return LoggingStreamResult()
-
-
-class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return ExtendedToStreamDecorator(StreamResult())
-
-
-class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamSummary()
-
-
-class TestStreamTaggerContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamTagger([StreamResult()], add=set(), discard=set())
-
-
-class TestStreamToDictContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamToDict(lambda x: None)
-
-
-class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamToExtendedDecorator(ExtendedTestResult())
-
-
-class TestStreamToQueueContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        queue = Queue()
-        return StreamToQueue(queue, "foo")
-
-
-class TestStreamFailFastContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamFailFast(lambda: None)
-
-
-class TestStreamResultRouterContract(TestCase, TestStreamResultContract):
-
-    def _make_result(self):
-        return StreamResultRouter(StreamResult())
-
-
-class TestDoubleStreamResultEvents(TestCase):
-
-    def test_startTestRun(self):
-        result = LoggingStreamResult()
-        result.startTestRun()
-        self.assertEqual([('startTestRun',)], result._events)
-
-    def test_stopTestRun(self):
-        result = LoggingStreamResult()
-        result.startTestRun()
-        result.stopTestRun()
-        self.assertEqual([('startTestRun',), ('stopTestRun',)], result._events)
-
-    def test_file(self):
-        result = LoggingStreamResult()
-        result.startTestRun()
-        now = datetime.datetime.now(utc)
-        result.status(file_name="foo", file_bytes="bar", eof=True, mime_type="text/json",
-            test_id="id", route_code='abc', timestamp=now)
-        self.assertEqual(
-            [('startTestRun',),
-             ('status', 'id', None, None, True, 'foo', _b('bar'), True, 'text/json', 'abc', now)],
-            result._events)
-
-    def test_status(self):
-        result = LoggingStreamResult()
-        result.startTestRun()
-        now = datetime.datetime.now(utc)
-        result.status("foo", "success", test_tags=set(['tag']),
-            runnable=False, route_code='abc', timestamp=now)
-        self.assertEqual(
-            [('startTestRun',),
-             ('status', 'foo', 'success', set(['tag']), False, None, None, False, None, 'abc', now)],
-            result._events)
-
-
-class TestCopyStreamResultCopies(TestCase):
-
-    def setUp(self):
-        super(TestCopyStreamResultCopies, self).setUp()
-        self.target1 = LoggingStreamResult()
-        self.target2 = LoggingStreamResult()
-        self.targets = [self.target1._events, self.target2._events]
-        self.result = CopyStreamResult([self.target1, self.target2])
-
-    def test_startTestRun(self):
-        self.result.startTestRun()
-        self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))
-
-    def test_stopTestRun(self):
-        self.result.startTestRun()
-        self.result.stopTestRun()
-        self.assertThat(self.targets,
-            AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
-
-    def test_status(self):
-        self.result.startTestRun()
-        now = datetime.datetime.now(utc)
-        self.result.status("foo", "success", test_tags=set(['tag']),
-            runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
-            mime_type="text/json", route_code='abc', timestamp=now)
-        self.assertThat(self.targets,
-            AllMatch(Equals([('startTestRun',),
-                ('status', 'foo', 'success', set(['tag']), False, "foo",
-                 b'bar', True, "text/json", 'abc', now)
-                ])))
-
-
-class TestStreamTagger(TestCase):
-
-    def test_adding(self):
-        log = LoggingStreamResult()
-        result = StreamTagger([log], add=['foo'])
-        result.startTestRun()
-        result.status()
-        result.status(test_tags=set(['bar']))
-        result.status(test_tags=None)
-        result.stopTestRun()
-        self.assertEqual([
-            ('startTestRun',),
-            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
-            ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
-            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
-            ('stopTestRun',),
-            ], log._events)
-
-    def test_discarding(self):
-        log = LoggingStreamResult()
-        result = StreamTagger([log], discard=['foo'])
-        result.startTestRun()
-        result.status()
-        result.status(test_tags=None)
-        result.status(test_tags=set(['foo']))
-        result.status(test_tags=set(['bar']))
-        result.status(test_tags=set(['foo', 'bar']))
-        result.stopTestRun()
-        self.assertEqual([
-            ('startTestRun',),
-            ('status', None, None, None, True, None, None, False, None, None, None),
-            ('status', None, None, None, True, None, None, False, None, None, None),
-            ('status', None, None, None, True, None, None, False, None, None, None),
-            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
-            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
-            ('stopTestRun',),
-            ], log._events)
-
-
-class TestStreamToDict(TestCase):
-
-    def test_hung_test(self):
-        tests = []
-        result = StreamToDict(tests.append)
-        result.startTestRun()
-        result.status('foo', 'inprogress')
-        self.assertEqual([], tests)
-        result.stopTestRun()
-        self.assertEqual([
-            {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
-             'timestamps': [None, None]}
-            ], tests)
-
-    def test_all_terminal_states_reported(self):
-        tests = []
-        result = StreamToDict(tests.append)
-        result.startTestRun()
-        result.status('success', 'success')
-        result.status('skip', 'skip')
-        result.status('exists', 'exists')
-        result.status('fail', 'fail')
-        result.status('xfail', 'xfail')
-        result.status('uxsuccess', 'uxsuccess')
-        self.assertThat(tests, HasLength(6))
-        self.assertEqual(
-            ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
-            [test['id'] for test in tests])
-        result.stopTestRun()
-        self.assertThat(tests, HasLength(6))
-
-    def test_files_reported(self):
-        tests = []
-        result = StreamToDict(tests.append)
-        result.startTestRun()
-        result.status(file_name="some log.txt",
-            file_bytes=_b("1234 log message"), eof=True,
-            mime_type="text/plain; charset=utf8", test_id="foo.bar")
-        result.status(file_name="another file",
-            file_bytes=_b("""Traceback..."""), test_id="foo.bar")
-        result.stopTestRun()
-        self.assertThat(tests, HasLength(1))
-        test = tests[0]
-        self.assertEqual("foo.bar", test['id'])
-        self.assertEqual("unknown", test['status'])
-        details = test['details']
-        self.assertEqual(
-            _u("1234 log message"), details['some log.txt'].as_text())
-        self.assertEqual(
-            _b("Traceback..."),
-            _b('').join(details['another file'].iter_bytes()))
-        self.assertEqual(
-            "application/octet-stream", repr(details['another file'].content_type))
-
-    def test_bad_mime(self):
-        # testtools used to emit malformed MIME types (note the comma in
-        # the type below); this tests that the specific corruption is
-        # catered for.
-        tests = []
-        result = StreamToDict(tests.append)
-        result.startTestRun()
-        result.status(file_name="file", file_bytes=b'a',
-            mime_type='text/plain; charset=utf8, language=python',
-            test_id='id')
-        result.stopTestRun()
-        self.assertThat(tests, HasLength(1))
-        test = tests[0]
-        self.assertEqual("id", test['id'])
-        details = test['details']
-        self.assertEqual(_u("a"), details['file'].as_text())
-        self.assertEqual(
-            "text/plain; charset=\"utf8\"",
-            repr(details['file'].content_type))
-
-    def test_timestamps(self):
-        tests = []
-        result = StreamToDict(tests.append)
-        result.startTestRun()
-        result.status(test_id='foo', test_status='inprogress', timestamp="A")
-        result.status(test_id='foo', test_status='success', timestamp="B")
-        result.status(test_id='bar', test_status='inprogress', timestamp="C")
-        result.stopTestRun()
-        self.assertThat(tests, HasLength(2))
-        self.assertEqual(["A", "B"], tests[0]['timestamps'])
-        self.assertEqual(["C", None], tests[1]['timestamps'])
-
-
-class TestExtendedToStreamDecorator(TestCase):
-
-    def test_explicit_time(self):
-        log = LoggingStreamResult()
-        result = ExtendedToStreamDecorator(log)
-        result.startTestRun()
-        now = datetime.datetime.now(utc)
-        result.time(now)
-        result.startTest(self)
-        result.addSuccess(self)
-        result.stopTest(self)
-        result.stopTestRun()
-        self.assertEqual([
-            ('startTestRun',),
-            ('status',
-             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
-             'inprogress',
-             None,
-             True,
-             None,
-             None,
-             False,
-             None,
-             None,
-             now),
-            ('status',
-             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
-             'success',
-              set(),
-              True,
-              None,
-              None,
-              False,
-              None,
-              None,
-              now),
-             ('stopTestRun',)], log._events)
-
-    def test_wasSuccessful_after_stopTestRun(self):
-        log = LoggingStreamResult()
-        result = ExtendedToStreamDecorator(log)
-        result.startTestRun()
-        result.status(test_id='foo', test_status='fail')
-        result.stopTestRun()
-        self.assertEqual(False, result.wasSuccessful())
-
-
-class TestStreamFailFast(TestCase):
-
-    def test_inprogress(self):
-        result = StreamFailFast(self.fail)
-        result.status('foo', 'inprogress')
-
-    def test_exists(self):
-        result = StreamFailFast(self.fail)
-        result.status('foo', 'exists')
-
-    def test_xfail(self):
-        result = StreamFailFast(self.fail)
-        result.status('foo', 'xfail')
-
-    def test_uxsuccess(self):
-        calls = []
-        def hook():
-            calls.append("called")
-        result = StreamFailFast(hook)
-        result.status('foo', 'uxsuccess')
-        result.status('foo', 'uxsuccess')
-        self.assertEqual(['called', 'called'], calls)
-
-    def test_success(self):
-        result = StreamFailFast(self.fail)
-        result.status('foo', 'success')
-
-    def test_fail(self):
-        calls = []
-        def hook():
-            calls.append("called")
-        result = StreamFailFast(hook)
-        result.status('foo', 'fail')
-        result.status('foo', 'fail')
-        self.assertEqual(['called', 'called'], calls)
-
-    def test_skip(self):
-        result = StreamFailFast(self.fail)
-        result.status('foo', 'skip')
-
-
-class TestStreamSummary(TestCase):
-
-    def test_attributes(self):
-        result = StreamSummary()
-        result.startTestRun()
-        self.assertEqual([], result.failures)
-        self.assertEqual([], result.errors)
-        self.assertEqual([], result.skipped)
-        self.assertEqual([], result.expectedFailures)
-        self.assertEqual([], result.unexpectedSuccesses)
-        self.assertEqual(0, result.testsRun)
-
-    def test_startTestRun(self):
-        result = StreamSummary()
-        result.startTestRun()
-        result.failures.append('x')
-        result.errors.append('x')
-        result.skipped.append('x')
-        result.expectedFailures.append('x')
-        result.unexpectedSuccesses.append('x')
-        result.testsRun = 1
-        result.startTestRun()
-        self.assertEqual([], result.failures)
-        self.assertEqual([], result.errors)
-        self.assertEqual([], result.skipped)
-        self.assertEqual([], result.expectedFailures)
-        self.assertEqual([], result.unexpectedSuccesses)
-        self.assertEqual(0, result.testsRun)
-
-    def test_wasSuccessful(self):
-        # wasSuccessful returns False if either failures or errors is
-        # non-empty.
-        result = StreamSummary()
-        result.startTestRun()
-        self.assertEqual(True, result.wasSuccessful())
-        result.failures.append('x')
-        self.assertEqual(False, result.wasSuccessful())
-        result.startTestRun()
-        result.errors.append('x')
-        self.assertEqual(False, result.wasSuccessful())
-        result.startTestRun()
-        result.skipped.append('x')
-        self.assertEqual(True, result.wasSuccessful())
-        result.startTestRun()
-        result.expectedFailures.append('x')
-        self.assertEqual(True, result.wasSuccessful())
-        result.startTestRun()
-        result.unexpectedSuccesses.append('x')
-        self.assertEqual(True, result.wasSuccessful())
-
-    def test_stopTestRun(self):
-        result = StreamSummary()
-        # terminal successful codes.
-        result.startTestRun()
-        result.status("foo", "inprogress")
-        result.status("foo", "success")
-        result.status("bar", "skip")
-        result.status("baz", "exists")
-        result.stopTestRun()
-        self.assertEqual(True, result.wasSuccessful())
-        # Existence is terminal but doesn't count as 'running' a test.
-        self.assertEqual(2, result.testsRun)
-
-    def test_stopTestRun_inprogress_test_fails(self):
-        # Tests inprogress at stopTestRun trigger a failure.
-        result = StreamSummary()
-        result.startTestRun()
-        result.status("foo", "inprogress")
-        result.stopTestRun()
-        self.assertEqual(False, result.wasSuccessful())
-        self.assertThat(result.errors, HasLength(1))
-        self.assertEqual("foo", result.errors[0][0].id())
-        self.assertEqual("Test did not complete", result.errors[0][1])
-        # Interim state detection handles route codes: while duplicate ids
-        # in one run are undesirable, they may happen (e.g. with repeated
-        # tests).
-        result.startTestRun()
-        result.status("foo", "inprogress")
-        result.status("foo", "inprogress", route_code="A")
-        result.status("foo", "success", route_code="A")
-        result.stopTestRun()
-        self.assertEqual(False, result.wasSuccessful())
-
-    def test_status_skip(self):
-        # When a skip is seen, a synthetic test is reported, with its
-        # reason captured from the 'reason' file attachment, if any.
-        result = StreamSummary()
-        result.startTestRun()
-        result.status(file_name="reason",
-            file_bytes=_b("Missing dependency"), eof=True,
-            mime_type="text/plain; charset=utf8", test_id="foo.bar")
-        result.status("foo.bar", "skip")
-        self.assertThat(result.skipped, HasLength(1))
-        self.assertEqual("foo.bar", result.skipped[0][0].id())
-        self.assertEqual(_u("Missing dependency"), result.skipped[0][1])
-
-    def _report_files(self, result):
-        result.status(file_name="some log.txt",
-            file_bytes=_b("1234 log message"), eof=True,
-            mime_type="text/plain; charset=utf8", test_id="foo.bar")
-        result.status(file_name="traceback",
-            file_bytes=_b("""Traceback (most recent call last):
-  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
-      AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
-testtools.matchers._impl.MismatchError: Differences: [
-[('startTestRun',), ('stopTestRun',)] != []
-[('startTestRun',), ('stopTestRun',)] != []
-]
-"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")
-
-    files_message = Equals(_u("""some log.txt: {{{1234 log message}}}
-
-Traceback (most recent call last):
-  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
-      AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
-testtools.matchers._impl.MismatchError: Differences: [
-[('startTestRun',), ('stopTestRun',)] != []
-[('startTestRun',), ('stopTestRun',)] != []
-]
-"""))
-
-    def test_status_fail(self):
-        # When a fail is seen, a synthetic test is reported, with all
-        # attached files shown as the message.
-        result = StreamSummary()
-        result.startTestRun()
-        self._report_files(result)
-        result.status("foo.bar", "fail")
-        self.assertThat(result.errors, HasLength(1))
-        self.assertEqual("foo.bar", result.errors[0][0].id())
-        self.assertThat(result.errors[0][1], self.files_message)
-
-    def test_status_xfail(self):
-        # When an xfail is seen, a synthetic test is reported, with all
-        # attached files shown as the message.
-        result = StreamSummary()
-        result.startTestRun()
-        self._report_files(result)
-        result.status("foo.bar", "xfail")
-        self.assertThat(result.expectedFailures, HasLength(1))
-        self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
-        self.assertThat(result.expectedFailures[0][1], self.files_message)
-
-    def test_status_uxsuccess(self):
-        # When a uxsuccess is seen, a synthetic test is reported.
-        result = StreamSummary()
-        result.startTestRun()
-        result.status("foo.bar", "uxsuccess")
-        self.assertThat(result.unexpectedSuccesses, HasLength(1))
-        self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
-
-
-class TestTestControl(TestCase):
-
-    def test_default(self):
-        self.assertEqual(False, TestControl().shouldStop)
-
-    def test_stop(self):
-        control = TestControl()
-        control.stop()
-        self.assertEqual(True, control.shouldStop)
-
-
-class TestTestResult(TestCase):
-    """Tests for 'TestResult'."""
-
-    run_tests_with = FullStackRunTest
-
-    def makeResult(self):
-        """Make an arbitrary result for testing."""
-        return TestResult()
-
-    def test_addSkipped(self):
-        # Calling addSkip on a TestResult records the test that was skipped in
-        # its skip_reasons dict.
-        result = self.makeResult()
-        result.addSkip(self, _u("Skipped for some reason"))
-        self.assertEqual({_u("Skipped for some reason"):[self]},
-            result.skip_reasons)
-        result.addSkip(self, _u("Skipped for some reason"))
-        self.assertEqual({_u("Skipped for some reason"):[self, self]},
-            result.skip_reasons)
-        result.addSkip(self, _u("Skipped for another reason"))
-        self.assertEqual({_u("Skipped for some reason"):[self, self],
-            _u("Skipped for another reason"):[self]},
-            result.skip_reasons)
-
-    def test_now_datetime_now(self):
-        result = self.makeResult()
-        olddatetime = testresult.real.datetime
-        def restore():
-            testresult.real.datetime = olddatetime
-        self.addCleanup(restore)
-        class Module:
-            pass
-        now = datetime.datetime.now(utc)
-        stubdatetime = Module()
-        stubdatetime.datetime = Module()
-        stubdatetime.datetime.now = lambda tz: now
-        testresult.real.datetime = stubdatetime
-        # Calling _now() looks up the time.
-        self.assertEqual(now, result._now())
-        then = now + datetime.timedelta(0, 1)
-        # Set an explicit datetime, which gets returned from then on.
-        result.time(then)
-        self.assertNotEqual(now, result._now())
-        self.assertEqual(then, result._now())
-        # go back to looking it up.
-        result.time(None)
-        self.assertEqual(now, result._now())
-
-    def test_now_datetime_time(self):
-        result = self.makeResult()
-        now = datetime.datetime.now(utc)
-        result.time(now)
-        self.assertEqual(now, result._now())
-
-    def test_traceback_formatting_without_stack_hidden(self):
-        # During the testtools test run, we show our levels of the stack,
-        # because we want to be able to use our test suite to debug our own
-        # code.
-        result = self.makeResult()
-        test = make_erroring_test()
-        test.run(result)
-        self.assertThat(
-            result.errors[0][1],
-            DocTestMatches(
-                'Traceback (most recent call last):\n'
-                '  File "...testtools...runtest.py", line ..., in _run_user\n'
-                '    return fn(*args, **kwargs)\n'
-                '  File "...testtools...testcase.py", line ..., in _run_test_method\n'
-                '    return self._get_test_method()()\n'
-                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
-                '    1/0\n'
-                'ZeroDivisionError: ...\n',
-                doctest.ELLIPSIS | doctest.REPORT_UDIFF))
-
-    def test_traceback_formatting_with_stack_hidden(self):
-        result = self.makeResult()
-        test = make_erroring_test()
-        run_with_stack_hidden(True, test.run, result)
-        self.assertThat(
-            result.errors[0][1],
-            DocTestMatches(
-                'Traceback (most recent call last):\n'
-                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
-                '    1/0\n'
-                'ZeroDivisionError: ...\n',
-                doctest.ELLIPSIS))
-
-    def test_traceback_formatting_with_stack_hidden_mismatch(self):
-        result = self.makeResult()
-        test = make_mismatching_test()
-        run_with_stack_hidden(True, test.run, result)
-        self.assertThat(
-            result.failures[0][1],
-            DocTestMatches(
-                'Traceback (most recent call last):\n'
-                '  File "...testtools...tests...test_testresult.py", line ..., in mismatch\n'
-                '    self.assertEqual(1, 2)\n'
-                '...MismatchError: 1 != 2\n',
-                doctest.ELLIPSIS))
-
-    def test_exc_info_to_unicode(self):
-        # subunit upcalls to TestResult._exc_info_to_unicode, so we need to
-        # make sure that it's there.
-        #
-        # See <https://bugs.launchpad.net/testtools/+bug/929063>.
-        test = make_erroring_test()
-        exc_info = make_exception_info(RuntimeError, "foo")
-        result = self.makeResult()
-        text_traceback = result._exc_info_to_unicode(exc_info, test)
-        self.assertEqual(
-            TracebackContent(exc_info, test).as_text(), text_traceback)
-
-
-class TestMultiTestResult(TestCase):
-    """Tests for 'MultiTestResult'."""
-
-    def setUp(self):
-        super(TestMultiTestResult, self).setUp()
-        self.result1 = LoggingResult([])
-        self.result2 = LoggingResult([])
-        self.multiResult = MultiTestResult(self.result1, self.result2)
-
-    def assertResultLogsEqual(self, expectedEvents):
-        """Assert that our test results have received the expected events."""
-        self.assertEqual(expectedEvents, self.result1._events)
-        self.assertEqual(expectedEvents, self.result2._events)
-
-    def test_repr(self):
-        self.assertEqual(
-            '<MultiTestResult (%r, %r)>' % (
-                ExtendedToOriginalDecorator(self.result1),
-                ExtendedToOriginalDecorator(self.result2)),
-            repr(self.multiResult))
-
-    def test_empty(self):
-        # Initializing a `MultiTestResult` doesn't do anything to its
-        # `TestResult`s.
-        self.assertResultLogsEqual([])
-
-    def test_failfast_get(self):
-        # Reading failfast reads from the first result - an arbitrary
-        # choice.
-        self.assertEqual(False, self.multiResult.failfast)
-        self.result1.failfast = True
-        self.assertEqual(True, self.multiResult.failfast)
-
-    def test_failfast_set(self):
-        # Writing failfast writes to all results.
-        self.multiResult.failfast = True
-        self.assertEqual(True, self.result1.failfast)
-        self.assertEqual(True, self.result2.failfast)
-
-    def test_shouldStop(self):
-        self.assertFalse(self.multiResult.shouldStop)
-        self.result2.stop()
-        # NB: result1 is not stopped: MultiTestResult has to combine the
-        # values.
-        self.assertTrue(self.multiResult.shouldStop)
-
-    def test_startTest(self):
-        # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
-        # its `TestResult`s.
-        self.multiResult.startTest(self)
-        self.assertResultLogsEqual([('startTest', self)])
-
-    def test_stop(self):
-        self.assertFalse(self.multiResult.shouldStop)
-        self.multiResult.stop()
-        self.assertResultLogsEqual(['stop'])
-
-    def test_stopTest(self):
-        # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
-        # its `TestResult`s.
-        self.multiResult.stopTest(self)
-        self.assertResultLogsEqual([('stopTest', self)])
-
-    def test_addSkipped(self):
-        # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
-        # results.
-        reason = _u("Skipped for some reason")
-        self.multiResult.addSkip(self, reason)
-        self.assertResultLogsEqual([('addSkip', self, reason)])
-
-    def test_addSuccess(self):
-        # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
-        # all its `TestResult`s.
-        self.multiResult.addSuccess(self)
-        self.assertResultLogsEqual([('addSuccess', self)])
-
-    def test_done(self):
-        # Calling `done` on a `MultiTestResult` calls `done` on all its
-        # `TestResult`s.
-        self.multiResult.done()
-        self.assertResultLogsEqual(['done'])
-
-    def test_addFailure(self):
-        # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
-        # all its `TestResult`s.
-        exc_info = make_exception_info(AssertionError, 'failure')
-        self.multiResult.addFailure(self, exc_info)
-        self.assertResultLogsEqual([('addFailure', self, exc_info)])
-
-    def test_addError(self):
-        # Calling `addError` on a `MultiTestResult` calls `addError` on all
-        # its `TestResult`s.
-        exc_info = make_exception_info(RuntimeError, 'error')
-        self.multiResult.addError(self, exc_info)
-        self.assertResultLogsEqual([('addError', self, exc_info)])
-
-    def test_startTestRun(self):
-        # Calling `startTestRun` on a `MultiTestResult` forwards to all its
-        # `TestResult`s.
-        self.multiResult.startTestRun()
-        self.assertResultLogsEqual(['startTestRun'])
-
-    def test_stopTestRun(self):
-        # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
-        # `TestResult`s.
-        self.multiResult.stopTestRun()
-        self.assertResultLogsEqual(['stopTestRun'])
-
-    def test_stopTestRun_returns_results(self):
-        # `MultiTestResult.stopTestRun` returns a tuple of the return
-        # values of the `stopTestRun`s that it forwards to.
-        class Result(LoggingResult):
-            def stopTestRun(self):
-                super(Result, self).stopTestRun()
-                return 'foo'
-        multi_result = MultiTestResult(Result([]), Result([]))
-        result = multi_result.stopTestRun()
-        self.assertEqual(('foo', 'foo'), result)
-
-    def test_tags(self):
-        # Calling `tags` on a `MultiTestResult` calls `tags` on all its
-        # `TestResult`s.
-        added_tags = set(['foo', 'bar'])
-        removed_tags = set(['eggs'])
-        self.multiResult.tags(added_tags, removed_tags)
-        self.assertResultLogsEqual([('tags', added_tags, removed_tags)])
-
-    def test_time(self):
-        # The time call is dispatched, not eaten by the base class.
-        self.multiResult.time('foo')
-        self.assertResultLogsEqual([('time', 'foo')])
-
-
-class TestTextTestResult(TestCase):
-    """Tests for 'TextTestResult'."""
-
-    def setUp(self):
-        super(TestTextTestResult, self).setUp()
-        self.result = TextTestResult(StringIO())
-
-    def getvalue(self):
-        return self.result.stream.getvalue()
-
-    def test__init_sets_stream(self):
-        result = TextTestResult("fp")
-        self.assertEqual("fp", result.stream)
-
-    def reset_output(self):
-        self.result.stream = StringIO()
-
-    def test_startTestRun(self):
-        self.result.startTestRun()
-        self.assertEqual("Tests running...\n", self.getvalue())
-
-    def test_stopTestRun_count_many(self):
-        test = make_test()
-        self.result.startTestRun()
-        self.result.startTest(test)
-        self.result.stopTest(test)
-        self.result.startTest(test)
-        self.result.stopTest(test)
-        self.result.stream = StringIO()
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))
-
-    def test_stopTestRun_count_single(self):
-        test = make_test()
-        self.result.startTestRun()
-        self.result.startTest(test)
-        self.result.stopTest(test)
-        self.reset_output()
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))
-
-    def test_stopTestRun_count_zero(self):
-        self.result.startTestRun()
-        self.reset_output()
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))
-
-    def test_stopTestRun_current_time(self):
-        test = make_test()
-        now = datetime.datetime.now(utc)
-        self.result.time(now)
-        self.result.startTestRun()
-        self.result.startTest(test)
-        now = now + datetime.timedelta(0, 0, 0, 1)
-        self.result.time(now)
-        self.result.stopTest(test)
-        self.reset_output()
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
-
-    def test_stopTestRun_successful(self):
-        self.result.startTestRun()
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("...\nOK\n", doctest.ELLIPSIS))
-
-    def test_stopTestRun_not_successful_failure(self):
-        test = make_failing_test()
-        self.result.startTestRun()
-        test.run(self.result)
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
-    def test_stopTestRun_not_successful_error(self):
-        test = make_erroring_test()
-        self.result.startTestRun()
-        test.run(self.result)
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
-    def test_stopTestRun_not_successful_unexpected_success(self):
-        test = make_unexpectedly_successful_test()
-        self.result.startTestRun()
-        test.run(self.result)
-        self.result.stopTestRun()
-        self.assertThat(self.getvalue(),
-            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
-    def test_stopTestRun_shows_details(self):
-        self.skip("Disabled per bug 1188420")
-        def run_tests():
-            self.result.startTestRun()
-            make_erroring_test().run(self.result)
-            make_unexpectedly_successful_test().run(self.result)
-            make_failing_test().run(self.result)
-            self.reset_output()
-            self.result.stopTestRun()
-        run_with_stack_hidden(True, run_tests)
-        self.assertThat(self.getvalue(),
-            DocTestMatches("""...======================================================================
-ERROR: testtools.tests.test_testresult.Test.error
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "...testtools...tests...test_testresult.py", line ..., in error
-    1/0
-ZeroDivisionError:... divi... by zero...
-======================================================================
-FAIL: testtools.tests.test_testresult.Test.failed
-----------------------------------------------------------------------
-Traceback (most recent call last):
-  File "...testtools...tests...test_testresult.py", line ..., in failed
-    self.fail("yo!")
-AssertionError: yo!
-======================================================================
-UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
-----------------------------------------------------------------------
-...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
-
-
-class TestThreadSafeForwardingResult(TestCase):
-    """Tests for `TestThreadSafeForwardingResult`."""
-
-    def make_results(self, n):
-        events = []
-        target = LoggingResult(events)
-        semaphore = threading.Semaphore(1)
-        return [
-            ThreadsafeForwardingResult(target, semaphore)
-            for i in range(n)], events
-
-    def test_nonforwarding_methods(self):
-        # startTest and stopTest are not forwarded because they need to be
-        # batched.
-        [result], events = self.make_results(1)
-        result.startTest(self)
-        result.stopTest(self)
-        self.assertEqual([], events)
-
-    def test_tags_not_forwarded(self):
-        # Tags need to be batched for each test, so they aren't forwarded
-        # until a test runs.
-        [result], events = self.make_results(1)
-        result.tags(set(['foo']), set(['bar']))
-        self.assertEqual([], events)
-
-    def test_global_tags_simple(self):
-        # Tags specified outside of a test are global. When a test's
-        # results are finally forwarded, we send these global tags through
-        # *as* test-specific tags, because, as a multiplexer, there should
-        # be no way for a global tag on an input stream to affect tests
-        # from other streams - we can just always issue test-local tags.
-        [result], events = self.make_results(1)
-        result.tags(set(['foo']), set())
-        result.time(1)
-        result.startTest(self)
-        result.time(2)
-        result.addSuccess(self)
-        self.assertEqual(
-            [('time', 1),
-             ('startTest', self),
-             ('time', 2),
-             ('tags', set(['foo']), set()),
-             ('addSuccess', self),
-             ('stopTest', self),
-             ], events)
-
-    def test_global_tags_complex(self):
-        # Multiple calls to tags() in a global context are buffered until
-        # the next test completes, and are issued as part of that test's
-        # context, because they cannot be issued until the output result
-        # is locked. The sample data shows them being merged together;
-        # this is, strictly speaking, incidental - they could be issued
-        # separately (in order) and still be legitimate.
-        # (A sketch of the batching behaviour follows this class.)
-        [result], events = self.make_results(1)
-        result.tags(set(['foo', 'bar']), set(['baz', 'qux']))
-        result.tags(set(['cat', 'qux']), set(['bar', 'dog']))
-        result.time(1)
-        result.startTest(self)
-        result.time(2)
-        result.addSuccess(self)
-        self.assertEqual(
-            [('time', 1),
-             ('startTest', self),
-             ('time', 2),
-             ('tags', set(['cat', 'foo', 'qux']), set(['dog', 'bar', 'baz'])),
-             ('addSuccess', self),
-             ('stopTest', self),
-             ], events)
-
-    def test_local_tags(self):
-        # Any tags set within a test context are forwarded in that test
-        # context when the result is finally forwarded.  This means that the
-        # tags for the test are part of the atomic message communicating
-        # everything about that test.
-        [result], events = self.make_results(1)
-        result.time(1)
-        result.startTest(self)
-        result.tags(set(['foo']), set([]))
-        result.tags(set(), set(['bar']))
-        result.time(2)
-        result.addSuccess(self)
-        self.assertEqual(
-            [('time', 1),
-             ('startTest', self),
-             ('time', 2),
-             ('tags', set(['foo']), set(['bar'])),
-             ('addSuccess', self),
-             ('stopTest', self),
-             ], events)
-
-    def test_local_tags_dont_leak(self):
-        # A tag set during a test is local to that test and is not set during
-        # the tests that follow.
-        [result], events = self.make_results(1)
-        a, b = PlaceHolder('a'), PlaceHolder('b')
-        result.time(1)
-        result.startTest(a)
-        result.tags(set(['foo']), set([]))
-        result.time(2)
-        result.addSuccess(a)
-        result.stopTest(a)
-        result.time(3)
-        result.startTest(b)
-        result.time(4)
-        result.addSuccess(b)
-        result.stopTest(b)
-        self.assertEqual(
-            [('time', 1),
-             ('startTest', a),
-             ('time', 2),
-             ('tags', set(['foo']), set()),
-             ('addSuccess', a),
-             ('stopTest', a),
-             ('time', 3),
-             ('startTest', b),
-             ('time', 4),
-             ('addSuccess', b),
-             ('stopTest', b),
-             ], events)
-
-    def test_startTestRun(self):
-        # Calls to startTestRun are not batched, because we are only
-        # interested in sending tests atomically, not the whole run.
-        [result1, result2], events = self.make_results(2)
-        result1.startTestRun()
-        result2.startTestRun()
-        self.assertEqual(["startTestRun", "startTestRun"], events)
-
-    def test_stopTestRun(self):
-        # Calls to stopTestRun are not batched, because we are only
-        # interested in sending tests atomically, not the whole run.
-        [result1, result2], events = self.make_results(2)
-        result1.stopTestRun()
-        result2.stopTestRun()
-        self.assertEqual(["stopTestRun", "stopTestRun"], events)
-
-    def test_forward_addError(self):
-        # Once we receive an addError event, we forward all of the events for
-        # that test, as we now know that test is complete.
-        [result], events = self.make_results(1)
-        exc_info = make_exception_info(RuntimeError, 'error')
-        start_time = datetime.datetime.utcfromtimestamp(1.489)
-        end_time = datetime.datetime.utcfromtimestamp(51.476)
-        result.time(start_time)
-        result.startTest(self)
-        result.time(end_time)
-        result.addError(self, exc_info)
-        self.assertEqual([
-            ('time', start_time),
-            ('startTest', self),
-            ('time', end_time),
-            ('addError', self, exc_info),
-            ('stopTest', self),
-            ], events)
-
-    def test_forward_addFailure(self):
-        # Once we receive an addFailure event, we forward all of the events
-        # for that test, as we now know that test is complete.
-        [result], events = self.make_results(1)
-        exc_info = make_exception_info(AssertionError, 'failure')
-        start_time = datetime.datetime.utcfromtimestamp(2.489)
-        end_time = datetime.datetime.utcfromtimestamp(3.476)
-        result.time(start_time)
-        result.startTest(self)
-        result.time(end_time)
-        result.addFailure(self, exc_info)
-        self.assertEqual([
-            ('time', start_time),
-            ('startTest', self),
-            ('time', end_time),
-            ('addFailure', self, exc_info),
-            ('stopTest', self),
-            ], events)
-
-    def test_forward_addSkip(self):
-        # Once we receive an addSkip event, we forward all of the events for
-        # that test, as we now know that test is complete.
-        [result], events = self.make_results(1)
-        reason = _u("Skipped for some reason")
-        start_time = datetime.datetime.utcfromtimestamp(4.489)
-        end_time = datetime.datetime.utcfromtimestamp(5.476)
-        result.time(start_time)
-        result.startTest(self)
-        result.time(end_time)
-        result.addSkip(self, reason)
-        self.assertEqual([
-            ('time', start_time),
-            ('startTest', self),
-            ('time', end_time),
-            ('addSkip', self, reason),
-            ('stopTest', self),
-            ], events)
-
-    def test_forward_addSuccess(self):
-        # Once we receive an addSuccess event, we forward all of the events
-        # for that test, as we now know that test is complete.
-        [result], events = self.make_results(1)
-        start_time = datetime.datetime.utcfromtimestamp(6.489)
-        end_time = datetime.datetime.utcfromtimestamp(7.476)
-        result.time(start_time)
-        result.startTest(self)
-        result.time(end_time)
-        result.addSuccess(self)
-        self.assertEqual([
-            ('time', start_time),
-            ('startTest', self),
-            ('time', end_time),
-            ('addSuccess', self),
-            ('stopTest', self),
-            ], events)
-
-    def test_only_one_test_at_a_time(self):
-        # Even if there are multiple ThreadsafeForwardingResults forwarding to
-        # the same target result, the target result only receives the complete
-        # events for one test at a time.
-        [result1, result2], events = self.make_results(2)
-        test1, test2 = self, make_test()
-        start_time1 = datetime.datetime.utcfromtimestamp(1.489)
-        end_time1 = datetime.datetime.utcfromtimestamp(2.476)
-        start_time2 = datetime.datetime.utcfromtimestamp(3.489)
-        end_time2 = datetime.datetime.utcfromtimestamp(4.489)
-        result1.time(start_time1)
-        result2.time(start_time2)
-        result1.startTest(test1)
-        result2.startTest(test2)
-        result1.time(end_time1)
-        result2.time(end_time2)
-        result2.addSuccess(test2)
-        result1.addSuccess(test1)
-        self.assertEqual([
-            # test2 finishes first, and so is flushed first.
-            ('time', start_time2),
-            ('startTest', test2),
-            ('time', end_time2),
-            ('addSuccess', test2),
-            ('stopTest', test2),
-            # test1 finishes next, and thus follows.
-            ('time', start_time1),
-            ('startTest', test1),
-            ('time', end_time1),
-            ('addSuccess', test1),
-            ('stopTest', test1),
-            ], events)
-
-
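-# A minimal sketch, not part of the original suite: the batching rule
-# the tests above describe - a ThreadsafeForwardingResult buffers a
-# test's events and flushes them to the target only once the outcome
-# is known - using the module-level imports.
-def _example_threadsafe_batching():
-    target = TestResult()
-    forwarder = ThreadsafeForwardingResult(target, threading.Semaphore(1))
-    test = PlaceHolder('example')
-    forwarder.startTest(test)
-    assert 0 == target.testsRun  # Nothing forwarded yet.
-    forwarder.addSuccess(test)
-    assert 1 == target.testsRun  # Outcome known, whole test flushed.
-
-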
-class TestMergeTags(TestCase):
-
-    def test_merge_unseen_gone_tag(self):
-        # If an incoming "gone" tag isn't currently tagged one way or the
-        # other, add it to the "gone" tags.
-        current_tags = set(['present']), set(['missing'])
-        changing_tags = set(), set(['going'])
-        expected = set(['present']), set(['missing', 'going'])
-        self.assertEqual(
-            expected, _merge_tags(current_tags, changing_tags))
-
-    def test_merge_incoming_gone_tag_with_current_new_tag(self):
-        # If one of the incoming "gone" tags is one of the existing "new"
-        # tags, then it overrides the "new" tag, leaving it marked as "gone".
-        current_tags = set(['present', 'going']), set(['missing'])
-        changing_tags = set(), set(['going'])
-        expected = set(['present']), set(['missing', 'going'])
-        self.assertEqual(
-            expected, _merge_tags(current_tags, changing_tags))
-
-    def test_merge_unseen_new_tag(self):
-        current_tags = set(['present']), set(['missing'])
-        changing_tags = set(['coming']), set()
-        expected = set(['coming', 'present']), set(['missing'])
-        self.assertEqual(
-            expected, _merge_tags(current_tags, changing_tags))
-
-    def test_merge_incoming_new_tag_with_current_gone_tag(self):
-        # If one of the incoming "new" tags is currently marked as "gone",
-        # then it overrides the "gone" tag, leaving it marked as "new".
-        current_tags = set(['present']), set(['coming', 'missing'])
-        changing_tags = set(['coming']), set()
-        expected = set(['coming', 'present']), set(['missing'])
-        self.assertEqual(
-            expected, _merge_tags(current_tags, changing_tags))
-
-
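-# A minimal sketch, not part of the original suite: the conflict rule
-# TestMergeTags above checks - an incoming change always wins - using
-# the module-level _merge_tags import.
-def _example_merge_tags():
-    current_tags = set(['a']), set(['b'])   # 'a' is new, 'b' is gone.
-    changing_tags = set(['b']), set(['a'])  # The change flips both.
-    assert (set(['b']), set(['a'])) == _merge_tags(
-        current_tags, changing_tags)
-
-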
-class TestStreamResultRouter(TestCase):
-
-    def test_start_stop_test_run_no_fallback(self):
-        result = StreamResultRouter()
-        result.startTestRun()
-        result.stopTestRun()
-
-    def test_no_fallback_errors(self):
-        self.assertRaises(Exception, StreamResultRouter().status, test_id='f')
-
-    def test_fallback_calls(self):
-        fallback = LoggingStreamResult()
-        result = StreamResultRouter(fallback)
-        result.startTestRun()
-        result.status(test_id='foo')
-        result.stopTestRun()
-        self.assertEqual([
-            ('startTestRun',),
-            ('status', 'foo', None, None, True, None, None, False, None, None,
-             None),
-            ('stopTestRun',),
-            ],
-            fallback._events)
-
-    def test_fallback_no_do_start_stop_run(self):
-        fallback = LoggingStreamResult()
-        result = StreamResultRouter(fallback, do_start_stop_run=False)
-        result.startTestRun()
-        result.status(test_id='foo')
-        result.stopTestRun()
-        self.assertEqual([
-            ('status', 'foo', None, None, True, None, None, False, None, None,
-             None)
-            ],
-            fallback._events)
-
-    def test_add_rule_bad_policy(self):
-        router = StreamResultRouter()
-        target = LoggingStreamResult()
-        self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa',
-            route_prefix='0')
-
-    def test_add_rule_extra_policy_arg(self):
-        router = StreamResultRouter()
-        target = LoggingStreamResult()
-        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
-            route_prefix='0', foo=1)
-
-    def test_add_rule_missing_prefix(self):
-        router = StreamResultRouter()
-        target = LoggingStreamResult()
-        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix')
-
-    def test_add_rule_slash_in_prefix(self):
-        router = StreamResultRouter()
-        target = LoggingStreamResult()
-        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
-            route_prefix='0/')
-
-    def test_add_rule_route_code_consume_False(self):
-        fallback = LoggingStreamResult()
-        target = LoggingStreamResult()
-        router = StreamResultRouter(fallback)
-        router.add_rule(target, 'route_code_prefix', route_prefix='0')
-        router.status(test_id='foo', route_code='0')
-        router.status(test_id='foo', route_code='0/1')
-        router.status(test_id='foo')
-        self.assertEqual([
-            ('status', 'foo', None, None, True, None, None, False, None, '0',
-             None),
-            ('status', 'foo', None, None, True, None, None, False, None, '0/1',
-             None),
-            ],
-            target._events)
-        self.assertEqual([
-            ('status', 'foo', None, None, True, None, None, False, None, None,
-             None),
-            ],
-            fallback._events)
-
-    def test_add_rule_route_code_consume_True(self):
-        fallback = LoggingStreamResult()
-        target = LoggingStreamResult()
-        router = StreamResultRouter(fallback)
-        router.add_rule(
-            target, 'route_code_prefix', route_prefix='0', consume_route=True)
-        router.status(test_id='foo', route_code='0') # -> None
-        router.status(test_id='foo', route_code='0/1') # -> 1
-        router.status(test_id='foo', route_code='1') # -> fallback as-is.
-        self.assertEqual([
-            ('status', 'foo', None, None, True, None, None, False, None, None,
-             None),
-            ('status', 'foo', None, None, True, None, None, False, None, '1',
-             None),
-            ],
-            target._events)
-        self.assertEqual([
-            ('status', 'foo', None, None, True, None, None, False, None, '1',
-             None),
-            ],
-            fallback._events)
-
-    def test_add_rule_test_id(self):
-        nontest = LoggingStreamResult()
-        test = LoggingStreamResult()
-        router = StreamResultRouter(test)
-        router.add_rule(nontest, 'test_id', test_id=None)
-        router.status(test_id='foo', file_name="bar", file_bytes=b'')
-        router.status(file_name="bar", file_bytes=b'')
-        self.assertEqual([
-            ('status', 'foo', None, None, True, 'bar', b'', False, None, None,
-             None),], test._events)
-        self.assertEqual([
-            ('status', None, None, None, True, 'bar', b'', False, None, None,
-             None),], nontest._events)
-
-    def test_add_rule_do_start_stop_run(self):
-        nontest = LoggingStreamResult()
-        router = StreamResultRouter()
-        router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
-        router.startTestRun()
-        router.stopTestRun()
-        self.assertEqual([
-            ('startTestRun',),
-            ('stopTestRun',),
-            ], nontest._events)
-
-    def test_add_rule_do_start_stop_run_after_startTestRun(self):
-        nontest = LoggingStreamResult()
-        router = StreamResultRouter()
-        router.startTestRun()
-        router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
-        router.stopTestRun()
-        self.assertEqual([
-            ('startTestRun',),
-            ('stopTestRun',),
-            ], nontest._events)
-
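Taken together, the router tests above describe a small routing API. A rough
usage sketch under the same assumptions (the top-level import is assumed;
StreamResult here is the logging double from testtools.testresult.doubles):

    from testtools import StreamResultRouter  # export assumed
    from testtools.testresult.doubles import StreamResult

    fallback = StreamResult()
    target = StreamResult()
    router = StreamResultRouter(fallback)
    # Events whose route code starts with '0' go to target;
    # consume_route=True strips the matched prefix before forwarding.
    router.add_rule(target, 'route_code_prefix', route_prefix='0',
        consume_route=True)
    router.startTestRun()
    router.status(test_id='demo', route_code='0/1')  # -> target, route '1'
    router.status(test_id='demo')                    # no route -> fallback
    router.stopTestRun()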
-
-class TestStreamToQueue(TestCase):
-
-    def make_result(self):
-        queue = Queue()
-        return queue, StreamToQueue(queue, "foo")
-
-    def test_status(self):
-        def check_event(event_dict, route=None, time=None):
-            self.assertEqual("status", event_dict['event'])
-            self.assertEqual("test", event_dict['test_id'])
-            self.assertEqual("fail", event_dict['test_status'])
-            self.assertEqual(set(["quux"]), event_dict['test_tags'])
-            self.assertEqual(False, event_dict['runnable'])
-            self.assertEqual("file", event_dict['file_name'])
-            self.assertEqual(_b("content"), event_dict['file_bytes'])
-            self.assertEqual(True, event_dict['eof'])
-            self.assertEqual("quux", event_dict['mime_type'])
-            self.assertEqual("test", event_dict['test_id'])
-            self.assertEqual(route, event_dict['route_code'])
-            self.assertEqual(time, event_dict['timestamp'])
-        queue, result = self.make_result()
-        result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
-            file_name="file", file_bytes=_b("content"), eof=True,
-            mime_type="quux", route_code=None, timestamp=None)
-        self.assertEqual(1, queue.qsize())
-        a_time = datetime.datetime.now(utc)
-        result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
-            file_name="file", file_bytes=_b("content"), eof=True,
-            mime_type="quux", route_code="bar", timestamp=a_time)
-        self.assertEqual(2, queue.qsize())
-        check_event(queue.get(False), route="foo", time=None)
-        check_event(queue.get(False), route="foo/bar", time=a_time)
-
-    def testStartTestRun(self):
-        queue, result = self.make_result()
-        result.startTestRun()
-        self.assertEqual(
-            {'event':'startTestRun', 'result':result}, queue.get(False))
-        self.assertTrue(queue.empty())
-
-    def testStopTestRun(self):
-        queue, result = self.make_result()
-        result.stopTestRun()
-        self.assertEqual(
-            {'event':'stopTestRun', 'result':result}, queue.get(False))
-        self.assertTrue(queue.empty())
-
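StreamToQueue, exercised above, wraps each event in a dict and prefixes its
own route code so results can cross thread boundaries. A sketch of the
intended use (the top-level import and the worker name are assumptions):

    from extras import try_imports
    from testtools import StreamToQueue  # export assumed

    Queue = try_imports(['Queue.Queue', 'queue.Queue'])

    queue = Queue()
    result = StreamToQueue(queue, 'worker-0')
    result.startTestRun()
    result.status(test_id='demo', test_status='success')
    result.stopTestRun()
    # A consumer thread would drain the queue and replay the dicts into a
    # real StreamResult; here we just peek at the first event.
    assert queue.get(False)['event'] == 'startTestRun'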
-
-class TestExtendedToOriginalResultDecoratorBase(TestCase):
-
-    def make_26_result(self):
-        self.result = Python26TestResult()
-        self.make_converter()
-
-    def make_27_result(self):
-        self.result = Python27TestResult()
-        self.make_converter()
-
-    def make_converter(self):
-        self.converter = ExtendedToOriginalDecorator(self.result)
-
-    def make_extended_result(self):
-        self.result = ExtendedTestResult()
-        self.make_converter()
-
-    def check_outcome_details(self, outcome):
-        """Call an outcome with a details dict to be passed through."""
-        # This dict is /not/ convertible - that's deliberate, as it should
-        # not hit the conversion code path.
-        details = {'foo': 'bar'}
-        getattr(self.converter, outcome)(self, details=details)
-        self.assertEqual([(outcome, self, details)], self.result._events)
-
-    def get_details_and_string(self):
-        """Get a details dict and expected string."""
-        text1 = lambda: [_b("1\n2\n")]
-        text2 = lambda: [_b("3\n4\n")]
-        bin1 = lambda: [_b("5\n")]
-        details = {'text 1': Content(ContentType('text', 'plain'), text1),
-            'text 2': Content(ContentType('text', 'strange'), text2),
-            'bin 1': Content(ContentType('application', 'binary'), bin1)}
-        return (details,
-                ("Binary content:\n"
-                 "  bin 1 (application/binary)\n"
-                 "\n"
-                 "text 1: {{{\n"
-                 "1\n"
-                 "2\n"
-                 "}}}\n"
-                 "\n"
-                 "text 2: {{{\n"
-                 "3\n"
-                 "4\n"
-                 "}}}\n"))
-
-    def check_outcome_details_to_exec_info(self, outcome, expected=None):
-        """Call an outcome with a details dict to be made into exc_info."""
-        # The conversion is done using RemoteError and the string contents
-        # of the text types in the details dict.
-        if not expected:
-            expected = outcome
-        details, err_str = self.get_details_and_string()
-        getattr(self.converter, outcome)(self, details=details)
-        err = self.converter._details_to_exc_info(details)
-        self.assertEqual([(expected, self, err)], self.result._events)
-
-    def check_outcome_details_to_nothing(self, outcome, expected=None):
-        """Call an outcome with a details dict to be swallowed."""
-        if not expected:
-            expected = outcome
-        details = {'foo': 'bar'}
-        getattr(self.converter, outcome)(self, details=details)
-        self.assertEqual([(expected, self)], self.result._events)
-
-    def check_outcome_details_to_string(self, outcome):
-        """Call an outcome with a details dict to be stringified."""
-        details, err_str = self.get_details_and_string()
-        getattr(self.converter, outcome)(self, details=details)
-        self.assertEqual([(outcome, self, err_str)], self.result._events)
-
-    def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
-        """Call an outcome with a details dict to have an arg extracted."""
-        details, _ = self.get_details_and_string()
-        if extra_detail:
-            details.update(extra_detail)
-        getattr(self.converter, outcome)(self, details=details)
-        self.assertEqual([(outcome, self, arg)], self.result._events)
-
-    def check_outcome_exc_info(self, outcome, expected=None):
-        """Check that calling a legacy outcome still works."""
-        # calling some outcome with the legacy exc_info style api (no keyword
-        # parameters) gets passed through.
-        if not expected:
-            expected = outcome
-        err = sys.exc_info()
-        getattr(self.converter, outcome)(self, err)
-        self.assertEqual([(expected, self, err)], self.result._events)
-
-    def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
-        """Check that calling a legacy outcome on a fallback works."""
-        # Calling some outcome with the legacy exc_info style api (no keyword
-        # parameters) on a fallback result drops the error: only the mapped
-        # outcome is recorded.
-        if not expected:
-            expected = outcome
-        err = sys.exc_info()
-        getattr(self.converter, outcome)(self, err)
-        self.assertEqual([(expected, self)], self.result._events)
-
-    def check_outcome_nothing(self, outcome, expected=None):
-        """Check that calling a legacy outcome still works."""
-        if not expected:
-            expected = outcome
-        getattr(self.converter, outcome)(self)
-        self.assertEqual([(expected, self)], self.result._events)
-
-    def check_outcome_string_nothing(self, outcome, expected):
-        """Check that calling outcome with a string calls expected."""
-        getattr(self.converter, outcome)(self, "foo")
-        self.assertEqual([(expected, self)], self.result._events)
-
-    def check_outcome_string(self, outcome):
-        """Check that calling outcome with a string works."""
-        getattr(self.converter, outcome)(self, "foo")
-        self.assertEqual([(outcome, self, "foo")], self.result._events)
-
-
-class TestExtendedToOriginalResultDecorator(
-    TestExtendedToOriginalResultDecoratorBase):
-
-    def test_failfast_py26(self):
-        self.make_26_result()
-        self.assertEqual(False, self.converter.failfast)
-        self.converter.failfast = True
-        self.assertFalse(safe_hasattr(self.converter.decorated, 'failfast'))
-
-    def test_failfast_py27(self):
-        self.make_27_result()
-        self.assertEqual(False, self.converter.failfast)
-        # setting it should write it to the backing result
-        self.converter.failfast = True
-        self.assertEqual(True, self.converter.decorated.failfast)
-
-    def test_progress_py26(self):
-        self.make_26_result()
-        self.converter.progress(1, 2)
-
-    def test_progress_py27(self):
-        self.make_27_result()
-        self.converter.progress(1, 2)
-
-    def test_progress_pyextended(self):
-        self.make_extended_result()
-        self.converter.progress(1, 2)
-        self.assertEqual([('progress', 1, 2)], self.result._events)
-
-    def test_shouldStop(self):
-        self.make_26_result()
-        self.assertEqual(False, self.converter.shouldStop)
-        self.converter.decorated.stop()
-        self.assertEqual(True, self.converter.shouldStop)
-
-    def test_startTest_py26(self):
-        self.make_26_result()
-        self.converter.startTest(self)
-        self.assertEqual([('startTest', self)], self.result._events)
-
-    def test_startTest_py27(self):
-        self.make_27_result()
-        self.converter.startTest(self)
-        self.assertEqual([('startTest', self)], self.result._events)
-
-    def test_startTest_pyextended(self):
-        self.make_extended_result()
-        self.converter.startTest(self)
-        self.assertEqual([('startTest', self)], self.result._events)
-
-    def test_startTestRun_py26(self):
-        self.make_26_result()
-        self.converter.startTestRun()
-        self.assertEqual([], self.result._events)
-
-    def test_startTestRun_py27(self):
-        self.make_27_result()
-        self.converter.startTestRun()
-        self.assertEqual([('startTestRun',)], self.result._events)
-
-    def test_startTestRun_pyextended(self):
-        self.make_extended_result()
-        self.converter.startTestRun()
-        self.assertEqual([('startTestRun',)], self.result._events)
-
-    def test_stopTest_py26(self):
-        self.make_26_result()
-        self.converter.stopTest(self)
-        self.assertEqual([('stopTest', self)], self.result._events)
-
-    def test_stopTest_py27(self):
-        self.make_27_result()
-        self.converter.stopTest(self)
-        self.assertEqual([('stopTest', self)], self.result._events)
-
-    def test_stopTest_pyextended(self):
-        self.make_extended_result()
-        self.converter.stopTest(self)
-        self.assertEqual([('stopTest', self)], self.result._events)
-
-    def test_stopTestRun_py26(self):
-        self.make_26_result()
-        self.converter.stopTestRun()
-        self.assertEqual([], self.result._events)
-
-    def test_stopTestRun_py27(self):
-        self.make_27_result()
-        self.converter.stopTestRun()
-        self.assertEqual([('stopTestRun',)], self.result._events)
-
-    def test_stopTestRun_pyextended(self):
-        self.make_extended_result()
-        self.converter.stopTestRun()
-        self.assertEqual([('stopTestRun',)], self.result._events)
-
-    def test_tags_py26(self):
-        self.make_26_result()
-        self.converter.tags(set([1]), set([2]))
-
-    def test_tags_py27(self):
-        self.make_27_result()
-        self.converter.tags(set([1]), set([2]))
-
-    def test_tags_pyextended(self):
-        self.make_extended_result()
-        self.converter.tags(set([1]), set([2]))
-        self.assertEqual([('tags', set([1]), set([2]))], self.result._events)
-
-    def test_time_py26(self):
-        self.make_26_result()
-        self.converter.time(1)
-
-    def test_time_py27(self):
-        self.make_27_result()
-        self.converter.time(1)
-
-    def test_time_pyextended(self):
-        self.make_extended_result()
-        self.converter.time(1)
-        self.assertEqual([('time', 1)], self.result._events)
-
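The decorator under test adapts details-style outcomes to whatever the
wrapped result supports. A small sketch against a stock unittest result, per
the py27 cases above (the top-level import is assumed):

    import unittest
    from testtools import ExtendedToOriginalDecorator  # export assumed
    from testtools.content import text_content

    class Demo(unittest.TestCase):
        def runTest(self):
            pass

    case = Demo()
    plain = unittest.TestResult()
    converter = ExtendedToOriginalDecorator(plain)
    converter.startTest(case)
    # The 'reason' detail is extracted and passed as the plain skip string.
    converter.addSkip(case, details={'reason': text_content('not today')})
    converter.stopTest(case)
    assert plain.skipped == [(case, 'not today')]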
-
-class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
-
-    outcome = 'addError'
-
-    def test_outcome_Original_py26(self):
-        self.make_26_result()
-        self.check_outcome_exc_info(self.outcome)
-
-    def test_outcome_Original_py27(self):
-        self.make_27_result()
-        self.check_outcome_exc_info(self.outcome)
-
-    def test_outcome_Original_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_exc_info(self.outcome)
-
-    def test_outcome_Extended_py26(self):
-        self.make_26_result()
-        self.check_outcome_details_to_exec_info(self.outcome)
-
-    def test_outcome_Extended_py27(self):
-        self.make_27_result()
-        self.check_outcome_details_to_exec_info(self.outcome)
-
-    def test_outcome_Extended_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_details(self.outcome)
-
-    def test_outcome__no_details(self):
-        self.make_extended_result()
-        self.assertThat(
-            lambda: getattr(self.converter, self.outcome)(self),
-            Raises(MatchesException(ValueError)))
-
-
-class TestExtendedToOriginalAddFailure(
-    TestExtendedToOriginalAddError):
-
-    outcome = 'addFailure'
-
-
-class TestExtendedToOriginalAddExpectedFailure(
-    TestExtendedToOriginalAddError):
-
-    outcome = 'addExpectedFailure'
-
-    def test_outcome_Original_py26(self):
-        self.make_26_result()
-        self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
-
-    def test_outcome_Extended_py26(self):
-        self.make_26_result()
-        self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
-
-
-class TestExtendedToOriginalAddSkip(
-    TestExtendedToOriginalResultDecoratorBase):
-
-    outcome = 'addSkip'
-
-    def test_outcome_Original_py26(self):
-        self.make_26_result()
-        self.check_outcome_string_nothing(self.outcome, 'addSuccess')
-
-    def test_outcome_Original_py27(self):
-        self.make_27_result()
-        self.check_outcome_string(self.outcome)
-
-    def test_outcome_Original_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_string(self.outcome)
-
-    def test_outcome_Extended_py26(self):
-        self.make_26_result()
-        self.check_outcome_string_nothing(self.outcome, 'addSuccess')
-
-    def test_outcome_Extended_py27_no_reason(self):
-        self.make_27_result()
-        self.check_outcome_details_to_string(self.outcome)
-
-    def test_outcome_Extended_py27_reason(self):
-        self.make_27_result()
-        self.check_outcome_details_to_arg(self.outcome, 'foo',
-            {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
-
-    def test_outcome_Extended_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_details(self.outcome)
-
-    def test_outcome__no_details(self):
-        self.make_extended_result()
-        self.assertThat(
-            lambda: getattr(self.converter, self.outcome)(self),
-            Raises(MatchesException(ValueError)))
-
-
-class TestExtendedToOriginalAddSuccess(
-    TestExtendedToOriginalResultDecoratorBase):
-
-    outcome = 'addSuccess'
-    expected = 'addSuccess'
-
-    def test_outcome_Original_py26(self):
-        self.make_26_result()
-        self.check_outcome_nothing(self.outcome, self.expected)
-
-    def test_outcome_Original_py27(self):
-        self.make_27_result()
-        self.check_outcome_nothing(self.outcome)
-
-    def test_outcome_Original_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_nothing(self.outcome)
-
-    def test_outcome_Extended_py26(self):
-        self.make_26_result()
-        self.check_outcome_details_to_nothing(self.outcome, self.expected)
-
-    def test_outcome_Extended_py27(self):
-        self.make_27_result()
-        self.check_outcome_details_to_nothing(self.outcome)
-
-    def test_outcome_Extended_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_details(self.outcome)
-
-
-class TestExtendedToOriginalAddUnexpectedSuccess(
-    TestExtendedToOriginalResultDecoratorBase):
-
-    outcome = 'addUnexpectedSuccess'
-    expected = 'addFailure'
-
-    def test_outcome_Original_py26(self):
-        self.make_26_result()
-        getattr(self.converter, self.outcome)(self)
-        [event] = self.result._events
-        self.assertEqual((self.expected, self), event[:2])
-
-    def test_outcome_Original_py27(self):
-        self.make_27_result()
-        self.check_outcome_nothing(self.outcome)
-
-    def test_outcome_Original_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_nothing(self.outcome)
-
-    def test_outcome_Extended_py26(self):
-        self.make_26_result()
-        getattr(self.converter, self.outcome)(self)
-        [event] = self.result._events
-        self.assertEqual((self.expected, self), event[:2])
-
-    def test_outcome_Extended_py27(self):
-        self.make_27_result()
-        self.check_outcome_details_to_nothing(self.outcome)
-
-    def test_outcome_Extended_pyextended(self):
-        self.make_extended_result()
-        self.check_outcome_details(self.outcome)
-
-
-class TestExtendedToOriginalResultOtherAttributes(
-    TestExtendedToOriginalResultDecoratorBase):
-
-    def test_other_attribute(self):
-        class OtherExtendedResult:
-            def foo(self):
-                return 2
-            bar = 1
-        self.result = OtherExtendedResult()
-        self.make_converter()
-        self.assertEqual(1, self.converter.bar)
-        self.assertEqual(2, self.converter.foo())
-
-
-class TestNonAsciiResults(TestCase):
-    """Test all kinds of tracebacks are cleanly interpreted as unicode
-
-    Currently this only uses weak "contains" assertions; it would be good to
-    be much stricter about the expected output. That would add a few failures
-    for the current release of IronPython, for instance, which gets some
-    traceback lines muddled.
-    """
-
-    _sample_texts = (
-        _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
-        _u("\u5357\u7121"), # In ISO 2022 encodings
-        _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
-        )
-
-    _is_pypy = "__pypy__" in sys.builtin_module_names
-    # Everything but Jython shows syntax errors on the current character
-    _error_on_character = os.name != "java" and not _is_pypy
-
-    def _run(self, stream, test):
-        """Run the test, the same as in testtools.run but not to stdout"""
-        result = TextTestResult(stream)
-        result.startTestRun()
-        try:
-            return test.run(result)
-        finally:
-            result.stopTestRun()
-
-    def _write_module(self, name, encoding, contents):
-        """Create Python module on disk with contents in given encoding"""
-        try:
-            # Need to pre-check that the coding is valid, or codecs.open
-            # drops the file without closing it, which breaks non-refcounted
-            # pythons
-            codecs.lookup(encoding)
-        except LookupError:
-            self.skip("Encoding unsupported by implementation: %r" % encoding)
-        f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
-        try:
-            f.write(contents)
-        finally:
-            f.close()
-
-    def _test_external_case(self, testline, coding="ascii", modulelevel="",
-            suffix=""):
-        """Create and run a test case in a seperate module"""
-        self._setup_external_case(testline, coding, modulelevel, suffix)
-        return self._run_external_case()
-
-    def _setup_external_case(self, testline, coding="ascii", modulelevel="",
-            suffix=""):
-        """Create a test case in a seperate module"""
-        _, prefix, self.modname = self.id().rsplit(".", 2)
-        self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
-        self.addCleanup(shutil.rmtree, self.dir)
-        self._write_module(self.modname, coding,
-            # Older Python 2 versions don't see a coding declaration in a
-            # docstring so it has to be in a comment, but then we can't
-            # workaround bug: <http://ironpython.codeplex.com/workitem/26940>
-            "# coding: %s\n"
-            "import testtools\n"
-            "%s\n"
-            "class Test(testtools.TestCase):\n"
-            "    def runTest(self):\n"
-            "        %s\n" % (coding, modulelevel, testline))
-
-    def _run_external_case(self):
-        """Run the prepared test case in a seperate module"""
-        sys.path.insert(0, self.dir)
-        self.addCleanup(sys.path.remove, self.dir)
-        module = __import__(self.modname)
-        self.addCleanup(sys.modules.pop, self.modname)
-        stream = StringIO()
-        self._run(stream, module.Test())
-        return stream.getvalue()
-
-    def _get_sample_text(self, encoding="unicode_internal"):
-        if encoding is None and str_is_unicode:
-            encoding = "unicode_internal"
-        for u in self._sample_texts:
-            try:
-                b = u.encode(encoding)
-                if u == b.decode(encoding):
-                    if str_is_unicode:
-                        return u, u
-                    return u, b
-            except (LookupError, UnicodeError):
-                pass
-        self.skip("Could not find a sample text for encoding: %r" % encoding)
-
-    def _as_output(self, text):
-        return text
-
-    def test_non_ascii_failure_string(self):
-        """Assertion contents can be non-ascii and should get decoded"""
-        text, raw = self._get_sample_text(_get_exception_encoding())
-        textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
-        self.assertIn(self._as_output(text), textoutput)
-
-    def test_non_ascii_failure_string_via_exec(self):
-        """Assertion via exec can be non-ascii and still gets decoded"""
-        text, raw = self._get_sample_text(_get_exception_encoding())
-        textoutput = self._test_external_case(
-            testline='exec ("self.fail(%s)")' % _r(raw))
-        self.assertIn(self._as_output(text), textoutput)
-
-    def test_control_characters_in_failure_string(self):
-        """Control characters in assertions should be escaped"""
-        textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
-        self.expectFailure("Defense against the beeping horror unimplemented",
-            self.assertNotIn, self._as_output("\a\a\a"), textoutput)
-        self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
-
-    def _local_os_error_matcher(self):
-        if sys.version_info > (3, 3):
-            return MatchesAny(Contains("FileExistsError: "),
-                              Contains("PermissionError: "))
-        elif os.name != "nt":
-            return Contains(self._as_output("OSError: "))
-        else:
-            return Contains(self._as_output("WindowsError: "))
-
-    def test_os_error(self):
-        """Locale error messages from the OS shouldn't break anything"""
-        textoutput = self._test_external_case(
-            modulelevel="import os",
-            testline="os.mkdir('/')")
-        self.assertThat(textoutput, self._local_os_error_matcher())
-
-    def test_assertion_text_shift_jis(self):
-        """A terminal raw backslash in an encoded string is weird but fine"""
-        example_text = _u("\u5341")
-        textoutput = self._test_external_case(
-            coding="shift_jis",
-            testline="self.fail('%s')" % example_text)
-        if str_is_unicode:
-            output_text = example_text
-        else:
-            output_text = example_text.encode("shift_jis").decode(
-                _get_exception_encoding(), "replace")
-        self.assertIn(self._as_output("AssertionError: %s" % output_text),
-            textoutput)
-
-    def test_file_comment_iso2022_jp(self):
-        """Control character escapes must be preserved if valid encoding"""
-        example_text, _ = self._get_sample_text("iso2022_jp")
-        textoutput = self._test_external_case(
-            coding="iso2022_jp",
-            testline="self.fail('Simple') # %s" % example_text)
-        self.assertIn(self._as_output(example_text), textoutput)
-
-    def test_unicode_exception(self):
-        """Exceptions that can be formated losslessly as unicode should be"""
-        example_text, _ = self._get_sample_text()
-        exception_class = (
-            "class FancyError(Exception):\n"
-            # A __unicode__ method does nothing on py3k but the default works
-            "    def __unicode__(self):\n"
-            "        return self.args[0]\n")
-        textoutput = self._test_external_case(
-            modulelevel=exception_class,
-            testline="raise FancyError(%s)" % _r(example_text))
-        self.assertIn(self._as_output(example_text), textoutput)
-
-    def test_unprintable_exception(self):
-        """A totally useless exception instance still prints something"""
-        exception_class = (
-            "class UnprintableError(Exception):\n"
-            "    def __str__(self):\n"
-            "        raise RuntimeError\n"
-            "    def __unicode__(self):\n"
-            "        raise RuntimeError\n"
-            "    def __repr__(self):\n"
-            "        raise RuntimeError\n")
-        textoutput = self._test_external_case(
-            modulelevel=exception_class,
-            testline="raise UnprintableError")
-        self.assertIn(self._as_output(
-            "UnprintableError: <unprintable UnprintableError object>\n"),
-            textoutput)
-
-    def test_non_ascii_dirname(self):
-        """Script paths in the traceback can be non-ascii"""
-        text, raw = self._get_sample_text(sys.getfilesystemencoding())
-        textoutput = self._test_external_case(
-            # Avoid a bug in Python 3 by giving a unicode source encoding
-            # rather than just ascii, which raises a SyntaxError with no
-            # other details
-            coding="utf-8",
-            testline="self.fail('Simple')",
-            suffix=raw)
-        self.assertIn(self._as_output(text), textoutput)
-
-    def test_syntax_error(self):
-        """Syntax errors should still have fancy special-case formatting"""
-        textoutput = self._test_external_case("exec ('f(a, b c)')")
-        self.assertIn(self._as_output(
-            '  File "<string>", line 1\n'
-            '    f(a, b c)\n'
-            + ' ' * self._error_on_character +
-            '          ^\n'
-            'SyntaxError: '
-            ), textoutput)
-
-    def test_syntax_error_malformed(self):
-        """Syntax errors with bogus parameters should break anything"""
-        textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
-        self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
-
-    def test_syntax_error_import_binary(self):
-        """Importing a binary file shouldn't break SyntaxError formatting"""
-        self._setup_external_case("import bad")
-        f = open(os.path.join(self.dir, "bad.py"), "wb")
-        try:
-            f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
-        finally:
-            f.close()
-        textoutput = self._run_external_case()
-        matches_error = MatchesAny(
-            Contains('\nTypeError: '), Contains('\nSyntaxError: '))
-        self.assertThat(textoutput, matches_error)
-
-    def test_syntax_error_line_iso_8859_1(self):
-        """Syntax error on a latin-1 line shows the line decoded"""
-        text, raw = self._get_sample_text("iso-8859-1")
-        self._setup_external_case("import bad")
-        self._write_module("bad", "iso-8859-1",
-            "# coding: iso-8859-1\n! = 0 # %s\n" % text)
-        textoutput = self._run_external_case()
-        self.assertIn(self._as_output(_u(
-            #'bad.py", line 2\n'
-            '    ! = 0 # %s\n'
-            '    ^\n'
-            'SyntaxError: ') %
-            (text,)), textoutput)
-
-    def test_syntax_error_line_iso_8859_5(self):
-        """Syntax error on a iso-8859-5 line shows the line decoded"""
-        text, raw = self._get_sample_text("iso-8859-5")
-        self._setup_external_case("import bad")
-        self._write_module("bad", "iso-8859-5",
-            "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
-        textoutput = self._run_external_case()
-        self.assertThat(
-            textoutput,
-            MatchesRegex(
-                self._as_output(_u(
-                #'bad.py", line 2\n'
-                '.*%% = 0 # %s\n'
-                + ' ' * self._error_on_character +
-                '\\s*\\^\n'
-                'SyntaxError:.*') %
-                (text,)),
-            re.MULTILINE | re.DOTALL)
-        )
-
-    def test_syntax_error_line_euc_jp(self):
-        """Syntax error on a euc_jp line shows the line decoded"""
-        text, raw = self._get_sample_text("euc_jp")
-        self._setup_external_case("import bad")
-        self._write_module("bad", "euc_jp",
-            "# coding: euc_jp\n$ = 0 # %s\n" % text)
-        textoutput = self._run_external_case()
-        # pypy uses cpython's multibyte codecs so has their behavior here
-        if self._is_pypy:
-            self._error_on_character = True
-        self.assertIn(self._as_output(_u(
-            #'bad.py", line 2\n'
-            '    $ = 0 # %s\n'
-            + ' ' * self._error_on_character +
-            '   ^\n'
-            'SyntaxError: ') %
-            (text,)), textoutput)
-
-    def test_syntax_error_line_utf_8(self):
-        """Syntax error on a utf-8 line shows the line decoded"""
-        text, raw = self._get_sample_text("utf-8")
-        self._setup_external_case("import bad")
-        self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
-        textoutput = self._run_external_case()
-        self.assertThat(
-            textoutput,
-            MatchesRegex(
-                self._as_output(_u(
-                    '.*bad.py", line 1\n'
-                    '\\s*\\^ = 0 # %s\n'
-                    + ' ' * self._error_on_character +
-                    '\\s*\\^\n'
-                    'SyntaxError:.*') % text),
-                re.M | re.S)
-        )
-
-
-class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
-    """Test that running under unittest produces clean ascii strings"""
-
-    def _run(self, stream, test):
-        from unittest import TextTestRunner as _Runner
-        return _Runner(stream).run(test)
-
-    def _as_output(self, text):
-        if str_is_unicode:
-            return text
-        return text.encode("utf-8")
-
-
-class TestDetailsToStr(TestCase):
-
-    def test_no_details(self):
-        string = _details_to_str({})
-        self.assertThat(string, Equals(''))
-
-    def test_binary_content(self):
-        content = content_from_stream(
-            StringIO('foo'), content_type=ContentType('image', 'jpeg'))
-        string = _details_to_str({'attachment': content})
-        self.assertThat(
-            string, Equals("""\
-Binary content:
-  attachment (image/jpeg)
-"""))
-
-    def test_single_line_content(self):
-        content = text_content('foo')
-        string = _details_to_str({'attachment': content})
-        self.assertThat(string, Equals('attachment: {{{foo}}}\n'))
-
-    def test_multi_line_text_content(self):
-        content = text_content('foo\nbar\nbaz')
-        string = _details_to_str({'attachment': content})
-        self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n'))
-
-    def test_special_text_content(self):
-        content = text_content('foo')
-        string = _details_to_str({'attachment': content}, special='attachment')
-        self.assertThat(string, Equals('foo\n'))
-
-    def test_multiple_text_content(self):
-        string = _details_to_str(
-            {'attachment': text_content('foo\nfoo'),
-             'attachment-1': text_content('bar\nbar')})
-        self.assertThat(
-            string, Equals('attachment: {{{\n'
-                           'foo\n'
-                           'foo\n'
-                           '}}}\n'
-                           '\n'
-                           'attachment-1: {{{\n'
-                           'bar\n'
-                           'bar\n'
-                           '}}}\n'))
-
-    def test_empty_attachment(self):
-        string = _details_to_str({'attachment': text_content('')})
-        self.assertThat(
-            string, Equals("""\
-Empty attachments:
-  attachment
-"""))
-
-    def test_lots_of_different_attachments(self):
-        jpg = lambda x: content_from_stream(
-            StringIO(x), ContentType('image', 'jpeg'))
-        attachments = {
-            'attachment': text_content('foo'),
-            'attachment-1': text_content('traceback'),
-            'attachment-2': jpg('pic1'),
-            'attachment-3': text_content('bar'),
-            'attachment-4': text_content(''),
-            'attachment-5': jpg('pic2'),
-            }
-        string = _details_to_str(attachments, special='attachment-1')
-        self.assertThat(
-            string, Equals("""\
-Binary content:
-  attachment-2 (image/jpeg)
-  attachment-5 (image/jpeg)
-Empty attachments:
-  attachment-4
-
-attachment: {{{foo}}}
-attachment-3: {{{bar}}}
-
-traceback
-"""))
-
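_details_to_str is private, but the behaviour pinned down above is easy to
demonstrate (the import path is an assumption based on this module's other
private imports):

    from testtools.content import text_content
    from testtools.testresult.real import _details_to_str  # path assumed

    details = {'log': text_content('line 1\nline 2'),
               'traceback': text_content('boom')}
    # special= names the one attachment rendered bare, without {{{ }}}.
    print(_details_to_str(details, special='traceback'))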
-
-class TestByTestResultTests(TestCase):
-
-    def setUp(self):
-        super(TestByTestResultTests, self).setUp()
-        self.log = []
-        self.result = TestByTestResult(self.on_test)
-        now = iter(range(5))
-        self.result._now = lambda: advance_iterator(now)
-
-    def assertCalled(self, **kwargs):
-        defaults = {
-            'test': self,
-            'tags': set(),
-            'details': None,
-            'start_time': 0,
-            'stop_time': 1,
-            }
-        defaults.update(kwargs)
-        self.assertEqual([defaults], self.log)
-
-    def on_test(self, **kwargs):
-        self.log.append(kwargs)
-
-    def test_no_tests_nothing_reported(self):
-        self.result.startTestRun()
-        self.result.stopTestRun()
-        self.assertEqual([], self.log)
-
-    def test_add_success(self):
-        self.result.startTest(self)
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertCalled(status='success')
-
-    def test_add_success_details(self):
-        self.result.startTest(self)
-        details = {'foo': 'bar'}
-        self.result.addSuccess(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', details=details)
-
-    def test_global_tags(self):
-        self.result.tags(['foo'], [])
-        self.result.startTest(self)
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', tags=set(['foo']))
-
-    def test_local_tags(self):
-        self.result.tags(['foo'], [])
-        self.result.startTest(self)
-        self.result.tags(['bar'], [])
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', tags=set(['foo', 'bar']))
-
-    def test_add_error(self):
-        self.result.startTest(self)
-        try:
-            1/0
-        except ZeroDivisionError:
-            error = sys.exc_info()
-        self.result.addError(self, error)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='error',
-            details={'traceback': TracebackContent(error, self)})
-
-    def test_add_error_details(self):
-        self.result.startTest(self)
-        details = {"foo": text_content("bar")}
-        self.result.addError(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='error', details=details)
-
-    def test_add_failure(self):
-        self.result.startTest(self)
-        try:
-            self.fail("intentional failure")
-        except self.failureException:
-            failure = sys.exc_info()
-        self.result.addFailure(self, failure)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='failure',
-            details={'traceback': TracebackContent(failure, self)})
-
-    def test_add_failure_details(self):
-        self.result.startTest(self)
-        details = {"foo": text_content("bar")}
-        self.result.addFailure(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='failure', details=details)
-
-    def test_add_xfail(self):
-        self.result.startTest(self)
-        try:
-            1/0
-        except ZeroDivisionError:
-            error = sys.exc_info()
-        self.result.addExpectedFailure(self, error)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='xfail',
-            details={'traceback': TracebackContent(error, self)})
-
-    def test_add_xfail_details(self):
-        self.result.startTest(self)
-        details = {"foo": text_content("bar")}
-        self.result.addExpectedFailure(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='xfail', details=details)
-
-    def test_add_unexpected_success(self):
-        self.result.startTest(self)
-        details = {'foo': 'bar'}
-        self.result.addUnexpectedSuccess(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', details=details)
-
-    def test_add_skip_reason(self):
-        self.result.startTest(self)
-        reason = self.getUniqueString()
-        self.result.addSkip(self, reason)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='skip', details={'reason': text_content(reason)})
-
-    def test_add_skip_details(self):
-        self.result.startTest(self)
-        details = {'foo': 'bar'}
-        self.result.addSkip(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='skip', details=details)
-
-    def test_twice(self):
-        self.result.startTest(self)
-        self.result.addSuccess(self, details={'foo': 'bar'})
-        self.result.stopTest(self)
-        self.result.startTest(self)
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertEqual(
-            [{'test': self,
-              'status': 'success',
-              'start_time': 0,
-              'stop_time': 1,
-              'tags': set(),
-              'details': {'foo': 'bar'}},
-             {'test': self,
-              'status': 'success',
-              'start_time': 2,
-              'stop_time': 3,
-              'tags': set(),
-              'details': None},
-             ],
-            self.log)
-
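TestByTestResult reduces the start/stop event stream to a single callback per
test; the callback signature matches the on_test hooks used in these tests.
A sketch (Demo is illustrative):

    import unittest
    from testtools import TestByTestResult

    def on_test(test, status, start_time, stop_time, tags, details):
        print(test.id(), status, stop_time - start_time)

    class Demo(unittest.TestCase):
        def test_ok(self):
            pass

    result = TestByTestResult(on_test)
    result.startTestRun()
    Demo('test_ok').run(result)
    result.stopTestRun()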
-
-class TestTagger(TestCase):
-
-    def test_tags_tests(self):
-        result = ExtendedTestResult()
-        tagger = Tagger(result, set(['foo']), set(['bar']))
-        test1, test2 = self, make_test()
-        tagger.startTest(test1)
-        tagger.addSuccess(test1)
-        tagger.stopTest(test1)
-        tagger.startTest(test2)
-        tagger.addSuccess(test2)
-        tagger.stopTest(test2)
-        self.assertEqual(
-            [('startTest', test1),
-             ('tags', set(['foo']), set(['bar'])),
-             ('addSuccess', test1),
-             ('stopTest', test1),
-             ('startTest', test2),
-             ('tags', set(['foo']), set(['bar'])),
-             ('addSuccess', test2),
-             ('stopTest', test2),
-             ], result._events)
-
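Tagger, shown above, simply forwards each test to the wrapped result while
injecting a fixed tags event. A sketch (the import path and Demo are
assumptions):

    import unittest
    from testtools.testresult.real import Tagger  # path assumed
    from testtools.testresult.doubles import ExtendedTestResult

    class Demo(unittest.TestCase):
        def test_ok(self):
            pass

    wrapped = ExtendedTestResult()
    tagger = Tagger(wrapped, set(['worker-0']), set())
    Demo('test_ok').run(tagger)
    # wrapped._events now contains a ('tags', set(['worker-0']), set())
    # entry between startTest and addSuccess.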
-
-class TestTimestampingStreamResult(TestCase):
-
-    def test_startTestRun(self):
-        result = TimestampingStreamResult(LoggingStreamResult())
-        result.startTestRun()
-        self.assertEqual([('startTestRun',)], result.targets[0]._events)
-
-    def test_stopTestRun(self):
-        result = TimestampingStreamResult(LoggingStreamResult())
-        result.stopTestRun()
-        self.assertEqual([('stopTestRun',)], result.targets[0]._events)
-
-    def test_status_no_timestamp(self):
-        result = TimestampingStreamResult(LoggingStreamResult())
-        result.status(test_id="A", test_status="B", test_tags="C",
-            runnable="D", file_name="E", file_bytes=b"F", eof=True,
-            mime_type="G", route_code="H")
-        events = result.targets[0]._events
-        self.assertThat(events, HasLength(1))
-        self.assertThat(events[0], HasLength(11))
-        self.assertEqual(
-            ("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"),
-            events[0][:10])
-        self.assertNotEqual(None, events[0][10])
-        self.assertIsInstance(events[0][10], datetime.datetime)
-
-    def test_status_timestamp(self):
-        result = TimestampingStreamResult(LoggingStreamResult())
-        result.status(timestamp="F")
-        self.assertEqual("F", result.targets[0]._events[0][10])
-
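TimestampingStreamResult is a thin decorator: every event is forwarded, and a
wall-clock timestamp is filled in whenever the caller did not supply one. A
sketch using the same logging double (top-level import assumed):

    from testtools import TimestampingStreamResult  # export assumed
    from testtools.testresult.doubles import StreamResult

    target = StreamResult()
    result = TimestampingStreamResult(target)
    result.startTestRun()
    result.status(test_id='demo', test_status='success')  # no timestamp
    result.stopTestRun()
    # The status event (index 1) now carries a datetime in its last slot.
    assert target._events[1][-1] is not None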
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_testsuite.py b/lib/testtools/testtools/tests/test_testsuite.py
deleted file mode 100644
index 3bbe63d..0000000
--- a/lib/testtools/testtools/tests/test_testsuite.py
+++ /dev/null
@@ -1,277 +0,0 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
-
-"""Test ConcurrentTestSuite and related things."""
-
-__metaclass__ = type
-
-import doctest
-from functools import partial
-import sys
-import unittest
-
-from extras import try_import
-
-from testtools import (
-    ConcurrentTestSuite,
-    ConcurrentStreamTestSuite,
-    iterate_tests,
-    PlaceHolder,
-    TestByTestResult,
-    TestCase,
-    )
-from testtools.compat import _b, _u
-from testtools.matchers import DocTestMatches
-from testtools.testsuite import FixtureSuite, sorted_tests
-from testtools.tests.helpers import LoggingResult
-from testtools.testresult.doubles import StreamResult as LoggingStream
-
-FunctionFixture = try_import('fixtures.FunctionFixture')
-
-
-class Sample(TestCase):
-    def __hash__(self):
-        return id(self)
-    def test_method1(self):
-        pass
-    def test_method2(self):
-        pass
-
-
-class TestConcurrentTestSuiteRun(TestCase):
-
-    def test_broken_test(self):
-        log = []
-        def on_test(test, status, start_time, stop_time, tags, details):
-            log.append((test.id(), status, set(details.keys())))
-        class BrokenTest(object):
-            # Simple break - no result parameter to run()
-            def __call__(self):
-                pass
-            run = __call__
-        original_suite = unittest.TestSuite([BrokenTest()])
-        suite = ConcurrentTestSuite(original_suite, self.split_suite)
-        suite.run(TestByTestResult(on_test))
-        self.assertEqual([('broken-runner', 'error', set(['traceback']))], log)
-
-    def test_trivial(self):
-        log = []
-        result = LoggingResult(log)
-        test1 = Sample('test_method1')
-        test2 = Sample('test_method2')
-        original_suite = unittest.TestSuite([test1, test2])
-        suite = ConcurrentTestSuite(original_suite, self.split_suite)
-        suite.run(result)
-        # log[0] is the timestamp for the first test starting.
-        test1 = log[1][1]
-        test2 = log[-1][1]
-        self.assertIsInstance(test1, Sample)
-        self.assertIsInstance(test2, Sample)
-        self.assertNotEqual(test1.id(), test2.id())
-
-    def test_wrap_result(self):
-        # ConcurrentTestSuite has a hook for wrapping the per-thread result.
-        wrap_log = []
-
-        def wrap_result(thread_safe_result, thread_number):
-            wrap_log.append(
-                (thread_safe_result.result.decorated, thread_number))
-            return thread_safe_result
-
-        result_log = []
-        result = LoggingResult(result_log)
-        test1 = Sample('test_method1')
-        test2 = Sample('test_method2')
-        original_suite = unittest.TestSuite([test1, test2])
-        suite = ConcurrentTestSuite(
-            original_suite, self.split_suite, wrap_result=wrap_result)
-        suite.run(result)
-        self.assertEqual(
-            [(result, 0),
-             (result, 1),
-             ], wrap_log)
-        # Smoke test to make sure everything ran OK.
-        self.assertNotEqual([], result_log)
-
-    def split_suite(self, suite):
-        return list(iterate_tests(suite))
-
-
-class TestConcurrentStreamTestSuiteRun(TestCase):
-
-    def test_trivial(self):
-        result = LoggingStream()
-        test1 = Sample('test_method1')
-        test2 = Sample('test_method2')
-        cases = lambda:[(test1, '0'), (test2, '1')]
-        suite = ConcurrentStreamTestSuite(cases)
-        suite.run(result)
-        def freeze(set_or_none):
-            if set_or_none is None:
-                return set_or_none
-            return frozenset(set_or_none)
-        # Ignore event order: we're testing that the code is all glued
-        # together, i.e. that events can be pumped through and route codes
-        # get added appropriately.
-        self.assertEqual(set([
-            ('status',
-             'testtools.tests.test_testsuite.Sample.test_method1',
-             'inprogress',
-             None,
-             True,
-             None,
-             None,
-             False,
-             None,
-             '0',
-             None,
-             ),
-            ('status',
-             'testtools.tests.test_testsuite.Sample.test_method1',
-             'success',
-             frozenset(),
-             True,
-             None,
-             None,
-             False,
-             None,
-             '0',
-             None,
-             ),
-            ('status',
-             'testtools.tests.test_testsuite.Sample.test_method2',
-             'inprogress',
-             None,
-             True,
-             None,
-             None,
-             False,
-             None,
-             '1',
-             None,
-             ),
-            ('status',
-             'testtools.tests.test_testsuite.Sample.test_method2',
-             'success',
-             frozenset(),
-             True,
-             None,
-             None,
-             False,
-             None,
-             '1',
-             None,
-             ),
-            ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,)
-                for event in result._events))
-
-    def test_broken_runner(self):
-        # If the object called breaks, the stream is informed about it
-        # regardless.
-        class BrokenTest(object):
-            # broken - no result parameter!
-            def __call__(self):
-                pass
-            def run(self):
-                pass
-        result = LoggingStream()
-        cases = lambda:[(BrokenTest(), '0')]
-        suite = ConcurrentStreamTestSuite(cases)
-        suite.run(result)
-        events = result._events
-        # Check the traceback loosely.
-        self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\
-Traceback (most recent call last):
-  File "...testtools/testsuite.py", line ..., in _run_test
-    test.run(process_result)
-TypeError: run() takes ...1 ...argument...2...given...
-""", doctest.ELLIPSIS))
-        events = [event[0:10] + (None,) for event in events]
-        events[1] = events[1][:6] + (None,) + events[1][7:]
-        self.assertEqual([
-            ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None),
-            ('status', "broken-runner-'0'", None, None, True, 'traceback', None,
-             True,
-             'text/x-traceback; charset="utf8"; language="python"',
-             '0',
-             None),
-             ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None)
-            ], events)
-
-    def split_suite(self, suite):
-        tests = list(enumerate(iterate_tests(suite)))
-        return [(test, _u(str(pos))) for pos, test in tests]
-
-
-class TestFixtureSuite(TestCase):
-
-    def setUp(self):
-        super(TestFixtureSuite, self).setUp()
-        if FunctionFixture is None:
-            self.skip("Need fixtures")
-
-    def test_fixture_suite(self):
-        log = []
-        class Sample(TestCase):
-            def test_one(self):
-                log.append(1)
-            def test_two(self):
-                log.append(2)
-        fixture = FunctionFixture(
-            lambda: log.append('setUp'),
-            lambda fixture: log.append('tearDown'))
-        suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
-        suite.run(LoggingResult([]))
-        self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
-
-    def test_fixture_suite_sort(self):
-        log = []
-        class Sample(TestCase):
-            def test_one(self):
-                log.append(1)
-            def test_two(self):
-                log.append(2)
-        fixture = FunctionFixture(
-            lambda: log.append('setUp'),
-            lambda fixture: log.append('tearDown'))
-        suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')])
-        self.assertRaises(ValueError, suite.sort_tests)
-
-
-class TestSortedTests(TestCase):
-
-    def test_sorts_custom_suites(self):
-        a = PlaceHolder('a')
-        b = PlaceHolder('b')
-        class Subclass(unittest.TestSuite):
-            def sort_tests(self):
-                self._tests = sorted_tests(self, True)
-        input_suite = Subclass([b, a])
-        suite = sorted_tests(input_suite)
-        self.assertEqual([a, b], list(iterate_tests(suite)))
-        self.assertEqual([input_suite], list(iter(suite)))
-
-    def test_custom_suite_without_sort_tests_works(self):
-        a = PlaceHolder('a')
-        b = PlaceHolder('b')
-        class Subclass(unittest.TestSuite):
-            pass
-        input_suite = Subclass([b, a])
-        suite = sorted_tests(input_suite)
-        self.assertEqual([b, a], list(iterate_tests(suite)))
-        self.assertEqual([input_suite], list(iter(suite)))
-
-    def test_sorts_simple_suites(self):
-        a = PlaceHolder('a')
-        b = PlaceHolder('b')
-        suite = sorted_tests(unittest.TestSuite([b, a]))
-        self.assertEqual([a, b], list(iterate_tests(suite)))
-
-    def test_duplicate_simple_suites(self):
-        a = PlaceHolder('a')
-        b = PlaceHolder('b')
-        c = PlaceHolder('a')
-        self.assertRaises(
-            ValueError, sorted_tests, unittest.TestSuite([a, b, c]))
-
-
-def test_suite():
-    from unittest import TestLoader
-    return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/testtools/tests/test_with_with.py b/lib/testtools/testtools/tests/test_with_with.py
deleted file mode 100644
index f26f0f8..0000000
--- a/lib/testtools/testtools/tests/test_with_with.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2011 testtools developers. See LICENSE for details.
-
-from __future__ import with_statement
-
-import sys
-
-from testtools import (
-    ExpectedException,
-    TestCase,
-    )
-from testtools.matchers import (
-    AfterPreprocessing,
-    Equals,
-    EndsWith,
-    )
-
-
-class TestExpectedException(TestCase):
-    """Test the ExpectedException context manager."""
-
-    def test_pass_on_raise(self):
-        with ExpectedException(ValueError, 'tes.'):
-            raise ValueError('test')
-
-    def test_pass_on_raise_matcher(self):
-        with ExpectedException(
-            ValueError, AfterPreprocessing(str, Equals('test'))):
-            raise ValueError('test')
-
-    def test_raise_on_text_mismatch(self):
-        try:
-            with ExpectedException(ValueError, 'tes.'):
-                raise ValueError('mismatch')
-        except AssertionError:
-            e = sys.exc_info()[1]
-            self.assertEqual("'mismatch' does not match /tes./", str(e))
-        else:
-            self.fail('AssertionError not raised.')
-
-    def test_raise_on_general_mismatch(self):
-        matcher = AfterPreprocessing(str, Equals('test'))
-        value_error = ValueError('mismatch')
-        try:
-            with ExpectedException(ValueError, matcher):
-                raise value_error
-        except AssertionError:
-            e = sys.exc_info()[1]
-            self.assertEqual(matcher.match(value_error).describe(), str(e))
-        else:
-            self.fail('AssertionError not raised.')
-
-    def test_raise_on_error_mismatch(self):
-        try:
-            with ExpectedException(TypeError, 'tes.'):
-                raise ValueError('mismatch')
-        except ValueError:
-            e = sys.exc_info()[1]
-            self.assertEqual('mismatch', str(e))
-        else:
-            self.fail('ValueError not raised.')
-
-    def test_raise_if_no_exception(self):
-        try:
-            with ExpectedException(TypeError, 'tes.'):
-                pass
-        except AssertionError:
-            e = sys.exc_info()[1]
-            self.assertEqual('TypeError not raised.', str(e))
-        else:
-            self.fail('AssertionError not raised.')
-
-    def test_pass_on_raise_any_message(self):
-        with ExpectedException(ValueError):
-            raise ValueError('whatever')
-
-    def test_annotate(self):
-        def die():
-            with ExpectedException(ValueError, msg="foo"):
-                pass
-        exc = self.assertRaises(AssertionError, die)
-        self.assertThat(exc.args[0], EndsWith(': foo'))
-
-    def test_annotated_matcher(self):
-        def die():
-            with ExpectedException(ValueError, 'bar', msg="foo"):
-                pass
-        exc = self.assertRaises(AssertionError, die)
-        self.assertThat(exc.args[0], EndsWith(': foo'))
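
The deleted tests above double as documentation for the ``ExpectedException``
context manager; a minimal standalone sketch (hypothetical test names, using
only behaviour the tests above demonstrate):

    from testtools import ExpectedException, TestCase

    class ExpectedExceptionDemo(TestCase):

        def test_message_checked_against_regex(self):
            # Passes: a ValueError is raised and its str() matches 'tes.'.
            with ExpectedException(ValueError, 'tes.'):
                raise ValueError('test')

        def test_any_message_accepted(self):
            # Passes: with no pattern, any ValueError is accepted.
            with ExpectedException(ValueError):
                raise ValueError('whatever')
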
diff --git a/lib/testtools/testtools/testsuite.py b/lib/testtools/testtools/testsuite.py
deleted file mode 100644
index e2945f3..0000000
--- a/lib/testtools/testtools/testsuite.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
-
-"""Test suites and related things."""
-
-__metaclass__ = type
-__all__ = [
-  'ConcurrentTestSuite',
-  'ConcurrentStreamTestSuite',
-  'filter_by_ids',
-  'iterate_tests',
-  'sorted_tests',
-  ]
-
-import sys
-import threading
-import unittest
-
-from extras import safe_hasattr, try_imports
-
-Queue = try_imports(['Queue.Queue', 'queue.Queue'])
-
-import testtools
-
-
-def iterate_tests(test_suite_or_case):
-    """Iterate through all of the test cases in 'test_suite_or_case'."""
-    try:
-        suite = iter(test_suite_or_case)
-    except TypeError:
-        yield test_suite_or_case
-    else:
-        for test in suite:
-            for subtest in iterate_tests(test):
-                yield subtest
-
-
-class ConcurrentTestSuite(unittest.TestSuite):
-    """A TestSuite whose run() calls out to a concurrency strategy."""
-
-    def __init__(self, suite, make_tests, wrap_result=None):
-        """Create a ConcurrentTestSuite to execute suite.
-
-        :param suite: A suite to run concurrently.
-        :param make_tests: A helper function to split the tests in the
-            ConcurrentTestSuite into some number of concurrently executing
-            sub-suites. make_tests must take a suite, and return an iterable
-            of TestCase-like object, each of which must have a run(result)
-            method.
-        :param wrap_result: An optional function that takes a thread-safe
-            result and a thread number and must return a ``TestResult``
-            object. If not provided, then ``ConcurrentTestSuite`` will just
-            use a ``ThreadsafeForwardingResult`` wrapped around the result
-            passed to ``run()``.
-        """
-        super(ConcurrentTestSuite, self).__init__([suite])
-        self.make_tests = make_tests
-        if wrap_result:
-            self._wrap_result = wrap_result
-
-    def _wrap_result(self, thread_safe_result, thread_number):
-        """Wrap a thread-safe result before sending it test results.
-
-        You can either override this in a subclass or pass your own
-        ``wrap_result`` in to the constructor.  The latter is preferred.
-        """
-        return thread_safe_result
-
-    def run(self, result):
-        """Run the tests concurrently.
-
-        This calls out to the provided make_tests helper, and then serialises
-        the results so that result only sees activity from one TestCase at
-        a time.
-
-        ConcurrentTestSuite provides no special mechanism to stop the tests
-        returned by make_tests; it is up to those tests to honour the
-        shouldStop attribute on the result object they are run with, which will
-        be set if an exception is raised in the thread which
-        ConcurrentTestSuite.run is called in.
-        """
-        tests = self.make_tests(self)
-        try:
-            threads = {}
-            queue = Queue()
-            semaphore = threading.Semaphore(1)
-            for i, test in enumerate(tests):
-                process_result = self._wrap_result(
-                    testtools.ThreadsafeForwardingResult(result, semaphore), i)
-                reader_thread = threading.Thread(
-                    target=self._run_test, args=(test, process_result, queue))
-                threads[test] = reader_thread, process_result
-                reader_thread.start()
-            while threads:
-                finished_test = queue.get()
-                threads[finished_test][0].join()
-                del threads[finished_test]
-        except:
-            for thread, process_result in threads.values():
-                process_result.stop()
-            raise
-
-    def _run_test(self, test, process_result, queue):
-        try:
-            try:
-                test.run(process_result)
-            except Exception as e:
-                # The run logic itself failed.
-                case = testtools.ErrorHolder(
-                    "broken-runner",
-                    error=sys.exc_info())
-                case.run(process_result)
-        finally:
-            queue.put(test)
-
-
-class ConcurrentStreamTestSuite(object):
-    """A TestSuite whose run() parallelises."""
-
-    def __init__(self, make_tests):
-        """Create a ConcurrentTestSuite to execute tests returned by make_tests.
-
-        :param make_tests: A helper function that should return some number
-            of concurrently executable test suite / test case objects.
-            make_tests must take no parameters and return an iterable of
-            tuples. Each tuple must be of the form (case, route_code), where
-            case is a TestCase-like object with a run(result) method, and
-            route_code is either None or a unicode string.
-        """
-        super(ConcurrentStreamTestSuite, self).__init__()
-        self.make_tests = make_tests
-
-    def run(self, result):
-        """Run the tests concurrently.
-
-        This calls out to the provided make_tests helper to determine the
-        concurrency to use and to assign routing codes to each worker.
-
-        ConcurrentStreamTestSuite provides no special mechanism to stop the
-        tests returned by make_tests; it is up to those tests to honour the
-        shouldStop attribute on the result object they are run with, which will
-        be set if the test run is to be aborted.
-
-        The tests are run with an ExtendedToStreamDecorator wrapped around a
-        StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from
-        the queue and forwards them to result. Tests can therefore be either
-        original unittest tests (or compatible tests), or new tests that emit
-        StreamResult events directly.
-
-        :param result: A StreamResult instance. The caller is responsible for
-            calling startTestRun on this instance prior to invoking suite.run,
-            and stopTestRun subsequent to the run method returning.
-        """
-        tests = self.make_tests()
-        try:
-            threads = {}
-            queue = Queue()
-            for test, route_code in tests:
-                to_queue = testtools.StreamToQueue(queue, route_code)
-                process_result = testtools.ExtendedToStreamDecorator(
-                    testtools.TimestampingStreamResult(to_queue))
-                runner_thread = threading.Thread(
-                    target=self._run_test,
-                    args=(test, process_result, route_code))
-                threads[to_queue] = runner_thread, process_result
-                runner_thread.start()
-            while threads:
-                event_dict = queue.get()
-                event = event_dict.pop('event')
-                if event == 'status':
-                    result.status(**event_dict)
-                elif event == 'stopTestRun':
-                    thread = threads.pop(event_dict['result'])[0]
-                    thread.join()
-                elif event == 'startTestRun':
-                    pass
-                else:
-                    raise ValueError('unknown event type %r' % (event,))
-        except:
-            for thread, process_result in threads.values():
-                # Signal to each TestControl in the ExtendedToStreamDecorator
-                # that the thread should stop running tests and clean up.
-                process_result.stop()
-            raise
-
-    def _run_test(self, test, process_result, route_code):
-        process_result.startTestRun()
-        try:
-            try:
-                test.run(process_result)
-            except Exception as e:
-                # The run logic itself failed.
-                case = testtools.ErrorHolder(
-                    "broken-runner-'%s'" % (route_code,),
-                    error=sys.exc_info())
-                case.run(process_result)
-        finally:
-            process_result.stopTestRun()
-
-
-class FixtureSuite(unittest.TestSuite):
-
-    def __init__(self, fixture, tests):
-        super(FixtureSuite, self).__init__(tests)
-        self._fixture = fixture
-
-    def run(self, result):
-        self._fixture.setUp()
-        try:
-            super(FixtureSuite, self).run(result)
-        finally:
-            self._fixture.cleanUp()
-
-    def sort_tests(self):
-        self._tests = sorted_tests(self, True)
-
-
-def _flatten_tests(suite_or_case, unpack_outer=False):
-    try:
-        tests = iter(suite_or_case)
-    except TypeError:
-        # Not iterable, assume it's a test case.
-        return [(suite_or_case.id(), suite_or_case)]
-    if (type(suite_or_case) in (unittest.TestSuite,) or
-        unpack_outer):
-        # Plain old test suite (or any others we may add).
-        result = []
-        for test in tests:
-            # Recurse to flatten.
-            result.extend(_flatten_tests(test))
-        return result
-    else:
-        # Find any old actual test and grab its id.
-        suite_id = None
-        tests = iterate_tests(suite_or_case)
-        for test in tests:
-            suite_id = test.id()
-            break
-        # If it has a sort_tests method, call that.
-        if safe_hasattr(suite_or_case, 'sort_tests'):
-            suite_or_case.sort_tests()
-        return [(suite_id, suite_or_case)]
-
-
-def filter_by_ids(suite_or_case, test_ids):
-    """Remove tests from suite_or_case where their id is not in test_ids.
-
-    :param suite_or_case: A test suite or test case.
-    :param test_ids: Something that supports the __contains__ protocol.
-    :return: suite_or_case, unless suite_or_case was a case that itself
-        fails the predicate, in which case a new, empty unittest.TestSuite
-        is returned.
-
-    This helper exists to provide backwards compatibility with older versions
-    of Python (currently all versions :)) that don't have a native
-    filter_by_ids() method on Test(Case|Suite).
-
-    For subclasses of TestSuite, filtering is done by:
-        - attempting to call suite.filter_by_ids(test_ids)
-        - if there is no method, iterating the suite and identifying tests to
-          remove, then removing them from _tests, manually recursing into
-          each entry.
-
-    For objects with an id() method (TestCases), filtering is done by:
-        - attempting to return case.filter_by_ids(test_ids)
-        - if there is no such method, checking whether case.id() is in
-          test_ids, returning case if it is, or an empty TestSuite() if not.
-
-    For anything else, it is not filtered - it is returned as-is.
-
-    To provide compatibility with this routine for a custom TestSuite, just
-    define a filter_by_ids() method that will return a TestSuite equivalent to
-    the original minus any tests not in test_ids.
-    Similarly, to provide compatibility for a custom TestCase that does
-    something unusual, define filter_by_ids to return a new TestCase object
-    that will only run test_ids that are in the provided container. If none
-    would run, return an empty TestSuite().
-
-    The contract for this function does not require mutation - each filtered
-    object can choose to return a new object with the filtered tests. However,
-    because existing custom TestSuite classes in the wild do not have this
-    method, we need a way to copy their state correctly, which is tricky:
-    thus the backwards-compatible code paths attempt to mutate in place rather
-    than guessing how to reconstruct a new suite.
-    """
-    # Compatible objects
-    if safe_hasattr(suite_or_case, 'filter_by_ids'):
-        return suite_or_case.filter_by_ids(test_ids)
-    # TestCase objects.
-    if safe_hasattr(suite_or_case, 'id'):
-        if suite_or_case.id() in test_ids:
-            return suite_or_case
-        else:
-            return unittest.TestSuite()
-    # Standard TestSuites or derived classes [assumed to be mutable].
-    if isinstance(suite_or_case, unittest.TestSuite):
-        filtered = []
-        for item in suite_or_case:
-            filtered.append(filter_by_ids(item, test_ids))
-        suite_or_case._tests[:] = filtered
-    # Everything else:
-    return suite_or_case
-
-
-def sorted_tests(suite_or_case, unpack_outer=False):
-    """Sort suite_or_case while preserving non-vanilla TestSuites."""
-    # Duplicate test ids can induce a TypeError in Python 3.3.
-    # Detect duplicate test ids and raise an exception when one is found.
-    seen = set()
-    for test_case in iterate_tests(suite_or_case):
-        test_id = test_case.id()
-        if test_id not in seen:
-            seen.add(test_id)
-        else:
-            raise ValueError('Duplicate test id detected: %s' % (test_id,))
-    tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
-    tests.sort()
-    return unittest.TestSuite([test for (sort_key, test) in tests])
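
The make_tests contract documented on ConcurrentTestSuite above is easiest to
see with a concrete splitter. A minimal sketch (the two-way split and all
helper names are illustrative, not part of the API):

    import unittest

    from testtools import ConcurrentTestSuite
    from testtools.testsuite import iterate_tests

    def split_suite_in_two(suite):
        # make_tests receives the ConcurrentTestSuite and must return an
        # iterable of TestCase-like objects with a run(result) method;
        # plain TestSuites satisfy that contract.
        tests = list(iterate_tests(suite))
        midpoint = len(tests) // 2
        return [unittest.TestSuite(tests[:midpoint]),
                unittest.TestSuite(tests[midpoint:])]

    # Usage, given a loaded suite and a result object:
    #     concurrent = ConcurrentTestSuite(suite, split_suite_in_two)
    #     concurrent.run(result)
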
diff --git a/lib/testtools/testtools/utils.py b/lib/testtools/testtools/utils.py
deleted file mode 100644
index 0f39d8f..0000000
--- a/lib/testtools/testtools/utils.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (c) 2008-2010 testtools developers. See LICENSE for details.
-
-"""Utilities for dealing with stuff in unittest.
-
-Legacy - deprecated - use testtools.testsuite.iterate_tests
-"""
-
-import warnings
-warnings.warn("Please import iterate_tests from testtools.testsuite - "
-    "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2)
-
-from testtools.testsuite import iterate_tests
-
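
Callers of the deleted shim only need their import updated; the
DeprecationWarning above already names the replacement:

    # Deprecated spelling (removed along with the shim):
    #     from testtools.utils import iterate_tests
    # Canonical spelling:
    from testtools.testsuite import iterate_tests
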
diff --git a/lib/update-external.sh b/lib/update-external.sh
index 61bcac3..efb4e67 100755
--- a/lib/update-external.sh
+++ b/lib/update-external.sh
@@ -19,7 +19,7 @@ rsync -avz --delete "$WORKDIR/subunit/" "$LIBDIR/subunit/"
 echo "Updating testtools..."
 git clone git://github.com/testing-cabal/testtools "$WORKDIR/testtools"
 rm -rf "$WORKDIR/testtools/.git"
-rsync -avz --delete "$WORKDIR/testtools/" "$LIBDIR/testtools/"
+rsync -avz --delete "$WORKDIR/testtools/" "$THIRD_PARTY_DIR/testtools/"
 
 echo "Updating dnspython..."
 git clone git://www.dnspython.org/dnspython.git "$WORKDIR/dnspython"
diff --git a/lib/wscript_build b/lib/wscript_build
index a3f304c..e7751fd 100644
--- a/lib/wscript_build
+++ b/lib/wscript_build
@@ -5,7 +5,6 @@ import os, Options
 # work out what python external libraries we need to install
 external_libs = {
     "subunit": "subunit/python/subunit",
-    "testtools": "testtools/testtools",
     }
 
 list = []
diff --git a/python/samba/tests/__init__.py b/python/samba/tests/__init__.py
index 667f548..84fc684 100644
--- a/python/samba/tests/__init__.py
+++ b/python/samba/tests/__init__.py
@@ -28,7 +28,7 @@ import tempfile
 
 samba.ensure_third_party_module("mimeparse", "mimeparse")
 samba.ensure_third_party_module("extras", "extras")
-samba.ensure_external_module("testtools", "testtools")
+samba.ensure_third_party_module("testtools", "testtools")
 
 # Other modules import these two classes from here, for convenience:
 from testtools.testcase import (
diff --git a/selftest/subunithelper.py b/selftest/subunithelper.py
index dddb29b..220f903 100644
--- a/selftest/subunithelper.py
+++ b/selftest/subunithelper.py
@@ -20,7 +20,7 @@ __all__ = ['parse_results']
 import samba
 samba.ensure_third_party_module("mimeparse", "mimeparse")
 samba.ensure_third_party_module("extras", "python-extras")
-samba.ensure_external_module("testtools", "testtools")
+samba.ensure_third_party_module("testtools", "testtools")
 def check_subunit(mod):
     try:
         __import__("subunit.run.TestProgram")
diff --git a/third_party/testtools/.gitignore b/third_party/testtools/.gitignore
new file mode 100644
index 0000000..acf9b74
--- /dev/null
+++ b/third_party/testtools/.gitignore
@@ -0,0 +1,18 @@
+__pycache__
+./build
+MANIFEST
+dist
+tags
+TAGS
+apidocs
+_trial_temp
+doc/_build
+.testrepository
+.lp_creds
+./testtools.egg-info
+*.pyc
+*.swp
+*~
+testtools.egg-info
+/build/
+/.env/
diff --git a/third_party/testtools/.gitreview b/third_party/testtools/.gitreview
new file mode 100644
index 0000000..5d15856
--- /dev/null
+++ b/third_party/testtools/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.testing-cabal.org
+port=29418
+project=testing-cabal/testtools.git
diff --git a/third_party/testtools/.testr.conf b/third_party/testtools/.testr.conf
new file mode 100644
index 0000000..e695109
--- /dev/null
+++ b/third_party/testtools/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=${PYTHON:-python} -m subunit.run $LISTOPT $IDOPTION testtools.tests.test_suite
+test_id_option=--load-list $IDFILE
+test_list_option=--list
diff --git a/third_party/testtools/.travis.yml b/third_party/testtools/.travis.yml
new file mode 100644
index 0000000..5e0e85a
--- /dev/null
+++ b/third_party/testtools/.travis.yml
@@ -0,0 +1,25 @@
+language: python
+
+python:
+  - "2.6"
+  - "2.7"
+  - "3.3"
+  - "pypy"
+
+# We have to pin Jinja2 < 2.7 for Python 3.2 because 2.7 drops/breaks support:
+# http://jinja.pocoo.org/docs/changelog/#version-2-7
+#
+# See also:
+# http://stackoverflow.com/questions/18252804/syntax-error-in-jinja-2-library
+matrix:
+  include:
+    - python: "3.2"
+      env: JINJA_REQ="jinja2<2.7"
+
+install:
+  - pip install -q --use-mirrors fixtures extras python-mimeparse $JINJA_REQ sphinx
+  - python setup.py -q install
+
+script:
+  - python -m testtools.run testtools.tests.test_suite
+  - make clean-sphinx docs
diff --git a/third_party/testtools/LICENSE b/third_party/testtools/LICENSE
new file mode 100644
index 0000000..21010cc
--- /dev/null
+++ b/third_party/testtools/LICENSE
@@ -0,0 +1,59 @@
+Copyright (c) 2008-2011 Jonathan M. Lange <jml at mumak.net> and the testtools
+authors.
+
+The testtools authors are:
+ * Canonical Ltd
+ * Twisted Matrix Labs
+ * Jonathan Lange
+ * Robert Collins
+ * Andrew Bennetts
+ * Benjamin Peterson
+ * Jamu Kakar
+ * James Westby
+ * Martin [gz]
+ * Michael Hudson-Doyle
+ * Aaron Bentley
+ * Christian Kampka
+ * Gavin Panella
+ * Martin Pool
+ * Vincent Ladeuil
+ * Nikola Đipanov
+
+and are collectively referred to as "testtools developers".
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Some code in testtools/run.py taken from Python's unittest module:
+Copyright (c) 1999-2003 Steve Purcell
+Copyright (c) 2003-2010 Python Software Foundation
+
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE.  THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
diff --git a/third_party/testtools/MANIFEST.in b/third_party/testtools/MANIFEST.in
new file mode 100644
index 0000000..4619349
--- /dev/null
+++ b/third_party/testtools/MANIFEST.in
@@ -0,0 +1,10 @@
+include LICENSE
+include Makefile
+include MANIFEST.in
+include NEWS
+include README.rst
+include .gitignore
+graft doc
+graft doc/_static
+graft doc/_templates
+prune doc/_build
diff --git a/third_party/testtools/Makefile b/third_party/testtools/Makefile
new file mode 100644
index 0000000..c637123
--- /dev/null
+++ b/third_party/testtools/Makefile
@@ -0,0 +1,56 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
+PYTHON=python
+SOURCES=$(shell find testtools -name "*.py")
+
+check:
+	PYTHONPATH=$(PWD) $(PYTHON) -m testtools.run testtools.tests.test_suite
+
+TAGS: ${SOURCES}
+	ctags -e -R testtools/
+
+tags: ${SOURCES}
+	ctags -R testtools/
+
+clean: clean-sphinx
+	rm -f TAGS tags
+	find testtools -name "*.pyc" -exec rm '{}' \;
+
+prerelease:
+	# An existing MANIFEST breaks distutils sometimes. Avoid that.
+	-rm MANIFEST
+
+release:
+	./setup.py sdist bdist_wheel upload --sign
+	$(PYTHON) scripts/_lp_release.py
+
+snapshot: prerelease
+	./setup.py sdist bdist_wheel
+
+### Documentation ###
+
+apidocs:
+	# pydoctor emits deprecation warnings under Ubuntu 10.10 LTS
+	PYTHONWARNINGS='ignore::DeprecationWarning' \
+		pydoctor --make-html --add-package testtools \
+		--docformat=restructuredtext --project-name=testtools \
+		--project-url=https://github.com/testing-cabal/testtools
+
+doc/news.rst:
+	ln -s ../NEWS doc/news.rst
+
+docs: doc/news.rst docs-sphinx
+	rm doc/news.rst
+
+docs-sphinx: html-sphinx
+
+# Clean out generated documentation
+clean-sphinx:
+	cd doc && make clean
+
+# Build the html docs using Sphinx.
+html-sphinx:
+	cd doc && make html
+
+.PHONY: apidocs docs-sphinx clean-sphinx html-sphinx docs
+.PHONY: check clean prerelease release
diff --git a/third_party/testtools/NEWS b/third_party/testtools/NEWS
new file mode 100644
index 0000000..f061ae0
--- /dev/null
+++ b/third_party/testtools/NEWS
@@ -0,0 +1,1414 @@
+testtools NEWS
+++++++++++++++
+
+Changes and improvements to testtools_, grouped by release.
+
+
+NEXT
+~~~~
+
+Changes
+-------
+
+* Fixed unit tests which were failing under pypy due to a change in the way
+  pypy formats tracebacks. (Thomi Richards)
+
+* Make `testtools.content.text_content` error if anything other than text
+  is given as content. (Thomi Richards)
+
+* We now publish wheels of testtools. (Robert Collins, #issue84)
+
+1.1.0
+~~~~~
+
+Improvements
+------------
+
+* Exceptions in a ``fixture.getDetails`` method will no longer mask errors
+  raised from the same fixture's ``setUp`` method.
+  (Robert Collins, #1368440)
+
+1.0.0
+~~~~~
+
+Long overdue, we've adopted a backwards compatibility statement and recognized
+that we have plenty of users depending on our behaviour - calling our version
+1.0.0 is a recognition of that.
+
+Improvements
+------------
+
+* Fix a long-standing bug where tearDown and cleanUps would not be called if the
+  test run was interrupted. This should fix leaking external resources from
+  interrupted tests.
+  (Robert Collins, #1364188)
+
+* Fix a long-standing bug where calling sys.exit(0) from within a test would
+  cause the test suite to exit with 0, without reporting a failure of that
+  test. We still allow the test suite to be exited (since catching higher order
+  exceptions requires exceptional circumstances) but we now call a last-resort
+  handler on the TestCase, resulting in an error being reported for the test.
+  (Robert Collins, #1364188)
+
+* Fix an issue where tests skipped with the ``skip*`` family of decorators
+  would still have their ``setUp`` and ``tearDown`` functions called.
+  (Thomi Richards, https://github.com/testing-cabal/testtools/issues/86)
+
+* We have adopted a formal backwards compatibility statement (see hacking.rst)
+  (Robert Collins)
+
+0.9.39
+~~~~~~
+
+Brown paper bag release - 0.9.38 was broken for some users,
+_jython_aware_splitext was not defined entirely compatibly.
+(Robert Collins, https://github.com/testing-cabal/testtools/issues/100)
+
+0.9.38
+~~~~~~
+
+Bug fixes for test importing.
+
+Improvements
+------------
+
+* Discovery import error detection wasn't implemented for python 2.6 (the
+  'discover' module). (Robert Collins)
+
+* Discovery now executes load_tests (if present) in __init__ in all packages.
+  (Robert Collins, http://bugs.python.org/issue16662)
+
+0.9.37
+~~~~~~
+
+Minor improvements to correctness.
+
+Changes
+-------
+
+* ``stdout`` is now correctly honoured on ``run.TestProgram`` - previously
+  runner objects would be created with no stdout parameter. If construction
+  fails, the previous parameter list is attempted, permitting compatibility
+  with Runner classes that don't accept stdout as a parameter.
+  (Robert Collins)
+
+* The ``ExtendedToStreamDecorator`` now handles content objects with one less
+  packet - the last packet of the source content is sent with EOF set rather
+  than an empty packet with EOF set being sent after the last packet of the
+  source content. (Robert Collins)
+
+0.9.36
+~~~~~~
+
+Welcome to our long overdue 0.9.36 release, which improves compatibility with
+Python3.4, adds assert_that, a function for using matchers without TestCase
+objects, and finally will error if you try to use setUp or tearDown twice -
+since that invariably leads to bad things of one sort or another happening.
+
+Changes
+-------
+
+* Error if ``setUp`` or ``tearDown`` are called twice.
+  (Robert Collins, #882884)
+
+* Make testtools compatible with the ``unittest.expectedFailure`` decorator in
+  Python 3.4. (Thomi Richards)
+
+
+Improvements
+------------
+
+* Introduce the assert_that function, which allows matchers to be used
+  independent of testtools.TestCase. (Daniel Watkins, #1243834)
+
+
+0.9.35
+~~~~~~
+
+Changes
+-------
+
+* Removed a number of code paths where Python 2.4 and Python 2.5 were
+  explicitly handled. (Daniel Watkins)
+
+Improvements
+------------
+
+* Added the ``testtools.TestCase.expectThat`` method, which implements
+  delayed assertions. (Thomi Richards)
+
+* Docs are now built as part of the Travis-CI build, reducing the chance of
+  Read The Docs being broken accidentally. (Daniel Watkins, #1158773)
+
+0.9.34
+~~~~~~
+
+Improvements
+------------
+
+* Added ability for ``testtools.TestCase`` instances to force a test to
+  fail, even if no assertions failed. (Thomi Richards)
+
+* Added ``testtools.content.StacktraceContent``, a content object that
+  automatically creates a ``StackLinesContent`` object containing the current
+  stack trace. (Thomi Richards)
+
+* ``AnyMatch`` is now exported properly in ``testtools.matchers``.
+  (Robert Collins, Rob Kennedy, github #44)
+
+* In Python 3.3, duplicate test ids make tests.sort() fail with a
+  TypeError. sorted_tests() now detects duplicate test ids up front,
+  ensuring that all test ids are unique.
+  (Kui Shi, #1243922)
+
+* ``json_content`` is now in the ``__all__`` attribute for
+  ``testtools.content``. (Robert Collins)
+
+* Network tests now bind to 127.0.0.1 to avoid opening (even temporary)
+  network-visible ports. (Benedikt Morbach, github #46)
+
+* Test listing now explicitly indicates import failures, by printing 'Failed
+  to import' and exiting (2), rather than only signalling through the test
+  name. (Robert Collins, #1245672)
+
+* ``test_compat.TestDetectEncoding.test_bom`` now works on Python 3.3 - the
+  corner case with euc_jp is no longer permitted in Python 3.3 so we can
+  skip it. (Martin [gz], #1251962)
+
+0.9.33
+~~~~~~
+
+Improvements
+------------
+
+* Added ``addDetailUniqueName`` method to the ``testtools.TestCase`` class.
+  (Thomi Richards)
+
+* Removed some unused code from ``testtools.content.TracebackContent``.
+  (Thomi Richards)
+
+* Added ``testtools.StackLinesContent``: a content object for displaying
+  pre-processed stack lines. (Thomi Richards)
+
+* ``StreamSummary`` was calculating testsRun incorrectly: ``exists`` status
+  tests were counted as run tests, but they are not.
+  (Robert Collins, #1203728)
+
+0.9.32
+~~~~~~
+
+Regular maintenance release.  Special thanks to new contributor, Xiao Hanyu!
+
+Changes
+-------
+
+* ``testtools.compat._format_exc_info`` has been refactored into several
+  smaller functions. (Thomi Richards)
+
+Improvements
+------------
+
+* Stacktrace filtering no longer hides unittest frames that are surrounded by
+  user frames. We will reenable this when we figure out a better algorithm for
+  retaining meaning. (Robert Collins, #1188420)
+
+* The compatibility code for skipped tests with unittest2 was broken.
+  (Robert Collins, #1190951)
+
+* Various documentation improvements (Clint Byrum, Xiao Hanyu).
+
+0.9.31
+~~~~~~
+
+Improvements
+------------
+
+* ``ExpectedException`` now accepts a msg parameter for describing an error,
+  much the same as assertEquals etc. (Robert Collins)
+
+0.9.30
+~~~~~~
+
+A new sort of TestResult, the StreamResult, has been added as a prototype for
+a revised standard library test result API.  Expect this API to change.
+Although we will try to preserve compatibility for early adopters, it is
+experimental and we might need to break it if it turns out to be unsuitable.
+
+Improvements
+------------
+
+* ``assertRaises`` works properly for exception classes that have custom
+  metaclasses.
+
+* ``ConcurrentTestSuite`` was silently eating exceptions that propagate from
+  the test.run(result) method call. Ignoring them is fine in a normal test
+  runner, but when they happen in a different thread, the thread that called
+  suite.run() is not in the stack anymore, and the exceptions are lost. We now
+  create a synthetic test recording any such exception.
+  (Robert Collins, #1130429)
+
+* Fixed SyntaxError raised in ``_compat2x.py`` when installing via Python 3.
+  (Will Bond, #941958)
+
+* New class ``StreamResult`` which defines the API for the new result type.
+  (Robert Collins)
+
+* New support class ``ConcurrentStreamTestSuite`` for convenient construction
+  and utilisation of ``StreamToQueue`` objects. (Robert Collins)
+
+* New support class ``CopyStreamResult`` which forwards events onto multiple
+  ``StreamResult`` objects (each of which receives all the events).
+  (Robert Collins)
+
+* New support class ``StreamSummary`` which summarises a ``StreamResult``
+  stream compatibly with ``TestResult`` code. (Robert Collins)
+
+* New support class ``StreamTagger`` which adds or removes tags from
+  ``StreamResult`` events. (Robert Collins)
+
+* New support class ``StreamToDict`` which converts a ``StreamResult`` to a
+  series of dicts describing a test. Useful for writing trivial stream
+  analysers. (Robert Collins)
+
+* New support class ``TestControl`` which permits cancelling an in-progress
+  run. (Robert Collins)
+
+* New support class ``StreamFailFast`` which calls a ``TestControl`` instance
+  to abort the test run when a failure is detected. (Robert Collins)
+
+* New support class ``ExtendedToStreamDecorator`` which translates both regular
+  unittest TestResult API calls and the ExtendedTestResult API which testtools
+  has supported into the StreamResult API. ExtendedToStreamDecorator also
+  forwards calls made in the StreamResult API, permitting it to be used
+  anywhere a StreamResult is used. Key TestResult query methods like
+  wasSuccessful and shouldStop are synchronised with the StreamResult API
+  calls, but the detailed statistics like the list of errors are not - a
+  separate consumer will be created to support that.
+  (Robert Collins)
+
+* New support class ``StreamToExtendedDecorator`` which translates
+  ``StreamResult`` API calls into ``ExtendedTestResult`` (or any older
+  ``TestResult``) calls. This permits using un-migrated result objects with
+  new runners / tests. (Robert Collins)
+
+* New support class ``StreamToQueue`` for sending messages to one
+  ``StreamResult`` from multiple threads. (Robert Collins)
+
+* New support class ``TimestampingStreamResult`` which adds a timestamp to
+  events with no timestamp. (Robert Collins)
+
+* New ``TestCase`` decorator ``DecorateTestCaseResult`` that adapts the
+  ``TestResult`` or ``StreamResult`` a case will be run with, for ensuring that
+  a particular result object is used even if the runner running the test doesn't
+  know to use it. (Robert Collins)
+
+* New test support class ``testtools.testresult.doubles.StreamResult``, which
+  captures all the StreamResult events. (Robert Collins)
+
+* ``PlaceHolder`` can now hold tags, and applies them before, and removes them
+  after, the test. (Robert Collins)
+
+* ``PlaceHolder`` can now hold timestamps, and applies them before the test and
+  then before the outcome. (Robert Collins)
+
+* ``StreamResultRouter`` added. This is useful for demultiplexing - e.g. for
+  partitioning analysis of events or sending feedback encapsulated in
+  StreamResult events back to their source. (Robert Collins)
+
+* ``testtools.run.TestProgram`` now supports the ``TestRunner`` taking over
+  responsibility for formatting the output of ``--list-tests``.
+  (Robert Collins)
+
+* The error message for setUp and tearDown upcall errors was broken on Python
+  3.4. (Monty Taylor, Robert Collins, #1140688)
+
+* The repr of object() on pypy includes the object id, which was breaking a
+  test that accidentally depended on the CPython repr for object().
+  (Jonathan Lange)
+
+0.9.29
+~~~~~~
+
+A simple bug fix, and better error messages when you don't up-call.
+
+Changes
+-------
+
+* ``testtools.content_type.ContentType`` incorrectly used ',' rather than ';'
+  to separate parameters. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.compat.unicode_output_stream`` was wrapping a stream encoder
+  around ``io.StringIO`` and ``io.TextIOWrapper`` objects, which was incorrect.
+  (Robert Collins)
+
+* Report the name of the source file for setUp and tearDown upcall errors.
+  (Monty Taylor)
+
+0.9.28
+~~~~~~
+
+Testtools has moved VCS - https://github.com/testing-cabal/testtools/ is
+the new home. Bug tracking is still on Launchpad, and releases are on Pypi.
+
+We made this change to take advantage of the richer ecosystem of tools around
+Git, and to lower the barrier for new contributors.
+
+Improvements
+------------
+
+* New ``testtools.testcase.attr`` and ``testtools.testcase.WithAttributes``
+  helpers allow marking up test case methods with simple labels. This permits
+  filtering tests with more granularity than organising them into modules and
+  test classes. (Robert Collins)
+
+0.9.27
+~~~~~~
+
+Improvements
+------------
+
+* New matcher ``HasLength`` for matching the length of a collection.
+  (Robert Collins)
+
+* New matcher ``MatchesPredicateWithParams`` makes it still easier to create
+  ad hoc matchers. (Robert Collins)
+
+* We have a simpler release process in future - see doc/hacking.rst.
+  (Robert Collins)
+
+0.9.26
+~~~~~~
+
+Brown paper bag fix: failed to document the need for setup to be able to use
+extras. Compounded by pip not supporting setup_requires.
+
+Changes
+-------
+
+* setup.py now can generate egg_info even if extras is not available.
+  Also lists extras in setup_requires for easy_install.
+  (Robert Collins, #1102464)
+
+0.9.25
+~~~~~~
+
+Changes
+-------
+
+* ``python -m testtools.run --load-list`` will now preserve any custom suites
+  (such as ``testtools.FixtureSuite`` or ``testresources.OptimisingTestSuite``)
+  rather than flattening them.
+  (Robert Collins, #827175)
+
+* Testtools now depends on extras, a small library split out from it to contain
+  generally useful non-testing facilities. Since extras has been around for a
+  couple of testtools releases now, we're making this into a hard dependency of
+  testtools. (Robert Collins)
+
+* Testtools now uses setuptools rather than distutils so that we can document
+  the extras dependency. (Robert Collins)
+
+Improvements
+------------
+
+* Testtools will no longer override details registered by test code under
+  the name 'traceback' when reporting caught exceptions from test code.
+  (Robert Collins, #812793)
+
+0.9.24
+~~~~~~
+
+Changes
+-------
+
+* ``testtools.run discover`` will now sort the tests it discovered. This is a
+  workaround for http://bugs.python.org/issue16709. Non-standard test suites
+  are preserved, and their ``sort_tests()`` method called (if they have such an
+  attribute). ``testtools.testsuite.sorted_tests(suite, True)`` can be used by
+  such suites to do a local sort. (Robert Collins, #1091512)
+
+* ``ThreadsafeForwardingResult`` now defines a stub ``progress`` method, which
+  fixes ``testr run`` of streams containing progress markers (by discarding the
+  progress data). (Robert Collins, #1019165)
+
+0.9.23
+~~~~~~
+
+Changes
+-------
+
+* ``run.TestToolsTestRunner`` now accepts the verbosity, buffer and failfast
+  arguments the upstream python TestProgram code wants to give it, making it
+  possible to support them in a compatible fashion. (Robert Collins)
+
+Improvements
+------------
+
+* ``testtools.run`` now supports the ``-f`` or ``--failfast`` parameter.
+  Previously it was advertised in the help but ignored.
+  (Robert Collins, #1090582)
+
+* ``AnyMatch`` added, a new matcher that matches when any item in a collection
+  matches the given matcher.  (Jonathan Lange)
+
+* Spelling corrections to documentation.  (Vincent Ladeuil)
+
+* ``TestProgram`` now has a sane default for its ``testRunner`` argument.
+  (Vincent Ladeuil)
+
+* The test suite passes on Python 3 again. (Robert Collins)
+
+0.9.22
+~~~~~~
+
+Improvements
+------------
+
+* ``content_from_file`` and ``content_from_stream`` now accept seek_offset and
+  seek_whence parameters allowing them to be used to grab less than the full
+  stream, or to be used with StringIO streams. (Robert Collins, #1088693)
+
+0.9.21
+~~~~~~
+
+Improvements
+------------
+
+* ``DirContains`` correctly exposed, after being accidentally hidden in the
+  great matcher re-organization of 0.9.17.  (Jonathan Lange)
+
+
+0.9.20
+~~~~~~
+
+Three new matchers that'll rock your world.
+
+Improvements
+------------
+
+* New, powerful matchers that match items in a dictionary:
+
+  - ``MatchesDict``, match every key in a dictionary with a key in a
+    dictionary of matchers.  For when the set of expected keys is equal to the
+    set of observed keys.
+
+  - ``ContainsDict``, every key in a dictionary of matchers must be
+    found in a dictionary, and the values for those keys must match.  For when
+    the set of expected keys is a subset of the set of observed keys.
+
+  - ``ContainedByDict``, every key in a dictionary must be found in
+    a dictionary of matchers.  For when the set of expected keys is a superset
+    of the set of observed keys.
+
+  The names are a little confusing, sorry.  We're still trying to figure out
+  how to present the concept in the simplest way possible.
+
+
+0.9.19
+~~~~~~
+
+How embarrassing!  Three releases in two days.
+
+We've worked out the kinks and have confirmation from our downstreams that
+this is all good.  Should be the last release for a little while.  Please
+ignore 0.9.18 and 0.9.17.
+
+Improvements
+------------
+
+* Include the matcher tests in the release, allowing the tests to run and
+  pass from the release tarball.  (Jonathan Lange)
+
+* Fix cosmetic test failures in Python 3.3, introduced during release 0.9.17.
+  (Jonathan Lange)
+
+
+0.9.18
+~~~~~~
+
+Due to an oversight, release 0.9.18 did not contain the new
+``testtools.matchers`` package and was thus completely broken.  This release
+corrects that, returning us all to normality.
+
+0.9.17
+~~~~~~
+
+This release brings better discover support and Python3.x improvements. There
+are still some test failures on Python3.3 but they are cosmetic - the library
+is as usable there as on any other Python 3 release.
+
+Changes
+-------
+
+* The ``testtools.matchers`` package has been split up.  No change to the
+  public interface.  (Jonathan Lange)
+
+Improvements
+------------
+
+* ``python -m testtools.run discover . --list`` now works. (Robert Collins)
+
+* Correct handling of bytes vs text in JSON content type. (Martin [gz])
+
+
+0.9.16
+~~~~~~
+
+Some new matchers and a new content helper for JSON content.
+
+This is the first release of testtools to drop support for Python 2.4 and 2.5.
+If you need support for either of those versions, please use testtools 0.9.15.
+
+Improvements
+------------
+
+* New content helper, ``json_content`` (Jonathan Lange)
+
+* New matchers:
+
+  * ``ContainsAll`` for asserting one thing is a subset of another
+    (Raphaël Badin)
+
+  * ``SameMembers`` for asserting two iterators have the same members.
+    (Jonathan Lange)
+
+* Reraising of exceptions in Python 3 is more reliable. (Martin [gz])
+
+
+0.9.15
+~~~~~~
+
+This is the last release to support Python2.4 and 2.5. It brings in a slew of
+improvements to test tagging and concurrency, making running large test suites
+with partitioned workers more reliable and easier to reproduce exact test
+ordering in a given worker. See our sister project ``testrepository`` for a
+test runner that uses these features.
+
+Changes
+-------
+
+* ``PlaceHolder`` and ``ErrorHolder`` now support being given result details.
+  (Robert Collins)
+
+* ``ErrorHolder`` is now just a function - all the logic is in ``PlaceHolder``.
+  (Robert Collins)
+
+* ``TestResult`` and all other ``TestResult``-like objects in testtools
+  distinguish between global tags and test-local tags, as per the subunit
+  specification.  (Jonathan Lange)
+
+* This is the **last** release of testtools that supports Python 2.4 or 2.5.
+  These releases are no longer supported by the Python community and do not
+  receive security updates. If this affects you, you will need to either
+  stay on this release or perform your own backports.
+  (Jonathan Lange, Robert Collins)
+
+* ``ThreadsafeForwardingResult`` now forwards global tags as test-local tags,
+  making reasoning about the correctness of the multiplexed stream simpler.
+  This preserves the semantic value (what tags apply to a given test) while
+  consuming less stream size (as no negative-tag statement is needed).
+  (Robert Collins, Gary Poster, #986434)
+
+Improvements
+------------
+
+* API documentation corrections. (Raphaël Badin)
+
+* ``ConcurrentTestSuite`` now takes an optional ``wrap_result`` parameter
+  that can be used to wrap the ``ThreadsafeForwardingResults`` created by
+  the suite.  (Jonathan Lange)
+
+* ``Tagger`` added.  It's a new ``TestResult`` that tags all tests sent to
+  it with a particular set of tags.  (Jonathan Lange)
+
+* ``testresultdecorator`` brought over from subunit.  (Jonathan Lange)
+
+* All ``TestResult`` wrappers now correctly forward ``current_tags`` from
+  their wrapped results, meaning that ``current_tags`` can always be relied
+  upon to return the currently active tags on a test result.
+
+* ``TestByTestResult``, a ``TestResult`` that calls a method once per test,
+  added.  (Jonathan Lange)
+
+* ``ThreadsafeForwardingResult`` correctly forwards ``tags()`` calls where
+  only one of ``new_tags`` or ``gone_tags`` are specified.
+  (Jonathan Lange, #980263)
+
+* ``ThreadsafeForwardingResult`` no longer leaks local tags from one test
+  into all future tests run.  (Jonathan Lange, #985613)
+
+* ``ThreadsafeForwardingResult`` has many, many more tests.  (Jonathan Lange)
+
+
+0.9.14
+~~~~~~
+
+Our sister project, `subunit <https://launchpad.net/subunit>`_, was using a
+private API that was deleted in the 0.9.13 release.  This release restores
+that API in order to smooth out the upgrade path.
+
+If you don't use subunit, then this release won't matter very much to you.
+
+
+0.9.13
+~~~~~~
+
+Plenty of new matchers and quite a few critical bug fixes (especially to do
+with stack traces from failed assertions).  A net win for all.
+
+Changes
+-------
+
+* ``MatchesAll`` now takes a ``first_only`` keyword argument that changes how
+  mismatches are displayed.  If you were previously passing matchers to
+  ``MatchesAll`` with keyword arguments, then this change might affect your
+  test results.  (Jonathan Lange)
+
+Improvements
+------------
+
+* Actually hide all of the testtools stack for assertion failures. The
+  previous release promised clean stack, but now we actually provide it.
+  (Jonathan Lange, #854769)
+
+* ``assertRaises`` now includes the ``repr`` of the callable that failed to raise
+  properly. (Jonathan Lange, #881052)
+
+* Asynchronous tests no longer hang when run with trial.
+  (Jonathan Lange, #926189)
+
+* ``Content`` objects now have an ``as_text`` method to convert their contents
+  to Unicode text.  (Jonathan Lange)
+
+* Failed equality assertions now line up. (Jonathan Lange, #879339)
+
+* ``FullStackRunTest`` no longer aborts the test run if a test raises an
+  error.  (Jonathan Lange)
+
+* ``MatchesAll`` and ``MatchesListwise`` both take a ``first_only`` keyword
+  argument.  If True, they will report only on the first mismatch they find,
+  and not continue looking for other possible mismatches.
+  (Jonathan Lange)
+
+* New helper, ``Nullary`` that turns callables with arguments into ones that
+  don't take arguments.  (Jonathan Lange)
+
+* New matchers:
+
+  * ``DirContains`` matches the contents of a directory.
+    (Jonathan Lange, James Westby)
+
+  * ``DirExists`` matches if a directory exists.
+    (Jonathan Lange, James Westby)
+
+  * ``FileContains`` matches the contents of a file.
+    (Jonathan Lange, James Westby)
+
+  * ``FileExists`` matches if a file exists.
+    (Jonathan Lange, James Westby)
+
+  * ``HasPermissions`` matches the permissions of a file.  (Jonathan Lange)
+
+  * ``MatchesPredicate`` matches if a predicate is true.  (Jonathan Lange)
+
+  * ``PathExists`` matches if a path exists.  (Jonathan Lange, James Westby)
+
+  * ``SamePath`` matches if two paths are the same.  (Jonathan Lange)
+
+  * ``TarballContains`` matches the contents of a tarball.  (Jonathan Lange)
+
+* ``MultiTestResult`` supports the ``tags`` method.
+  (Graham Binns, Francesco Banconi, #914279)
+
+* ``ThreadsafeForwardingResult`` supports the ``tags`` method.
+  (Graham Binns, Francesco Banconi, #914279)
+
+* ``ThreadsafeForwardingResult`` no longer includes semaphore acquisition time
+  in the test duration (for implicitly timed test runs).
+  (Robert Collins, #914362)
+
+0.9.12
+~~~~~~
+
+This is a very big release.  We've made huge improvements on three fronts:
+ 1. Test failures are way nicer and easier to read
+ 2. Matchers and ``assertThat`` are much more convenient to use
+ 3. Correct handling of extended unicode characters
+
+We've trimmed off the fat from the stack trace you get when tests fail, we've
+cut out the bits of error messages that just didn't help, we've made it easier
+to annotate mismatch failures, to compare complex objects and to match raised
+exceptions.
+
+Testing code was never this fun.
+
+Changes
+-------
+
+* ``AfterPreproccessing`` renamed to ``AfterPreprocessing``, which is a more
+  correct spelling.  Old name preserved for backwards compatibility, but is
+  now deprecated.  Please stop using it.
+  (Jonathan Lange, #813460)
+
+* ``assertThat`` raises ``MismatchError`` instead of
+  ``TestCase.failureException``.  ``MismatchError`` is a subclass of
+  ``AssertionError``, so in most cases this change will not matter. However,
+  if ``self.failureException`` has been set to a non-default value, then
+  mismatches will become test errors rather than test failures.
+
+* ``gather_details`` takes two dicts, rather than two detailed objects.
+  (Jonathan Lange, #801027)
+
+* ``MatchesRegex`` mismatch now says "<value> does not match /<regex>/" rather
+  than "<regex> did not match <value>". The regular expression contains fewer
+  backslashes too. (Jonathan Lange, #818079)
+
+* Tests that run with ``AsynchronousDeferredRunTest`` now have the ``reactor``
+  attribute set to the running reactor. (Jonathan Lange, #720749)
+
+Improvements
+------------
+
+* All public matchers are now in ``testtools.matchers.__all__``.
+  (Jonathan Lange, #784859)
+
+* ``assertThat`` can actually display mismatches and matchers that contain
+  extended unicode characters. (Jonathan Lange, Martin [gz], #804127)
+
+* ``assertThat`` output is much less verbose, displaying only what the mismatch
+  tells us to display. Old-style verbose output can be had by passing
+  ``verbose=True`` to assertThat. (Jonathan Lange, #675323, #593190)
+
+* ``assertThat`` accepts a message which will be used to annotate the matcher.
+  This can be given as a third parameter or as a keyword parameter.
+  (Robert Collins)
+
+* Automated the Launchpad part of the release process.
+  (Jonathan Lange, #623486)
+
+* Correctly display non-ASCII unicode output on terminals that claim to have a
+  unicode encoding. (Martin [gz], #804122)
+
+* ``DocTestMatches`` correctly handles unicode output from examples, rather
+  than raising an error. (Martin [gz], #764170)
+
+* ``ErrorHolder`` and ``PlaceHolder`` added to docs. (Jonathan Lange, #816597)
+
+* ``ExpectedException`` now matches any exception of the given type by
+  default, and also allows specifying a ``Matcher`` rather than a mere regular
+  expression. (Jonathan Lange, #791889)
+
+* ``FixtureSuite`` added, allows test suites to run with a given fixture.
+  (Jonathan Lange)
+
+* Hide testtools's own stack frames when displaying tracebacks, making it
+  easier for test authors to focus on their errors.
+  (Jonathan Lange, Martin [gz], #788974)
+
+* Less boilerplate displayed in test failures and errors.
+  (Jonathan Lange, #660852)
+
+* ``MatchesException`` now allows you to match exceptions against any matcher,
+  rather than just regular expressions.  (Jonathan Lange, #791889)
+
+* ``MatchesException`` now permits a tuple of types rather than a single type
+  (when using the type matching mode).  (Robert Collins)
+
+* ``MatchesStructure.byEquality`` added to make the common case of matching
+  many attributes by equality much easier.  ``MatchesStructure.byMatcher``
+  added in case folk want to match by things other than equality.
+  (Jonathan Lange)
+
+* New convenience assertions, ``assertIsNone`` and ``assertIsNotNone``.
+  (Christian Kampka)
+
+* New matchers:
+
+  * ``AllMatch`` matches many values against a single matcher.
+    (Jonathan Lange, #615108)
+
+  * ``Contains``. (Robert Collins)
+
+  * ``GreaterThan``. (Christian Kampka)
+
+* New helper, ``safe_hasattr`` added. (Jonathan Lange)
+
+* ``reraise`` added to ``testtools.compat``. (Jonathan Lange)
+
+
+0.9.11
+~~~~~~
+
+This release brings consistent use of super for better compatibility with
+multiple inheritance, fixed Python3 support, improvements in fixture and
+matcher output, and a compat helper for testing libraries that deal with
+bytestrings.
+
+Changes
+-------
+
+* ``TestCase`` now uses super to call base ``unittest.TestCase`` constructor,
+  ``setUp`` and ``tearDown``. (Tim Cole, #771508)
+
+* If, when calling ``useFixture`` an error occurs during fixture set up, we
+  still attempt to gather details from the fixture. (Gavin Panella)
+
+
+Improvements
+------------
+
+* Additional compat helper for ``BytesIO`` for libraries that build on
+  testtools and are working on Python 3 porting. (Robert Collins)
+
+* Corrected documentation for ``MatchesStructure`` in the test authors
+  document.  (Jonathan Lange)
+
+* ``LessThan`` error message now says something that is logically correct.
+  (Gavin Panella, #762008)
+
+* Multiple details from a single fixture are now kept separate, rather than
+  being mooshed together. (Gavin Panella, #788182)
+
+* Python 3 support now back in action. (Martin [gz], #688729)
+
+* ``try_import`` and ``try_imports`` have a callback that is called whenever
+  they fail to import a module.  (Martin Pool)
+
+
+0.9.10
+~~~~~~
+
+The last release of testtools could not be easy_installed.  This is considered
+severe enough for a re-release.
+
+Improvements
+------------
+
+* Include ``doc/`` in the source distribution, making testtools installable
+  from PyPI again (Tres Seaver, #757439)
+
+
+0.9.9
+~~~~~
+
+Many, many new matchers, vastly expanded documentation, stacks of bug fixes,
+better unittest2 integration.  If you've ever wanted to try out testtools but
+been afraid to do so, this is the release to try.
+
+
+Changes
+-------
+
+* The timestamps generated by ``TestResult`` objects when no timing data has
+  been received are now datetime-with-timezone, which allows them to be
+  sensibly serialised and transported. (Robert Collins, #692297)
+
+Improvements
+------------
+
+* ``AnnotatedMismatch`` now correctly returns details.
+  (Jonathan Lange, #724691)
+
+* distutils integration for the testtools test runner. Can now use it for
+  'python setup.py test'. (Christian Kampka, #693773)
+
+* ``EndsWith`` and ``KeysEqual`` now in testtools.matchers.__all__.
+  (Jonathan Lange, #692158)
+
+* ``MatchesException`` extended to support a regular expression check against
+  the str() of a raised exception.  (Jonathan Lange)
+
+* ``MultiTestResult`` now forwards the ``time`` API. (Robert Collins, #692294)
+
+* ``MultiTestResult`` now documented in the manual. (Jonathan Lange, #661116)
+
+* New content helpers ``content_from_file``, ``content_from_stream`` and
+  ``attach_file`` make it easier to attach file-like objects to a
+  test. (Jonathan Lange, Robert Collins, #694126)
+
+* New ``ExpectedException`` context manager to help write tests against things
+  that are expected to raise exceptions. (Aaron Bentley)
+
+* New matchers:
+
+  * ``MatchesListwise`` matches an iterable of matchers against an iterable
+    of values. (Michael Hudson-Doyle)
+
+  * ``MatchesRegex`` matches a string against a regular expression.
+    (Michael Hudson-Doyle)
+
+  * ``MatchesStructure`` matches attributes of an object against given
+    matchers.  (Michael Hudson-Doyle)
+
+  * ``AfterPreproccessing`` matches values against a matcher after passing them
+    through a callable.  (Michael Hudson-Doyle)
+
+  * ``MatchesSetwise`` matches an iterable of matchers against an iterable of
+    values, without regard to order.  (Michael Hudson-Doyle)
+
+* ``setup.py`` can now build a snapshot when Bazaar is installed but the tree
+  is not a Bazaar tree. (Jelmer Vernooij)
+
+* Support for running tests using distutils (Christian Kampka, #726539)
+
+* Vastly improved and extended documentation. (Jonathan Lange)
+
+* Use unittest2 exception classes if available. (Jelmer Vernooij)
+
+
+0.9.8
+~~~~~
+
+In this release we bring some very interesting improvements:
+
+* new matchers for exceptions, sets, lists, dicts and more.
+
+* experimental (works but the contract isn't supported) twisted reactor
+  support.
+
+* The built in runner can now list tests and filter tests (the -l and
+  --load-list options).
+
+Changes
+-------
+
+* addUnexpectedSuccess is translated to addFailure for test results that don't
+  know about addUnexpectedSuccess.  Further, it fails the entire result for
+  all testtools TestResults (i.e. wasSuccessful() returns False after
+  addUnexpectedSuccess has been called). Note that when using a delegating
+  result such as ThreadsafeForwardingResult, MultiTestResult or
+  ExtendedToOriginalDecorator, the behaviour of addUnexpectedSuccess is
+  determined by the delegated-to result(s).
+  (Jonathan Lange, Robert Collins, #654474, #683332)
+
+* startTestRun will reset any errors on the result.  That is, wasSuccessful()
+  will always return True immediately after startTestRun() is called. This
+  only applies to delegated test results (ThreadsafeForwardingResult,
+  MultiTestResult and ExtendedToOriginalDecorator) if the delegated-to result
+  is a testtools test result - we cannot reliably reset the state of unknown
+  test result class instances. (Jonathan Lange, Robert Collins, #683332)
+
+* Responsibility for running test cleanups has been moved to ``RunTest``.
+  This change does not affect public APIs and can be safely ignored by test
+  authors.  (Jonathan Lange, #662647)
+
+Improvements
+------------
+
+* New matchers:
+
+  * ``EndsWith`` which complements the existing ``StartsWith`` matcher.
+    (Jonathan Lange, #669165)
+
+  * ``MatchesException`` matches an exception class and parameters. (Robert
+    Collins)
+
+  * ``KeysEqual`` matches a dictionary with particular keys.  (Jonathan Lange)
+
+* ``assertIsInstance`` allows a custom error message to be supplied, which
+  is necessary when using ``assertDictEqual`` on Python 2.7 with a
+  ``testtools.TestCase`` base class. (Jelmer Vernooij)
+
+* Experimental support for running tests that return Deferreds.
+  (Jonathan Lange, Martin [gz])
+
+* Provide a per-test decorator, run_test_with, to specify which RunTest
+  object to use for a given test.  (Jonathan Lange, #657780)
+
+* Fix the runTest parameter of TestCase to actually work, rather than raising
+  a TypeError.  (Jonathan Lange, #657760)
+
+* Non-release snapshots of testtools will now work with buildout.
+  (Jonathan Lange, #613734)
+
+* Malformed SyntaxErrors no longer blow up the test suite.  (Martin [gz])
+
+* ``MismatchesAll.describe`` no longer appends a trailing newline.
+  (Michael Hudson-Doyle, #686790)
+
+* New helpers for conditionally importing modules, ``try_import`` and
+  ``try_imports``.  (Jonathan Lange)
+
+* ``Raises`` added to the ``testtools.matchers`` module - matches if the
+  supplied callable raises, and delegates to an optional matcher for validation
+  of the exception. (Robert Collins)
+
+* ``raises`` added to the ``testtools.matchers`` module - matches if the
+  supplied callable raises and delegates to ``MatchesException`` to validate
+  the exception. (Jonathan Lange)
+
+* Tests will now pass on Python 2.6.4: an ``Exception`` change made only in
+  2.6.4 and reverted in Python 2.6.5 was causing test failures on that version.
+  (Martin [gz], #689858)
+
+* ``testtools.TestCase.useFixture`` has been added to glue with fixtures nicely.
+  (Robert Collins)
+
+* ``testtools.run`` now supports ``-l`` to list tests rather than executing
+  them. This is useful for integration with external test analysis/processing
+  tools like subunit and testrepository. (Robert Collins)
+
+* ``testtools.run`` now supports ``--load-list``, which takes a file containing
+  test ids, one per line, and intersects those ids with the tests found. This
+  allows fine grained control of what tests are run even when the tests cannot
+  be named as objects to import (e.g. due to test parameterisation via
+  testscenarios). (Robert Collins)
+
+* Update documentation to say how to use testtools.run() on Python 2.4.
+  (Jonathan Lange, #501174)
+
+* ``text_content`` conveniently converts a Python string to a Content object.
+  (Jonathan Lange, James Westby)
+
+
+
+0.9.7
+~~~~~
+
+Lots of little cleanups in this release; many small improvements to make your
+testing life more pleasant.
+
+Improvements
+------------
+
+* Cleanups can raise ``testtools.MultipleExceptions`` if they have multiple
+  exceptions to report. For instance, a cleanup which is itself responsible for
+  running several different internal cleanup routines might use this.
+
+* Code duplication between assertEqual and the matcher Equals has been removed.
+
+* In normal circumstances, a TestCase will no longer share details with clones
+  of itself. (Andrew Bennetts, bug #637725)
+
+* Fewer exception object cycles are generated (reduces peak memory use between
+  garbage collections). (Martin [gz])
+
+* New matchers 'DoesNotStartWith' and 'StartsWith' contributed by Canonical
+  from the Launchpad project. Written by James Westby.
+
+* Timestamps as produced by subunit protocol clients are now forwarded in the
+  ThreadsafeForwardingResult so correct test durations can be reported.
+  (Martin [gz], Robert Collins, #625594)
+
+* With unittest from Python 2.7 skipped tests will now show only the reason
+  rather than a serialisation of all details. (Martin [gz], #625583)
+
+* The testtools release process is now a little better documented and a little
+  smoother.  (Jonathan Lange, #623483, #623487)
+
+
+0.9.6
+~~~~~
+
+Nothing major in this release, just enough small bits and pieces to make it
+useful enough to upgrade to.
+
+In particular, a serious bug in assertThat() has been fixed; it's easier to
+write Matchers; there's a TestCase.patch() method for those inevitable monkey
+patches; and TestCase.assertEqual gives slightly nicer errors.
+
+Improvements
+------------
+
+* 'TestCase.assertEqual' now formats errors a little more nicely, in the
+  style of bzrlib.
+
+* Added `PlaceHolder` and `ErrorHolder`, TestCase-like objects that can be
+  used to add results to a `TestResult`.
+
+* 'Mismatch' now takes optional description and details parameters, so
+  custom Matchers aren't compelled to make their own subclass.
+
+* jml added a built-in UTF8_TEXT ContentType to make it slightly easier to
+  add details to test results. See bug #520044.
+
+* Fix a bug in our built-in matchers where assertThat would blow up if any
+  of them failed. All built-in mismatch objects now provide get_details().
+
+* New 'Is' matcher, which lets you assert that a thing is identical to
+  another thing.
+
+* New 'LessThan' matcher which lets you assert that a thing is less than
+  another thing.
+
+* TestCase now has a 'patch()' method to make it easier to monkey-patch
+  objects in tests. See the manual for more information. Fixes bug #310770.
+
+* MultiTestResult methods now pass back return values from the results they
+  forward to.
+
+0.9.5
+~~~~~
+
+This release fixes some obscure traceback formatting issues that probably
+weren't affecting you but were certainly breaking our own test suite.
+
+Changes
+-------
+
+* Jamu Kakar has updated classes in testtools.matchers and testtools.runtest
+  to be new-style classes, fixing bug #611273.
+
+Improvements
+------------
+
+* Martin[gz] fixed traceback handling to handle cases where extract_tb returns
+  a source line of None. Fixes bug #611307.
+
+* Martin[gz] fixed a unicode issue that was causing the tests to fail,
+  closing bug #604187.
+
+* testtools now handles string exceptions (although why would you want to use
+  them?) and formats their tracebacks correctly. Thanks to Martin[gz] for
+  fixing bug #592262.
+
+0.9.4
+~~~~~
+
+This release overhauls the traceback formatting layer to deal with Python 2
+line numbers and traceback objects often being local user encoded strings
+rather than unicode objects. Test discovery has also been added and Python 3.1
+is also supported. Finally, the Mismatch protocol has been extended to let
+Matchers collaborate with tests in supplying detailed data about failures.
+
+Changes
+-------
+
+* testtools.utils has been renamed to testtools.compat. Importing
+  testtools.utils will now generate a deprecation warning.
+
+Improvements
+------------
+
+* Add machinery for Python 2 to create unicode tracebacks like those used by
+  Python 3. This means testtools no longer throws on encountering non-ascii
+  filenames, source lines, or exception strings when displaying test results.
+  Largely contributed by Martin[gz] with some tweaks from Robert Collins.
+
+* James Westby has supplied test discovery support using the Python 2.7
+  TestRunner in testtools.run. This requires the 'discover' module. This
+  closes bug #250764.
+
+* Python 3.1 is now supported, thanks to Martin[gz] for a partial patch.
+  This fixes bug #592375.
+
+* TestCase.addCleanup has had its docstring corrected about when cleanups run.
+
+* TestCase.skip is now deprecated in favour of TestCase.skipTest, which is the
+  Python2.7 spelling for skip. This closes bug #560436.
+
+* A patch from Martin[gz] making the tests work on IronPython has been
+  applied.
+
+* Thanks to a patch from James Westby testtools.matchers.Mismatch can now
+  supply a get_details method, which assertThat will query to provide
+  additional attachments. This can be used to provide additional detail
+  about the mismatch that doesn't suit being included in describe(). For
+  instance, if the match process was complex, a log of the process could be
+  included, permitting debugging.
+
+* testtools.testresults.real._StringException will now answer __str__ if its
+  value is unicode by encoding with UTF8, and vice versa to answer __unicode__.
+  This permits subunit decoded exceptions to contain unicode and still format
+  correctly.
+
+0.9.3
+~~~~~
+
+More matchers, Python 2.4 support, faster test cloning by switching to copy
+rather than deepcopy and better output when exceptions occur in cleanups are
+the defining characteristics of this release.
+
+Improvements
+------------
+
+* New matcher "Annotate" that adds a simple string message to another matcher,
+  much like the optional 'message' parameter to standard library assertFoo
+  methods.
+
+* New matchers "Not" and "MatchesAll". "Not" will invert another matcher, and
+  "MatchesAll" that needs a successful match for all of its arguments.
+
+* On Python 2.4, where types.FunctionType cannot be deepcopied, testtools will
+  now monkeypatch copy._deepcopy_dispatch using the same trivial patch that
+  added such support to Python 2.5. The monkey patch is triggered by the
+  absence of FunctionType from the dispatch dict rather than a version check.
+  Bug #498030.
+
+* On Windows the test 'test_now_datetime_now' should now work reliably.
+
+* TestCase.getUniqueInteger and TestCase.getUniqueString now have docstrings.
+
+* TestCase.getUniqueString now takes an optional prefix parameter, so you can
+  now use it in circumstances that forbid strings with '.'s, and such like.
+
+* testtools.testcase.clone_test_with_new_id now uses copy.copy, rather than
+  copy.deepcopy. Tests that need a deeper copy should use the copy protocol to
+  control how they are copied. Bug #498869.
+
+* The backtrace test result output tests should now pass on Windows and other
+  systems where os.sep is not '/'.
+
+* When a cleanUp or tearDown exception occurs, it is now accumulated as a new
+  traceback in the test details, rather than as a separate call to addError /
+  addException. This makes testtools work better with most TestResult objects
+  and fixes bug #335816.
+
+
+0.9.2
+~~~~~
+
+Python 3 support, more matchers and better consistency with Python 2.7 --
+you'd think that would be enough for a point release. Well, we here on the
+testtools project think that you deserve more.
+
+We've added a hook so that user code can be called just-in-time whenever there
+is an exception, and we've also factored out the "run" logic of test cases so
+that new outcomes can be added without fiddling with the actual flow of logic.
+
+It might sound like small potatoes, but it's changes like these that will
+bring about the end of test frameworks.
+
+
+Improvements
+------------
+
+* A failure in setUp or tearDown is now reported as a failure, not an error.
+
+* Cleanups now run after tearDown to be consistent with Python 2.7's cleanup
+  feature.
+
+* ExtendedToOriginalDecorator now passes unrecognised attributes through
+  to the decorated result object, permitting other extensions to the
+  TestCase -> TestResult protocol to work.
+
+* It is now possible to trigger code just-in-time after an exception causes
+  a test outcome such as failure or skip. See the testtools MANUAL or
+  ``pydoc testtools.TestCase.addOnException``. (bug #469092)
+
+* New matcher Equals which performs a simple equality test.
+
+* New matcher MatchesAny which looks for a match of any of its arguments.
+
+* TestCase no longer breaks if a TestSkipped exception is raised with no
+  parameters.
+
+* TestCase.run now clones test cases before they are run and runs the clone.
+  This reduces memory footprint in large test runs - state accumulated on
+  test objects during their setup and execution gets freed when the test case
+  has finished running unless the TestResult object keeps a reference.
+  NOTE: As test cloning uses deepcopy, this can potentially interfere if
+  a test suite has shared state (such as the testscenarios or testresources
+  projects use).  Use the __deepcopy__ hook to control the copying of such
+  objects so that the shared references stay shared.
+
+* Testtools now accepts contributions without copyright assignment under some
+  circumstances. See HACKING for details.
+
+* Testtools now provides a convenient way to run a test suite using the
+  testtools result object: python -m testtools.run testspec [testspec...].
+
+* Testtools now works on Python 3, thanks to Benjamin Peterson.
+
+* Test execution now uses a separate class, testtools.RunTest to run single
+  tests. This can be customised and extended in a more consistent fashion than
+  the previous run method idiom. See pydoc for more information.
+
+* The test doubles that testtools itself uses are now available as part of
+  the testtools API in testtools.testresult.doubles.
+
+* TracebackContent now sets utf8 as the charset encoding, rather than not
+  setting one and encoding with the default encoder.
+
+* With Python 2.7, testtools.TestSkipped will be the unittest.case.SkipTest
+  exception class, making skips compatible with code that manually raises the
+  standard library exception. (bug #490109)
+
+Changes
+-------
+
+* TestCase.getUniqueInteger is now implemented using itertools.count. Thanks
+  to Benjamin Peterson for the patch. (bug #490111)
+
+
+0.9.1
+~~~~~
+
+The new matcher API introduced in 0.9.0 had a small flaw where the matchee
+would be evaluated twice to get a description of the mismatch. This could lead
+to bugs if the act of matching caused side effects to occur in the matchee.
+Since having such side effects isn't desirable, we have changed the API now
+before it has become widespread.
+
+Changes
+-------
+
+* Matcher API changed to avoid evaluating matchee twice. Please consult
+  the API documentation.
+
+* TestCase.getUniqueString now uses the test id, not the test method name,
+  which works nicer with parameterised tests.
+
+Improvements
+------------
+
+* Python 2.4 is now supported again.
+
+
+0.9.0
+~~~~~
+
+This release of testtools is perhaps the most interesting and exciting one
+it's ever had. We've continued in bringing together the best practices of unit
+testing from across a raft of different Python projects, but we've also
+extended our mission to incorporating unit testing concepts from other
+languages and from our own research, led by Robert Collins.
+
+We now support skipping and expected failures. We'll make sure that you
+up-call setUp and tearDown, avoiding unexpected testing weirdnesses. We're
+now compatible with the Python 2.5, 2.6 and 2.7 unittest library.
+
+All in all, if you are serious about unit testing and want to get the best
+thinking from the whole Python community, you should get this release.
+
+Improvements
+------------
+
+* A new TestResult API has been added for attaching details to test outcomes.
+  This API is currently experimental, but is being prepared with the intent
+  of becoming an upstream Python API. For more details see pydoc
+  testtools.TestResult and the TestCase addDetail / getDetails methods.
+
+* assertThat has been added to TestCase. This new assertion supports
+  a hamcrest-inspired matching protocol. See pydoc testtools.Matcher for
+  details about writing matchers, and testtools.matchers for the included
+  matchers. See http://code.google.com/p/hamcrest/.
+
+* Compatible with Python 2.6 and Python 2.7.
+
+* Failing to upcall in setUp or tearDown will now cause a test failure.
+  While the base methods do nothing, failing to upcall is usually a problem
+  in deeper hierarchies, and checking that the root method is called is a
+  simple way to catch this common bug.
+
+* New TestResult decorator ExtendedToOriginalDecorator which handles
+  downgrading extended API calls like addSkip to older result objects that
+  do not support them. This is used internally to make testtools simpler but
+  can also be used to simplify other code built on or for use with testtools.
+
+* New TextTestResult supporting the extended APIs that testtools provides.
+
+* Nose will no longer find 'runTest' tests in classes derived from
+  testtools.testcase.TestCase (bug #312257).
+
+* Supports the Python 2.7/3.1 addUnexpectedSuccess and addExpectedFailure
+  TestResult methods, with a support function 'knownFailure' to let tests
+  trigger these outcomes.
+
+* When using the skip feature with TestResult objects that do not support it
+  a test success will now be reported. Previously an error was reported but
+  production experience has shown that this is too disruptive for projects that
+  are using skips: they cannot get a clean run on down-level result objects.
+
+
+.. _testtools: http://pypi.python.org/pypi/testtools
diff --git a/third_party/testtools/README.rst b/third_party/testtools/README.rst
new file mode 100644
index 0000000..cddb594
--- /dev/null
+++ b/third_party/testtools/README.rst
@@ -0,0 +1,92 @@
+=========
+testtools
+=========
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework.
+
+These extensions have been derived from years of experience with unit testing
+in Python and come from many different sources.
+
+
+Documentation
+-------------
+
+If you would like to learn more about testtools, consult our documentation in
+the 'doc/' directory.  You might like to start at 'doc/overview.rst' or
+'doc/for-test-authors.rst'.
+
+
+Licensing
+---------
+
+This project is distributed under the MIT license and copyright is owned by
+Jonathan M. Lange and the testtools authors. See LICENSE for details.
+
+Some code in 'testtools/run.py' is taken from Python's unittest module, and is
+copyright Steve Purcell and the Python Software Foundation; it is distributed
+under the same license as Python. See LICENSE for details.
+
+
+Required Dependencies
+---------------------
+
+ * Python 2.6+ or 3.0+
+
+If you would like to use testtools for earlier Pythons, please use testtools
+0.9.15.
+
+ * extras (helpers that we intend to push into Python itself in the near
+   future).
+
+
+Optional Dependencies
+---------------------
+
+If you would like to use our undocumented, unsupported Twisted support, then
+you will need Twisted.
+
+If you want to use ``fixtures`` then you can either install fixtures (e.g. from
+https://launchpad.net/python-fixtures or http://pypi.python.org/pypi/fixtures)
+or alternatively just make sure your fixture objects obey the same protocol.
+
+
+Bug reports and patches
+-----------------------
+
+Please report bugs using Launchpad at <https://bugs.launchpad.net/testtools>.
+Patches should be submitted as GitHub pull requests, or mailed to the authors.
+See ``doc/hacking.rst`` for more details.
+
+There's no mailing list for this project yet, however the testing-in-python
+mailing list may be a useful resource:
+
+ * Address: testing-in-python at lists.idyll.org
+ * Subscription link: http://lists.idyll.org/listinfo/testing-in-python
+
+
+History
+-------
+
+testtools used to be called 'pyunit3k'.  The name was changed to avoid
+conflating the library with the Python 3.0 release (commonly referred to as
+'py3k').
+
+
+Thanks
+------
+
+ * Canonical Ltd
+ * Bazaar
+ * Twisted Matrix Labs
+ * Robert Collins
+ * Andrew Bennetts
+ * Benjamin Peterson
+ * Jamu Kakar
+ * James Westby
+ * Martin [gz]
+ * Michael Hudson-Doyle
+ * Aaron Bentley
+ * Christian Kampka
+ * Gavin Panella
+ * Martin Pool
diff --git a/third_party/testtools/doc/Makefile b/third_party/testtools/doc/Makefile
new file mode 100644
index 0000000..b5d07af
--- /dev/null
+++ b/third_party/testtools/doc/Makefile
@@ -0,0 +1,89 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html      to make standalone HTML files"
+	@echo "  dirhtml   to make HTML files named index.html in directories"
+	@echo "  pickle    to make pickle files"
+	@echo "  json      to make JSON files"
+	@echo "  htmlhelp  to make HTML files and a HTML help project"
+	@echo "  qthelp    to make HTML files and a qthelp project"
+	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  changes   to make an overview of all changed/added/deprecated items"
+	@echo "  linkcheck to check all external links for integrity"
+	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	-rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/testtools.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/testtools.qhc"
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+	      "run these through (pdf)latex."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/third_party/testtools/doc/_static/placeholder.txt b/third_party/testtools/doc/_static/placeholder.txt
new file mode 100644
index 0000000..e69de29
diff --git a/third_party/testtools/doc/_templates/placeholder.txt b/third_party/testtools/doc/_templates/placeholder.txt
new file mode 100644
index 0000000..e69de29
diff --git a/third_party/testtools/doc/api.rst b/third_party/testtools/doc/api.rst
new file mode 100644
index 0000000..425c818
--- /dev/null
+++ b/third_party/testtools/doc/api.rst
@@ -0,0 +1,26 @@
+testtools API documentation
+===========================
+
+Generated reference documentation for all the public functionality of
+testtools.
+
+Please :doc:`send patches </hacking>` if you notice anything confusing or
+wrong, or that could be improved.
+
+
+.. toctree::
+   :maxdepth: 2
+
+
+testtools
+---------
+
+.. automodule:: testtools
+   :members:
+
+
+testtools.matchers
+------------------
+
+.. automodule:: testtools.matchers
+   :members:
diff --git a/third_party/testtools/doc/conf.py b/third_party/testtools/doc/conf.py
new file mode 100644
index 0000000..de5fdd4
--- /dev/null
+++ b/third_party/testtools/doc/conf.py
@@ -0,0 +1,194 @@
+# -*- coding: utf-8 -*-
+#
+# testtools documentation build configuration file, created by
+# sphinx-quickstart on Sun Nov 28 13:45:40 2010.
+#
+# This file is execfile()d with the current directory set to its containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys, os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.append(os.path.abspath('.'))
+
+# -- General configuration -----------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be extensions
+# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+extensions = ['sphinx.ext.autodoc']
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'testtools'
+copyright = u'2010, The testtools authors'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = 'VERSION'
+# The full version, including alpha/beta/rc tags.
+release = 'VERSION'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of documents that shouldn't be included in the build.
+#unused_docs = []
+
+# List of directories, relative to source directory, that shouldn't be searched
+# for source files.
+exclude_trees = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+
+# -- Options for HTML output ---------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  Major themes that come with
+# Sphinx are currently 'default' and 'sphinxdoc'.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_use_modindex = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = ''
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'testtoolsdoc'
+
+
+# -- Options for LaTeX output --------------------------------------------------
+
+# The paper size ('letter' or 'a4').
+#latex_paper_size = 'letter'
+
+# The font size ('10pt', '11pt' or '12pt').
+#latex_font_size = '10pt'
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title, author, documentclass [howto/manual]).
+latex_documents = [
+  ('index', 'testtools.tex', u'testtools Documentation',
+   u'The testtools authors', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# Additional stuff for the LaTeX preamble.
+#latex_preamble = ''
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_use_modindex = True
diff --git a/third_party/testtools/doc/for-framework-folk.rst b/third_party/testtools/doc/for-framework-folk.rst
new file mode 100644
index 0000000..5c83ab1
--- /dev/null
+++ b/third_party/testtools/doc/for-framework-folk.rst
@@ -0,0 +1,453 @@
+============================
+testtools for framework folk
+============================
+
+Introduction
+============
+
+In addition to having many features :doc:`for test authors
+<for-test-authors>`, testtools also has many bits and pieces that are useful
+for folk who write testing frameworks.
+
+If you are the author of a test runner, are working on a very large
+unit-tested project, are trying to get one testing framework to play nicely
+with another or are hacking away at getting your test suite to run in parallel
+over a heterogeneous cluster of machines, this guide is for you.
+
+This manual is a summary. You can get details by consulting the
+:doc:`testtools API docs </api>`.
+
+
+Extensions to TestCase
+======================
+
+In addition to the ``TestCase`` specific methods, we have extensions for
+``TestSuite`` that also apply to ``TestCase`` (because ``TestCase`` and
+``TestSuite`` follow the Composite pattern).
+
+Custom exception handling
+-------------------------
+
+testtools provides a way to control how test exceptions are handled.  To do
+this, add a new exception to ``self.exception_handlers`` on a
+``testtools.TestCase``.  For example::
+
+    >>> self.exception_handlers.insert(-1, (ExceptionClass, handler))
+
+Having done this, if any of ``setUp``, ``tearDown``, or the test method raise
+``ExceptionClass``, ``handler`` will be called with the test case, test result
+and the raised exception.
+
+Use this if you want to add a new kind of test result, that is, if you think
+that ``addError``, ``addFailure`` and so forth are not enough for your needs.
+
+
+Controlling test execution
+--------------------------
+
+If you want to control more than just how exceptions are raised, you can
+provide a custom ``RunTest`` to a ``TestCase``.  The ``RunTest`` object can
+change everything about how the test executes.
+
+To work with ``testtools.TestCase``, a ``RunTest`` must have a factory that
+takes a test, an optional list of exception handlers, and an optional
+``last_resort`` handler.  Instances returned by the factory must have a
+``run()`` method that takes an optional ``TestResult`` object.
+
+The default is ``testtools.runtest.RunTest``, which calls ``setUp``, the test
+method, ``tearDown`` and clean ups (see :ref:`addCleanup`) in the normal, vanilla
+way that Python's standard unittest_ does.
+
+To specify a ``RunTest`` for all the tests in a ``TestCase`` class, do something
+like this::
+
+  class SomeTests(TestCase):
+      run_tests_with = CustomRunTestFactory
+
+To specify a ``RunTest`` for a specific test in a ``TestCase`` class, do::
+
+  class SomeTests(TestCase):
+      @run_test_with(CustomRunTestFactory, extra_arg=42, foo='whatever')
+      def test_something(self):
+          pass
+
+In addition, either of these can be overridden by passing a factory in to the
+``TestCase`` constructor with the optional ``runTest`` argument.
+
+
+Test renaming
+-------------
+
+``testtools.clone_test_with_new_id`` is a function to copy a test case
+instance to one with a new name.  This is helpful for implementing test
+parameterization.
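+
+For example, a sketch of parameterisation by renaming (``SomeTests`` here is
+an illustrative test case class, not part of the API)::
+
+    >>> from testtools import clone_test_with_new_id
+    >>> test = SomeTests('test_something')
+    >>> clone = clone_test_with_new_id(test, test.id() + '(variant)')
+    >>> # The clone runs the same code but reports under the new id.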
+
+.. _force_failure:
+
+Delayed Test Failure
+--------------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to True will
+cause ``testtools.RunTest`` to fail the test case after the test has finished.
+This is useful when you want to cause a test to fail, but don't want to
+prevent the remainder of the test code from being executed.
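+
+A sketch of the idiom (the helpers in the loop are hypothetical)::
+
+  class SomeTests(TestCase):
+
+      def test_all_outputs(self):
+          for output in generate_outputs():  # hypothetical helper
+              if not looks_valid(output):    # hypothetical helper
+                  self.force_failure = True  # mark failed, keep checking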
+
+Test placeholders
+=================
+
+Sometimes, it's useful to be able to add things to a test suite that are not
+actually tests.  For example, you might wish to represent import failures
+that occur during test discovery as tests, so that your test result object
+doesn't have to do special work to handle them nicely.
+
+testtools provides two such objects, called "placeholders": ``PlaceHolder``
+and ``ErrorHolder``.  ``PlaceHolder`` takes a test id and an optional
+description.  When it's run, it succeeds.  ``ErrorHolder`` takes a test id,
+an error and an optional short description.  When it's run, it reports that
+error.
+
+These placeholders are best used to log events that occur outside the test
+suite proper, but are still very relevant to its results.
+
+e.g.::
+
+  >>> suite = TestSuite()
+  >>> suite.addTest(PlaceHolder('I record an event'))
+  >>> suite.run(TextTestResult(verbose=True))
+  I record an event                                                   [OK]
+
+
+Test instance decorators
+========================
+
+DecorateTestCaseResult
+----------------------
+
+This object calls out to your code when ``run`` / ``__call__`` are called and
+allows the result object that will be used to run the test to be altered. This
+is very useful when working with a test runner that doesn't know your test case
+requirements. For instance, it can be used to inject a ``unittest2`` compatible
+adapter when someone attempts to run your test suite with a ``TestResult`` that
+does not support ``addSkip`` or other ``unittest2`` methods. Similarly it can
+aid the migration to ``StreamResult``.
+
+e.g.::
+
+ >>> suite = TestSuite()
+ >>> suite = DecorateTestCaseResult(suite, ExtendedToOriginalDecorator)
+
+Extensions to TestResult
+========================
+
+StreamResult
+------------
+
+``StreamResult`` is a new API for dealing with test case progress that supports
+concurrent and distributed testing without the various issues that
+``TestResult`` has such as buffering in multiplexers.
+
+The design has several key principles:
+
+* Nothing that requires up-front knowledge of all tests.
+
+* Deal with tests running in concurrent environments, potentially distributed
+  across multiple processes (or even machines). This implies allowing multiple
+  tests to be active at once, supplying time explicitly, being able to
+  differentiate between tests running in different contexts and removing any
+  assumption that tests are necessarily in the same process.
+
+* Make the API as simple as possible - each aspect should do one thing well.
+
+The ``TestResult`` API this is intended to replace has three different clients.
+
+* Each executing ``TestCase`` notifies the ``TestResult`` about activity.
+
+* The testrunner running tests uses the API to find out whether the test run
+  had errors, how many tests ran and so on.
+
+* Finally, each ``TestCase`` queries the ``TestResult`` to see whether the test
+  run should be aborted.
+
+With ``StreamResult`` we need to be able to provide a ``TestResult`` compatible
+adapter (``StreamToExtendedDecorator``) to allow incremental migration.
+However, we don't need to conflate things long term. So - we define three
+separate APIs, and merely mix them together to provide the
+``StreamToExtendedDecorator``. ``StreamResult`` is the first of these APIs -
+meeting the needs of ``TestCase`` clients. It handles events generated by
+running tests. See the API documentation for ``testtools.StreamResult`` for
+details.
+
+StreamSummary
+-------------
+
+Secondly we define the ``StreamSummary`` API which takes responsibility for
+collating errors, detecting incomplete tests and counting tests. This provides
+a compatible API with those aspects of ``TestResult``. Again, see the API
+documentation for ``testtools.StreamSummary``.
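+
+For example, a sketch of collating a run (route real test events to the
+summary where the comment indicates)::
+
+    >>> from testtools import StreamSummary
+    >>> summary = StreamSummary()
+    >>> summary.startTestRun()
+    >>> # Feed test events to summary.status(...) here.
+    >>> summary.stopTestRun()
+    >>> summary.wasSuccessful()
+    True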
+
+TestControl
+-----------
+
+Lastly we define the ``TestControl`` API which is used to provide the
+``shouldStop`` and ``stop`` elements from ``TestResult``. Again, see the API
+documentation for ``testtools.TestControl``. ``TestControl`` can be paired with
+a ``StreamFailFast`` to trigger aborting a test run when a failure is observed.
+Aborting multiple workers in a distributed environment requires hooking
+whatever signalling mechanism the distributed environment has up to a
+``TestControl`` in each worker process.
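+
+For example, a sketch of fail-fast wiring within a single process::
+
+    >>> from testtools import StreamFailFast, TestControl
+    >>> control = TestControl()
+    >>> fail_fast = StreamFailFast(control.stop)
+    >>> # Send events to fail_fast alongside your real sinks; a runner can
+    >>> # then check control.shouldStop between tests.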
+
+StreamTagger
+------------
+
+A ``StreamResult`` filter that adds or removes tags from events::
+
+    >>> from testtools import StreamTagger
+    >>> sink = StreamResult()
+    >>> result = StreamTagger([sink], set(['add']), set(['discard']))
+    >>> result.startTestRun()
+    >>> # Run tests against result here.
+    >>> result.stopTestRun()
+
+StreamToDict
+------------
+
+A simplified API for dealing with ``StreamResult`` streams. Each test is
+buffered until it completes and then reported as a trivial dict. This makes
+writing analysers very easy - you can ignore all the plumbing and just work
+with the result. e.g.::
+
+    >>> from testtools import StreamToDict
+    >>> def handle_test(test_dict):
+    ...     print(test_dict['id'])
+    >>> result = StreamToDict(handle_test)
+    >>> result.startTestRun()
+    >>> # Run tests against result here.
+    >>> # At stopTestRun() any incomplete buffered tests are announced.
+    >>> result.stopTestRun()
+
+ExtendedToStreamDecorator
+-------------------------
+
+This is a hybrid object that combines both the ``Extended`` and ``Stream``
+``TestResult`` APIs into one class, but only emits ``StreamResult`` events.
+This is useful when a ``StreamResult`` stream is desired, but you cannot
+be sure that the tests which will run have been updated to the ``StreamResult``
+API.
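+
+For example, a sketch using the ``StreamResult`` test double as the sink::
+
+    >>> from testtools import ExtendedToStreamDecorator
+    >>> from testtools.testresult.doubles import StreamResult
+    >>> result = ExtendedToStreamDecorator(StreamResult())
+    >>> result.startTestRun()
+    >>> # Old-style calls such as result.addSuccess(test) and new-style
+    >>> # result.status(...) calls both come out as StreamResult events.
+    >>> result.stopTestRun()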
+
+StreamToExtendedDecorator
+-------------------------
+
+This is a simple converter that emits the ``ExtendedTestResult`` API in
+response to events from the ``StreamResult`` API. Useful when outputting
+``StreamResult`` events from a ``TestCase`` but the supplied ``TestResult``
+does not support the ``status`` and ``file`` methods.
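+
+For example, a sketch feeding stream events into a plain ``TestResult``::
+
+    >>> from testtools import StreamToExtendedDecorator, TestResult
+    >>> legacy = TestResult()
+    >>> result = StreamToExtendedDecorator(legacy)
+    >>> result.startTestRun()
+    >>> result.status(test_id='foo', test_status='success')
+    >>> # legacy receives startTest/addSuccess style calls as each test
+    >>> # completes.
+    >>> result.stopTestRun()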
+
+StreamToQueue
+-------------
+
+This is a ``StreamResult`` decorator for reporting tests from multiple threads
+at once. Each method submits an event to a supplied Queue object as a simple
+dict. See ``ConcurrentStreamTestSuite`` for a convenient way to use this.
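+
+For example (``'worker-0'`` is an arbitrary route code identifying this
+source)::
+
+    >>> from Queue import Queue  # 'queue' on Python 3
+    >>> from testtools import StreamToQueue
+    >>> queue = Queue()
+    >>> result = StreamToQueue(queue, 'worker-0')
+    >>> # Every event reported to result arrives on queue as a simple dict.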
+
+TimestampingStreamResult
+------------------------
+
+This is a ``StreamResult`` decorator for adding timestamps to events that lack
+them. This allows writing the simplest possible generators of events and
+passing the events via this decorator to get timestamped data. As long as
+no buffering/queueing or blocking happens before the timestamper sees the
+event, the timestamp will be as accurate as if the original event had it.
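+
+For example, wrapping a sink so that unstamped events are timestamped on the
+way through::
+
+    >>> from testtools import StreamResult, TimestampingStreamResult
+    >>> sink = StreamResult()
+    >>> result = TimestampingStreamResult(sink)
+    >>> # Events passed to result without a timestamp are forwarded to
+    >>> # sink with the current time filled in.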
+
+StreamResultRouter
+------------------
+
+This is a ``StreamResult`` which forwards events to an arbitrary set of target
+``StreamResult`` objects. Events that have no forwarding rule are passed on to
+a fallback ``StreamResult`` for processing. The mapping can be changed at
+runtime, allowing great flexibility and responsiveness to changes. Because
+the mapping can change dynamically and there could be the same recipient for
+two different maps, ``startTestRun`` and ``stopTestRun`` handling is
+fine-grained and up to the user.
+
+If no fallback has been supplied, an unroutable event will raise an exception.
+
+For instance::
+
+    >>> router = StreamResultRouter()
+    >>> sink = doubles.StreamResult()
+    >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+    ...     consume_route=True)
+    >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+Would remove the ``0/`` from the route_code and forward the event like so::
+
+    >>> sink.status(test_id='foo', route_code='1', test_status='uxsuccess')
+
+See ``pydoc testtools.StreamResultRouter`` for details.
+
+TestResult.addSkip
+------------------
+
+This method is called on result objects when a test skips. The
+``testtools.TestResult`` class records skips in its ``skip_reasons`` instance
+dict. These can be reported on in much the same way as successful tests.
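+
+For example, a sketch that summarises skips after a run (``result`` is
+assumed to be a ``testtools.TestResult`` that has already been used)::
+
+    >>> for reason, tests in result.skip_reasons.items():
+    ...     print('%s: %d test(s)' % (reason, len(tests)))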
+
+
+TestResult.time
+---------------
+
+This method controls the time used by a ``TestResult``, permitting accurate
+timing of test results gathered on different machines or in different threads.
+See pydoc testtools.TestResult.time for more details.
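+
+A sketch (``result`` is assumed to be a testtools ``TestResult``, ``test`` a
+test case, and ``event_timestamp`` a ``datetime`` supplied by, say, a remote
+worker)::
+
+    >>> result.time(event_timestamp)  # subsequent events use this time
+    >>> result.startTest(test)
+    >>> result.time(None)             # revert to reading the system clock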
+
+
+ThreadsafeForwardingResult
+--------------------------
+
+A ``TestResult`` which forwards activity to another test result, but synchronises
+on a semaphore to ensure that all the activity for a single test arrives in a
+batch. This allows simple TestResults which do not expect concurrent test
+reporting to be fed the activity from multiple test threads or processes.
+
+Note that when you provide multiple errors for a single test, the target sees
+each error as a distinct complete test.
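+
+For example, a sketch of the wiring (each worker thread gets its own
+forwarder; all of them share one semaphore and one target)::
+
+    >>> from threading import Semaphore
+    >>> from testtools import TestResult, ThreadsafeForwardingResult
+    >>> target = TestResult()
+    >>> semaphore = Semaphore(1)
+    >>> forwarder = ThreadsafeForwardingResult(target, semaphore)
+    >>> # Hand one such forwarder to each worker thread.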
+
+
+MultiTestResult
+---------------
+
+A test result that dispatches its events to many test results.  Use this
+to combine multiple different test result objects into one test result object
+that can be passed to ``TestCase.run()`` or similar.  For example::
+
+  a = TestResult()
+  b = TestResult()
+  combined = MultiTestResult(a, b)
+  combined.startTestRun()  # Calls a.startTestRun() and b.startTestRun()
+
+Each of the methods on ``MultiTestResult`` will return a tuple of whatever the
+component test results return.
+
+
+TestResultDecorator
+-------------------
+
+Not strictly a ``TestResult``, but something that implements the extended
+``TestResult`` interface of testtools.  It can be subclassed to create objects
+that wrap ``TestResults``.
+
+
+TextTestResult
+--------------
+
+A ``TestResult`` that provides a text UI very similar to the Python standard
+library UI. Key differences are that it supports the extended outcomes and
+details API, and is completely encapsulated into the result object, permitting
+it to be used without a 'TestRunner' object. Not all the Python 2.7 outcomes
+are displayed (yet). It is also a 'quiet' result with no dots or verbose mode.
+These limitations will be corrected soon.
+
+
+ExtendedToOriginalDecorator
+---------------------------
+
+Adapts legacy ``TestResult`` objects, such as those found in older Pythons, to
+meet the testtools ``TestResult`` API.
+
+
+Test Doubles
+------------
+
+In testtools.testresult.doubles there are three test doubles that testtools
+uses for its own testing: ``Python26TestResult``, ``Python27TestResult``,
+``ExtendedTestResult``. These TestResult objects implement a single variation of
+the TestResult API each, and log activity to a list ``self._events``. These are
+made available for the convenience of people writing their own extensions.
+
+
+startTestRun and stopTestRun
+----------------------------
+
+Python 2.7 added hooks ``startTestRun`` and ``stopTestRun`` which are called
+before and after the entire test run. 'stopTestRun' is particularly useful for
+test results that wish to produce summary output.
+
+``testtools.TestResult`` provides default ``startTestRun`` and ``stopTestRun``
+methods, and the default testtools runner will call these methods
+appropriately.
+
+The ``startTestRun`` method will reset any errors, failures and so forth on
+the result, making the result object look as if no tests have been run.
+
+
+Extensions to TestSuite
+=======================
+
+ConcurrentTestSuite
+-------------------
+
+A TestSuite for parallel testing. This is used in conjunction with a helper
+that runs a single suite in some parallel fashion (for instance, forking,
+handing off to a subprocess, to a compute cloud, or simple threads).
+ConcurrentTestSuite uses the helper to get a number of separate runnable
+objects, each with a run(result) method, and runs them all in threads, using
+ThreadsafeForwardingResult to coalesce their activity.
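+
+For example, a naive sketch that runs every test in its own thread (``suite``
+and ``result`` are assumed to exist already)::
+
+    >>> from testtools import ConcurrentTestSuite
+    >>> from testtools.testsuite import iterate_tests
+    >>> def split_suite(suite):
+    ...     # One runnable per test case; real splitters would batch.
+    ...     return list(iterate_tests(suite))
+    >>> concurrent_suite = ConcurrentTestSuite(suite, split_suite)
+    >>> concurrent_suite.run(result)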
+
+ConcurrentStreamTestSuite
+-------------------------
+
+A variant of ConcurrentTestSuite that uses the new StreamResult API instead of
+the TestResult API. ConcurrentStreamTestSuite coordinates running some number
+of test/suites concurrently, with one StreamToQueue per test/suite.
+
+Each test/suite gets given its own ExtendedToStreamDecorator +
+TimestampingStreamResult wrapped StreamToQueue instance, forwarding onto the
+StreamResult that ConcurrentStreamTestSuite.run was called with.
+
+ConcurrentStreamTestSuite is a thin shim and it is easy to implement your own
+specialised form if that is needed.
+
+FixtureSuite
+------------
+
+A test suite that sets up a fixture_ before running any tests, and then tears
+it down after all of the tests are run. The fixture is *not* made available to
+any of the tests due to there being no standard channel for suites to pass
+information to the tests they contain (and we don't have enough data on what
+such a channel would need to achieve to design a good one yet - or even decide
+if it is a good idea).
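+
+For example (``MyFixture`` and ``SomeTests`` are illustrative names)::
+
+    >>> from testtools.testsuite import FixtureSuite
+    >>> suite = FixtureSuite(MyFixture(), [SomeTests('test_one')])
+    >>> # MyFixture's setUp runs once before test_one and its cleanUp
+    >>> # once after.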
+
+sorted_tests
+------------
+
+Given the composite structure of TestSuite / TestCase, sorting tests is
+problematic - you can't tell what functionality is embedded into custom Suite
+implementations. In order to deliver consistent test orders when using test
+discovery (see http://bugs.python.org/issue16709), testtools flattens and
+sorts tests that have the standard TestSuite, and defines a new method
+sort_tests, which can be used by non-standard TestSuites to know when they
+should sort their tests. An example implementation can be seen at
+``FixtureSuite.sorted_tests``.
+
+If there are duplicate test ids in a suite, ValueError will be raised.
+
+filter_by_ids
+-------------
+
+Similarly to ``sorted_tests``, running a subset of tests is problematic - the
+standard run interface provides no way to limit what runs. Rather than
+confounding the two problems (selection and execution) we defined a method
+that filters the tests in a suite (or a case) by their unique test id.
+If you are writing custom wrapping suites, consider implementing filter_by_ids
+to support this (though most wrappers that subclass ``unittest.TestSuite``
+will work just fine; see ``testtools.testsuite.filter_by_ids`` for details).
+
+Extensions to TestRunner
+========================
+
+To facilitate custom listing of tests, ``testtools.run.TestProgram`` attempts
+to call ``list`` on the ``TestRunner``, falling back to a generic
+implementation if it is not present.
+
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _fixture: http://pypi.python.org/pypi/fixtures
diff --git a/third_party/testtools/doc/for-test-authors.rst b/third_party/testtools/doc/for-test-authors.rst
new file mode 100644
index 0000000..5deb7ce
--- /dev/null
+++ b/third_party/testtools/doc/for-test-authors.rst
@@ -0,0 +1,1485 @@
+==========================
+testtools for test authors
+==========================
+
+If you are writing tests for a Python project and you (rather wisely) want to
+use testtools to do so, this is the manual for you.
+
+We assume that you already know Python and that you know something about
+automated testing already.
+
+If you are a test author of an unusually large or unusually unusual test
+suite, you might be interested in :doc:`for-framework-folk`.
+
+You might also be interested in the :doc:`testtools API docs </api>`.
+
+
+Introduction
+============
+
+testtools is a set of extensions to Python's standard unittest module.
+Writing tests with testtools is very much like writing tests with standard
+Python, or with Twisted's "trial_", or nose_, except a little bit easier and
+more enjoyable.
+
+Below, we'll try to give some examples of how to use testtools in its most
+basic way, as well as a sort of feature-by-feature breakdown of the cool bits
+that you could easily miss.
+
+
+The basics
+==========
+
+Here's what a basic testtools unit test looks like::
+
+  from testtools import TestCase
+  from myproject import silly
+
+  class TestSillySquare(TestCase):
+      """Tests for silly square function."""
+
+      def test_square(self):
+          # 'square' takes a number and multiplies it by itself.
+          result = silly.square(7)
+          self.assertEqual(result, 49)
+
+      def test_square_bad_input(self):
+          # 'square' raises a TypeError if it's given bad input, say a
+          # string.
+          self.assertRaises(TypeError, silly.square, "orange")
+
+
+Here you have a class that inherits from ``testtools.TestCase`` and bundles
+together a bunch of related tests.  The tests themselves are methods on that
+class that begin with ``test_``.
+
+Running your tests
+------------------
+
+You can run these tests in many ways.  testtools provides a very basic
+mechanism for doing so::
+
+  $ python -m testtools.run exampletest
+  Tests running...
+  Ran 2 tests in 0.000s
+
+  OK
+
+where 'exampletest' is a module that contains unit tests.  By default,
+``testtools.run`` will *not* recursively search the module or package for unit
+tests.  To do this, you will need to either have the discover_ module
+installed or have Python 2.7 or later, and then run::
+
+  $ python -m testtools.run discover packagecontainingtests
+
+For more information see the Python 2.7 unittest documentation, or::
+
+    python -m testtools.run --help
+
+As your testing needs grow and evolve, you will probably want to use a more
+sophisticated test runner.  There are many of these for Python, and almost all
+of them will happily run testtools tests.  In particular:
+
+* testrepository_
+* Trial_
+* nose_
+* unittest2_
+* `zope.testrunner`_ (aka zope.testing)
+
+From now on, we'll assume that you know how to run your tests.
+
+Running tests with Distutils
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you are using Distutils_ to build your Python project, you can use the testtools
+Distutils_ command to integrate testtools into your Distutils_ workflow::
+
+  from distutils.core import setup
+  from testtools import TestCommand
+  setup(name='foo',
+      version='1.0',
+      py_modules=['foo'],
+      cmdclass={'test': TestCommand}
+  )
+
+You can then run::
+
+  $ python setup.py test -m exampletest
+  Tests running...
+  Ran 2 tests in 0.000s
+
+  OK
+
+For more information about the capabilities of the `TestCommand` command see::
+
+  $ python setup.py test --help
+
+You can use the `setup configuration`_ to specify the default behavior of the
+`TestCommand` command.
+
+Assertions
+==========
+
+The core of automated testing is making assertions about the way things are,
+and getting a nice, helpful, informative error message when things are not as
+they ought to be.
+
+All of the assertions that you can find in Python standard unittest_ can be
+found in testtools (remember, testtools extends unittest).  testtools changes
+the behaviour of some of those assertions slightly and adds some new
+assertions that you will almost certainly find useful.
+
+
+Improved assertRaises
+---------------------
+
+``TestCase.assertRaises`` returns the caught exception.  This is useful for
+asserting more things about the exception than just the type::
+
+  def test_square_bad_input(self):
+      # 'square' raises a TypeError if it's given bad input, say a
+      # string.
+      e = self.assertRaises(TypeError, silly.square, "orange")
+      self.assertEqual("orange", e.bad_value)
+      self.assertEqual("Cannot square 'orange', not a number.", str(e))
+
+Note that this is incompatible with the ``assertRaises`` in unittest2 and
+Python 2.7.
+
+
+ExpectedException
+-----------------
+
+If you are using a version of Python that supports the ``with`` context
+manager syntax, you might prefer to use that syntax to ensure that code raises
+particular errors.  ``ExpectedException`` does just that.  For example::
+
+  def test_square_root_bad_input_2(self):
+      # 'square' raises a TypeError if it's given bad input.
+      with ExpectedException(TypeError, "Cannot square.*"):
+          silly.square('orange')
+
+The first argument to ``ExpectedException`` is the type of exception you
+expect to see raised.  The second argument is optional, and can be either a
+regular expression or a matcher. If it is a regular expression, the ``str()``
+of the raised exception must match the regular expression. If it is a matcher,
+then the raised exception object must match it. The optional third argument
+``msg`` will cause the raised error to be annotated with that message.
+
+
+assertIn, assertNotIn
+---------------------
+
+These two assertions check whether a value is in a sequence and whether a
+value is not in a sequence.  They are "assert" versions of the ``in`` and
+``not in`` operators.  For example::
+
+  def test_assert_in_example(self):
+      self.assertIn('a', 'cat')
+      self.assertNotIn('o', 'cat')
+      self.assertIn(5, list_of_primes_under_ten)
+      self.assertNotIn(12, list_of_primes_under_ten)
+
+
+assertIs, assertIsNot
+---------------------
+
+These two assertions check whether values are identical to one another.  This
+is sometimes useful when you want to test something more strict than mere
+equality.  For example::
+
+  def test_assert_is_example(self):
+      foo = [None]
+      foo_alias = foo
+      bar = [None]
+      self.assertIs(foo, foo_alias)
+      self.assertIsNot(foo, bar)
+      self.assertEqual(foo, bar) # They are equal, but not identical
+
+
+assertIsInstance
+----------------
+
+As much as we love duck-typing and polymorphism, sometimes you need to check
+whether or not a value is of a given type.  This method does that.  For
+example::
+
+  def test_assert_is_instance_example(self):
+      now = datetime.now()
+      self.assertIsInstance(now, datetime)
+
+Note that there is no ``assertIsNotInstance`` in testtools currently.
+
+
+expectFailure
+-------------
+
+Sometimes it's useful to write tests that fail.  For example, you might want
+to turn a bug report into a unit test, but you don't know how to fix the bug
+yet.  Or perhaps you want to document a known, temporary deficiency in a
+dependency.
+
+testtools gives you ``TestCase.expectFailure`` to help with this.  You use
+it to say that you expect this assertion to fail.  When the test runs and the
+assertion fails, testtools will report it as an "expected failure".
+
+Here's an example::
+
+  def test_expect_failure_example(self):
+      self.expectFailure(
+          "cats should be dogs", self.assertEqual, 'cats', 'dogs')
+
+As long as 'cats' is not equal to 'dogs', the test will be reported as an
+expected failure.
+
+If ever by some miracle 'cats' becomes 'dogs', then testtools will report an
+"unexpected success".  Unlike standard unittest, testtools treats this as
+something that fails the test suite, like an error or a failure.
+
+
+Matchers
+========
+
+The built-in assertion methods are very useful; they are the bread and butter
+of writing tests.  However, soon enough you will probably want to write your
+own assertions.  Perhaps there are domain specific things that you want to
+check (e.g. assert that two widgets are aligned parallel to the flux grid), or
+perhaps you want to check something that could almost but not quite be found
+in some other standard library (e.g. assert that two paths point to the same
+file).
+
+When you are in such situations, you could either make a base class for your
+project that inherits from ``testtools.TestCase`` and make sure that all of
+your tests derive from that, *or* you could use the testtools ``Matcher``
+system.
+
+
+Using Matchers
+--------------
+
+Here's a really basic example using stock matchers found in testtools::
+
+  from testtools import TestCase
+  from testtools.matchers import Equals
+
+  class TestSquare(TestCase):
+      def test_square(self):
+          result = square(7)
+          self.assertThat(result, Equals(49))
+
+The line ``self.assertThat(result, Equals(49))`` is equivalent to
+``self.assertEqual(result, 49)`` and means "assert that ``result`` equals 49".
+The difference is that ``assertThat`` is a more general method that takes some
+kind of observed value (in this case, ``result``) and any matcher object
+(here, ``Equals(49)``).
+
+The matcher object could be absolutely anything that implements the Matcher
+protocol.  This means that you can make more complex matchers by combining
+existing ones::
+
+  def test_square_silly(self):
+      result = square(7)
+      self.assertThat(result, Not(Equals(50)))
+
+Which is roughly equivalent to::
+
+  def test_square_silly(self):
+      result = square(7)
+      self.assertNotEqual(result, 50)
+
+
+``assert_that`` Function
+------------------------
+
+In addition to ``self.assertThat``, testtools also provides the ``assert_that``
+function in ``testtools.assertions``.  This behaves like the method version::
+
+    from testtools import TestCase
+    from testtools.assertions import assert_that
+    from testtools.matchers import Equals, Not
+
+    class TestSquare(TestCase):
+
+        def test_square(self):
+            result = square(7)
+            assert_that(result, Equals(49))
+
+        def test_square_silly(self):
+            result = square(7)
+            assert_that(result, Not(Equals(50)))
+
+
+Delayed Assertions
+~~~~~~~~~~~~~~~~~~
+
+A failure in the ``self.assertThat`` method will immediately fail the test: no
+more test code will be run after the assertion failure.
+
+The ``expectThat`` method behaves the same as ``assertThat`` with one
+exception: when failing the test it does so at the end of the test code rather
+than when the mismatch is detected. For example::
+
+  import subprocess
+
+  from testtools import TestCase
+  from testtools.matchers import Equals
+
+
+  class SomeProcessTests(TestCase):
+
+      def test_process_output(self):
+          process = subprocess.Popen(
+            ["my-app", "/some/path"],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE
+          )
+
+          stdout, stderr = process.communicate()
+
+          self.expectThat(process.returncode, Equals(0))
+          self.expectThat(stdout, Equals("Expected Output"))
+          self.expectThat(stderr, Equals(""))
+
+In this example, should the ``expectThat`` call fail, the failure will be
+recorded in the test result, but the test will continue as normal. If all
+three assertions fail, the test result will have three failures recorded, and
+the failure details for each failed assertion will be attached to the test
+result.
+
+Stock matchers
+--------------
+
+testtools comes with many matchers built in.  They can all be found in and
+imported from the ``testtools.matchers`` module.
+
+Equals
+~~~~~~
+
+Matches if two items are equal. For example::
+
+  def test_equals_example(self):
+      self.assertThat([42], Equals([42]))
+
+
+Is
+~~~
+
+Matches if two items are identical.  For example::
+
+  def test_is_example(self):
+      foo = object()
+      self.assertThat(foo, Is(foo))
+
+
+IsInstance
+~~~~~~~~~~
+
+Adapts isinstance() to use as a matcher.  For example::
+
+  def test_isinstance_example(self):
+      class MyClass: pass
+      self.assertThat(MyClass(), IsInstance(MyClass))
+      self.assertThat(MyClass(), IsInstance(MyClass, str))
+
+
+The raises helper
+~~~~~~~~~~~~~~~~~
+
+Matches if a callable raises a particular type of exception.  For example::
+
+  def test_raises_example(self):
+      self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+This is actually a convenience function that combines two other matchers:
+Raises_ and MatchesException_.
+
+
+DocTestMatches
+~~~~~~~~~~~~~~
+
+Matches a string as if it were the output of a doctest_ example.  Very useful
+for making assertions about large chunks of text.  For example::
+
+  import doctest
+
+  def test_doctest_example(self):
+      output = "Colorless green ideas"
+      self.assertThat(
+          output,
+          DocTestMatches("Colorless ... ideas", doctest.ELLIPSIS))
+
+We highly recommend using the following flags::
+
+  doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF
+
+
+GreaterThan
+~~~~~~~~~~~
+
+Matches if the given thing is greater than the thing in the matcher.  For
+example::
+
+  def test_greater_than_example(self):
+      self.assertThat(3, GreaterThan(2))
+
+
+LessThan
+~~~~~~~~
+
+Matches if the given thing is less than the thing in the matcher.  For
+example::
+
+  def test_less_than_example(self):
+      self.assertThat(2, LessThan(3))
+
+
+StartsWith, EndsWith
+~~~~~~~~~~~~~~~~~~~~
+
+These matchers check to see if a string starts with or ends with a particular
+substring.  For example::
+
+  def test_starts_and_ends_with_example(self):
+      self.assertThat('underground', StartsWith('und'))
+      self.assertThat('underground', EndsWith('und'))
+
+
+Contains
+~~~~~~~~
+
+This matcher checks to see if the given thing contains the thing in the
+matcher.  For example::
+
+  def test_contains_example(self):
+      self.assertThat('abc', Contains('b'))
+
+
+MatchesException
+~~~~~~~~~~~~~~~~
+
+Matches an exc_info tuple if the exception is of the correct type.  For
+example::
+
+  def test_matches_exception_example(self):
+      try:
+          raise RuntimeError('foo')
+      except RuntimeError:
+          exc_info = sys.exc_info()
+      self.assertThat(exc_info, MatchesException(RuntimeError))
+      self.assertThat(exc_info, MatchesException(RuntimeError('bar')))
+
+Most of the time, you will want to use `The raises helper`_ instead.
+
+
+NotEquals
+~~~~~~~~~
+
+Matches if something is not equal to something else.  Note that this is subtly
+different to ``Not(Equals(x))``.  ``NotEquals(x)`` will match if ``y != x``,
+``Not(Equals(x))`` will match if ``not y == x``.
+
+You only need to worry about this distinction if you are testing code that
+relies on badly written overloaded equality operators.
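+
+A minimal sketch of the distinction, using a deliberately inconsistent
+(hypothetical) class::
+
+  class Weird(object):
+      def __eq__(self, other):
+          return True
+      def __ne__(self, other):
+          # Inconsistent with __eq__, as badly overloaded operators can be.
+          return True
+
+  def test_not_equals_example(self):
+      # NotEquals uses !=, so this matches.  Not(Equals(...)) would use ==
+      # and mismatch here.
+      self.assertThat(Weird(), NotEquals(Weird()))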
+
+
+KeysEqual
+~~~~~~~~~
+
+Matches if the keys of one dict are equal to the keys of another dict.  For
+example::
+
+  def test_keys_equal(self):
+      x = {'a': 1, 'b': 2}
+      y = {'a': 2, 'b': 3}
+      self.assertThat(x, KeysEqual(y))
+
+
+MatchesRegex
+~~~~~~~~~~~~
+
+Matches a string against a regular expression, which is a wonderful thing to
+be able to do, if you think about it::
+
+  def test_matches_regex_example(self):
+      self.assertThat('foo', MatchesRegex('fo+'))
+
+
+HasLength
+~~~~~~~~~
+
+Check the length of a collection.  The following assertion will fail::
+
+  self.assertThat([1, 2, 3], HasLength(2))
+
+But this one won't::
+
+  self.assertThat([1, 2, 3], HasLength(3))
+
+
+File- and path-related matchers
+-------------------------------
+
+testtools also has a number of matchers to help with asserting things about
+the state of the filesystem.
+
+PathExists
+~~~~~~~~~~
+
+Matches if a path exists::
+
+  self.assertThat('/', PathExists())
+
+
+DirExists
+~~~~~~~~~
+
+Matches if a path exists and it refers to a directory::
+
+  # This will pass on most Linux systems.
+  self.assertThat('/home/', DirExists())
+  # This will not
+  self.assertThat('/home/jml/some-file.txt', DirExists())
+
+
+FileExists
+~~~~~~~~~~
+
+Matches if a path exists and it refers to a file (as opposed to a directory)::
+
+  # This will pass on most Linux systems.
+  self.assertThat('/bin/true', FileExists())
+  # This will not.
+  self.assertThat('/home/', FileExists())
+
+
+DirContains
+~~~~~~~~~~~
+
+Matches if the given directory contains the specified files and directories.
+Say we have a directory ``foo`` that has the files ``a``, ``b`` and ``c``,
+then::
+
+  self.assertThat('foo', DirContains(['a', 'b', 'c']))
+
+will match, but::
+
+  self.assertThat('foo', DirContains(['a', 'b']))
+
+will not.
+
+The matcher sorts both the input and the list of names we get back from the
+filesystem.
+
+You can use this in a more advanced way, and match the sorted directory
+listing against an arbitrary matcher::
+
+  self.assertThat('foo', DirContains(matcher=Contains('a')))
+
+
+FileContains
+~~~~~~~~~~~~
+
+Matches if the given file has the specified contents.  Say there's a file
+called ``greetings.txt`` with the contents, ``Hello World!``::
+
+  self.assertThat('greetings.txt', FileContains("Hello World!"))
+
+will match.
+
+You can also use this in a more advanced way, and match the contents of the
+file against an arbitrary matcher::
+
+  self.assertThat('greetings.txt', FileContains(matcher=Contains('!')))
+
+
+HasPermissions
+~~~~~~~~~~~~~~
+
+Used for asserting that a file or directory has certain permissions.  Uses
+octal-mode permissions for both input and matching.  For example::
+
+  self.assertThat('/tmp', HasPermissions('1777'))
+  self.assertThat('id_rsa', HasPermissions('0600'))
+
+This is probably more useful on UNIX systems than on Windows systems.
+
+
+SamePath
+~~~~~~~~
+
+Matches if two paths actually refer to the same thing.  The paths don't have
+to exist, but if they do exist, ``SamePath`` will resolve any symlinks::
+
+  self.assertThat('somefile', SamePath('childdir/../somefile'))
+
+
+TarballContains
+~~~~~~~~~~~~~~~
+
+Matches the contents of a tarball.  In many ways, much like ``DirContains``,
+but instead of matching on ``os.listdir`` it matches on ``TarFile.getnames``.
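+
+For example, a sketch assuming ``foo.tar.gz`` contains exactly the members
+``a`` and ``b``::
+
+  self.assertThat('foo.tar.gz', TarballContains(['a', 'b']))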
+
+
+Combining matchers
+------------------
+
+One great thing about matchers is that you can readily combine existing
+matchers to get variations on their behaviour or to quickly build more complex
+assertions.
+
+Below are a few of the combining matchers that come with testtools.
+
+
+Not
+~~~
+
+Negates another matcher.  For example::
+
+  def test_not_example(self):
+      self.assertThat([42], Not(Equals("potato")))
+      self.assertThat([42], Not(Is([42])))
+
+If you find yourself using ``Not`` frequently, you may wish to create a custom
+matcher for it.  For example::
+
+  IsNot = lambda x: Not(Is(x))
+
+  def test_not_example_2(self):
+      self.assertThat([42], IsNot([42]))
+
+
+Annotate
+~~~~~~~~
+
+Used to add custom notes to a matcher.  For example::
+
+  def test_annotate_example(self):
+      result = 43
+      self.assertThat(
+          result, Annotate("Not the answer to the Question!", Equals(42)))
+
+Since the annotation is only ever displayed when there is a mismatch
+(e.g. when ``result`` does not equal 42), it's a good idea to phrase the note
+negatively, so that it describes what a mismatch actually means.
+
+As with Not_, you may wish to create a custom matcher that describes a
+common operation.  For example::
+
+  PoliticallyEquals = lambda x: Annotate("Death to the aristos!", Equals(x))
+
+  def test_annotate_example_2(self):
+      self.assertThat("orange", PoliticallyEquals("yellow"))
+
+You can have assertThat perform the annotation for you as a convenience::
+
+  def test_annotate_example_3(self):
+      self.assertThat("orange", Equals("yellow"), "Death to the aristos!")
+
+
+AfterPreprocessing
+~~~~~~~~~~~~~~~~~~
+
+Used to make a matcher that applies a function to the matched object before
+matching. This can be used to aid in creating trivial matchers as functions, for
+example::
+
+  def test_after_preprocessing_example(self):
+      def PathHasFileContent(content):
+          def _read(path):
+              return open(path).read()
+          return AfterPreprocessing(_read, Equals(content))
+      self.assertThat('/tmp/foo.txt', PathHasFileContent("Hello world!"))
+
+
+MatchesAll
+~~~~~~~~~~
+
+Combines many matchers to make a new matcher.  The new matcher will only match
+things that match every single one of the component matchers.
+
+It's much easier to understand in Python than in English::
+
+  def test_matches_all_example(self):
+      has_und_at_both_ends = MatchesAll(StartsWith("und"), EndsWith("und"))
+      # This will succeed.
+      self.assertThat("underground", has_und_at_both_ends)
+      # This will fail.
+      self.assertThat("found", has_und_at_both_ends)
+      # So will this.
+      self.assertThat("undead", has_und_at_both_ends)
+
+At this point some people ask themselves, "Why bother doing this at all?  Why
+not just have two separate assertions?"  It's a good question.
+
+The first reason is that when a ``MatchesAll`` gets a mismatch, the error will
+include information about all of the bits that mismatched.  When you have two
+separate assertions, as below::
+
+  def test_two_separate_assertions(self):
+       self.assertThat("foo", StartsWith("und"))
+       self.assertThat("foo", EndsWith("und"))
+
+Then you get absolutely no information from the second assertion if the first
+assertion fails.  Tests are largely there to help you debug code, so having
+more information in error messages is a big help.
+
+The second reason is that it is sometimes useful to give a name to a set of
+matchers. ``has_und_at_both_ends`` is a bit contrived, of course, but it is
+clear.  The ``FileExists`` and ``DirExists`` matchers included in testtools
+are perhaps better real examples.
+
+If you want only the first mismatch to be reported, pass ``first_only=True``
+as a keyword parameter to ``MatchesAll``.
+
+
+MatchesAny
+~~~~~~~~~~
+
+Like MatchesAll_, ``MatchesAny`` combines many matchers to make a new
+matcher.  The difference is that the new matcher will match a thing if it
+matches *any* of the component matchers.
+
+For example::
+
+  def test_matches_any_example(self):
+      self.assertThat(42, MatchesAny(Equals(5), Not(Equals(6))))
+
+
+AllMatch
+~~~~~~~~
+
+Matches many values against a single matcher.  Can be used to make sure that
+many things all meet the same condition::
+
+  def test_all_match_example(self):
+      self.assertThat([2, 3, 5, 7], AllMatch(LessThan(10)))
+
+If the match fails, then all of the values that fail to match will be included
+in the error message.
+
+In some ways, this is the converse of MatchesAll_.
+
+
+MatchesListwise
+~~~~~~~~~~~~~~~
+
+Where ``MatchesAny`` and ``MatchesAll`` combine many matchers to match a
+single value, ``MatchesListwise`` combines many matchers to match many values.
+
+For example::
+
+  def test_matches_listwise_example(self):
+      self.assertThat(
+          [1, 2, 3], MatchesListwise(list(map(Equals, [1, 2, 3]))))
+
+This is useful for writing custom, domain-specific matchers.
+
+If you want only the first mismatch to be reported, pass ``first_only=True``
+to ``MatchesListwise``.
+
+
+MatchesSetwise
+~~~~~~~~~~~~~~
+
+Combines many matchers to match many values, without regard to their order.
+
+Here's an example::
+
+  def test_matches_setwise_example(self):
+      self.assertThat(
+          [1, 2, 3], MatchesSetwise(Equals(2), Equals(3), Equals(1)))
+
+Much like ``MatchesListwise``, best used for writing custom, domain-specific
+matchers.
+
+
+MatchesStructure
+~~~~~~~~~~~~~~~~
+
+Creates a matcher that matches certain attributes of an object against a
+pre-defined set of matchers.
+
+It's much easier to understand in Python than in English::
+
+  def test_matches_structure_example(self):
+      foo = Foo()
+      foo.a = 1
+      foo.b = 2
+      matcher = MatchesStructure(a=Equals(1), b=Equals(2))
+      self.assertThat(foo, matcher)
+
+Since all of the matchers used were ``Equals``, we could also write this using
+the ``byEquality`` helper::
+
+  def test_matches_structure_example(self):
+      foo = Foo()
+      foo.a = 1
+      foo.b = 2
+      matcher = MatchesStructure.byEquality(a=1, b=2)
+      self.assertThat(foo, matcher)
+
+``MatchesStructure.fromExample`` takes an object and a list of attributes and
+creates a ``MatchesStructure`` matcher where each attribute of the matched
+object must equal each attribute of the example object.  For example::
+
+      matcher = MatchesStructure.fromExample(foo, 'a', 'b')
+
+is exactly equivalent to ``matcher`` in the previous example.
+
+
+MatchesPredicate
+~~~~~~~~~~~~~~~~
+
+Sometimes, all you want to do is create a matcher that matches if a given
+function returns True, and mismatches if it returns False.
+
+For example, you might have an ``is_prime`` function and want to make a
+matcher based on it::
+
+  def test_prime_numbers(self):
+      IsPrime = MatchesPredicate(is_prime, '%s is not prime.')
+      self.assertThat(7, IsPrime)
+      self.assertThat(1983, IsPrime)
+      # This will fail.
+      self.assertThat(42, IsPrime)
+
+Which will produce the error message::
+
+  Traceback (most recent call last):
+    File "...", line ..., in test_prime_numbers
+      self.assertThat(42, IsPrime)
+  MismatchError: 42 is not prime.
+
+
+MatchesPredicateWithParams
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes you can't use a trivial predicate and instead need to pass in some
+parameters each time. In that case, MatchesPredicateWithParams is your go-to
+tool for creating ad hoc matchers. MatchesPredicateWithParams takes a predicate
+function and message and returns a factory to produce matchers from that. The
+predicate needs to return a boolean (or any truthy object), and accept the
+object to match + whatever was passed into the factory.
+
+For example, you might have a ``divisible`` function and want to make a
+matcher based on it::
+
+  def test_divisible_numbers(self):
+      IsDivisibleBy = MatchesPredicateWithParams(
+          divisible, '{0} is not divisible by {1}')
+      self.assertThat(7, IsDivisibleBy(1))
+      self.assertThat(7, IsDivisibleBy(7))
+      # This will fail.
+      self.assertThat(7, IsDivisibleBy(2))
+
+Which will produce the error message::
+
+  Traceback (most recent call last):
+    File "...", line ..., in test_divisible
+      self.assertThat(7, IsDivisibleBy(2))
+  MismatchError: 7 is not divisible by 2.
+
+
+Raises
+~~~~~~
+
+Takes whatever the callable raises as an exc_info tuple and matches it against
+whatever matcher it was given.  For example, if you want to assert that a
+callable raises an exception of a given type::
+
+  def test_raises_example(self):
+      self.assertThat(
+          lambda: 1/0, Raises(MatchesException(ZeroDivisionError)))
+
+Although note that this could also be written as::
+
+  def test_raises_example_convenient(self):
+      self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+See also MatchesException_ and `the raises helper`_
+
+
+Writing your own matchers
+-------------------------
+
+Combining matchers is fun and can get you a very long way indeed, but
+sometimes you will have to write your own.  Here's how.
+
+You need to make two closely-linked objects: a ``Matcher`` and a
+``Mismatch``.  The ``Matcher`` knows how to actually make the comparison, and
+the ``Mismatch`` knows how to describe a failure to match.
+
+Here's an example matcher::
+
+  class IsDivisibleBy(object):
+      """Match if a number is divisible by another number."""
+      def __init__(self, divider):
+          self.divider = divider
+      def __str__(self):
+          return 'IsDivisibleBy(%s)' % (self.divider,)
+      def match(self, actual):
+          remainder = actual % self.divider
+          if remainder != 0:
+              return IsDivisibleByMismatch(actual, self.divider, remainder)
+          else:
+              return None
+
+The matcher has a constructor that takes parameters that describe what you
+actually *expect*, in this case a number that other numbers ought to be
+divisible by.  It has a ``__str__`` method, the result of which is displayed
+on failure by ``assertThat`` and a ``match`` method that does the actual
+matching.
+
+``match`` takes something to match against, here ``actual``, and decides
+whether or not it matches.  If it does match, then ``match`` must return
+``None``.  If it does *not* match, then ``match`` must return a ``Mismatch``
+object. ``assertThat`` will call ``match`` and then fail the test if it
+returns a non-None value.  For example::
+
+  def test_is_divisible_by_example(self):
+      # This succeeds, since IsDivisibleBy(5).match(10) returns None.
+      self.assertThat(10, IsDivisibleBy(5))
+      # This fails, since IsDivisibleBy(7).match(10) returns a mismatch.
+      self.assertThat(10, IsDivisibleBy(7))
+
+The mismatch is responsible for what sort of error message the failing test
+generates.  Here's an example mismatch::
+
+  class IsDivisibleByMismatch(object):
+      def __init__(self, number, divider, remainder):
+          self.number = number
+          self.divider = divider
+          self.remainder = remainder
+
+      def describe(self):
+          return "%r is not divisible by %r, %r remains" % (
+              self.number, self.divider, self.remainder)
+
+      def get_details(self):
+          return {}
+
+The mismatch takes information about the mismatch, and provides a ``describe``
+method that assembles all of that into a nice error message for end users.
+You can use the ``get_details`` method to provide extra, arbitrary data with
+the mismatch (e.g. the contents of a log file).  Most of the time it's fine to
+just return an empty dict.  You can read more about Details_ elsewhere in this
+document.
+
+Sometimes you don't need to create a custom mismatch class.  In particular, if
+you don't care *when* the description is calculated, then you can just do that
+in the Matcher itself, using ``testtools.matchers.Mismatch``, like this::
+
+  def match(self, actual):
+      remainder = actual % self.divider
+      if remainder != 0:
+          return Mismatch(
+              "%r is not divisible by %r, %r remains" % (
+                  actual, self.divider, remainder))
+      else:
+          return None
+
+When writing a ``describe`` method or constructing a ``Mismatch`` object the
+code should ensure it only emits printable unicode.  As this output must be
+combined with other text and forwarded for presentation, letting through
+non-ASCII bytes of ambiguous encoding or control characters could throw an
+exception or mangle the display.  In most cases simply avoiding the ``%s``
+format specifier and using ``%r`` instead will be enough.  For examples of
+more complex formatting see the ``testtools.matchers`` implementations.
+
+
+Details
+=======
+
+As we may have mentioned once or twice already, one of the great benefits of
+automated tests is that they help find, isolate and debug errors in your
+system.
+
+Frequently however, the information provided by a mere assertion failure is
+not enough.  It's often useful to have other information: the contents of log
+files; what queries were run; benchmark timing information; what state certain
+subsystem components are in and so forth.
+
+testtools calls all of these things "details" and provides a single, powerful
+mechanism for including this information in your test run.
+
+Here's an example of how to add them::
+
+  from testtools import TestCase
+  from testtools.content import text_content
+
+  class TestSomething(TestCase):
+
+      def test_thingy(self):
+          self.addDetail('arbitrary-color-name', text_content("blue"))
+          1 / 0 # Gratuitous error!
+
+A detail is an arbitrary piece of content, given a name that's unique within
+the test.  Here the name is ``arbitrary-color-name`` and the content is
+``text_content("blue")``.  The name can be any text string, and the content
+can be any ``testtools.content.Content`` object.
+
+When the test runs, testtools will show you something like this::
+
+  ======================================================================
+  ERROR: exampletest.TestSomething.test_thingy
+  ----------------------------------------------------------------------
+  arbitrary-color-name: {{{blue}}}
+
+  Traceback (most recent call last):
+    File "exampletest.py", line 8, in test_thingy
+      1 / 0 # Gratuitous error!
+  ZeroDivisionError: integer division or modulo by zero
+  ------------
+  Ran 1 test in 0.030s
+
+As you can see, the detail is included as an attachment, here saying
+that our arbitrary-color-name is "blue".
+
+
+Content
+-------
+
+For the actual content of details, testtools uses its own MIME-based Content
+object.  This allows you to attach any information that you could possibly
+conceive of to a test, and allows testtools to use or serialize that
+information.
+
+The basic ``testtools.content.Content`` object is constructed from a
+``testtools.content.ContentType`` and a nullary callable that must return an
+iterator of chunks of bytes that the content is made from.
+
+So, to make a Content object that is just a simple string of text, you can
+do::
+
+  from testtools.content import Content
+  from testtools.content_type import ContentType
+
+  text = Content(ContentType('text', 'plain'), lambda: ["some text"])
+
+Because adding small bits of text content is very common, there's also a
+convenience method::
+
+  text = text_content("some text")
+
+To make content out of an image stored on disk, you could do something like::
+
+  image = Content(ContentType('image', 'png'),
+                  lambda: [open('foo.png', 'rb').read()])
+
+Or you could use the convenience function::
+
+  image = content_from_file('foo.png', ContentType('image', 'png'))
+
+The ``lambda`` helps make sure that the file is opened and the actual bytes
+read only when they are needed – by default, when the test is finished.  This
+means that tests can construct and add Content objects freely without worrying
+too much about how they affect run time.
+
+
+A realistic example
+-------------------
+
+A very common use of details is to add a log file to failing tests.  Say your
+project has a server represented by a class ``SomeServer`` that you can start
+up and shut down in tests, but runs in another process.  You want to test
+interaction with that server, and whenever the interaction fails, you want to
+see the client-side error *and* the logs from the server-side.  Here's how you
+might do it::
+
+  from testtools import TestCase
+  from testtools.content import Content
+  from testtools.content_type import UTF8_TEXT
+
+  from myproject import SomeServer
+
+  class SomeTestCase(TestCase):
+
+      def setUp(self):
+          super(SomeTestCase, self).setUp()
+          self.server = SomeServer()
+          self.server.start_up()
+          self.addCleanup(self.server.shut_down)
+          self.addCleanup(self.attach_log_file)
+
+      def attach_log_file(self):
+          self.addDetail(
+              'log-file',
+              Content(UTF8_TEXT,
+                      lambda: open(self.server.logfile, 'r').readlines()))
+
+      def test_a_thing(self):
+          self.assertEqual("cool", self.server.temperature)
+
+This test will attach the log file of ``SomeServer`` to each test that is
+run.  testtools will only display the log file for failing tests, so it's not
+such a big deal.
+
+If the act of adding a detail is expensive, you might want to use
+addOnException_ so that you only do it when a test actually raises an
+exception.
+
+
+Controlling test execution
+==========================
+
+.. _addCleanup:
+
+addCleanup
+----------
+
+``TestCase.addCleanup`` is a robust way to arrange for a clean up function to
+be called before ``tearDown``.  This is a powerful and simple alternative to
+putting clean up logic in a try/finally block or ``tearDown`` method.  For
+example::
+
+  def test_foo(self):
+      foo.lock()
+      self.addCleanup(foo.unlock)
+      ...
+
+This is particularly useful if you have some sort of factory in your test::
+
+  def make_locked_foo(self):
+      foo = Foo()
+      foo.lock()
+      self.addCleanup(foo.unlock)
+      return foo
+
+  def test_frotz_a_foo(self):
+      foo = self.make_locked_foo()
+      foo.frotz()
+      self.assertEqual(foo.frotz_count, 1)
+
+Any extra arguments or keyword arguments passed to ``addCleanup`` are passed
+to the callable at cleanup time.
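+
+For example, a sketch assuming a hypothetical ``unlock`` that takes
+arguments::
+
+  def test_shared_foo(self):
+      foo.lock('shared')
+      # 'shared' and force=True are passed through at cleanup time.
+      self.addCleanup(foo.unlock, 'shared', force=True)
+      ...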
+
+Cleanups can also report multiple errors, if appropriate, by wrapping them in
+a ``testtools.MultipleExceptions`` object::
+
+  raise MultipleExceptions(exc_info1, exc_info2)
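+
+For example, here is a sketch of a cleanup that shuts down several
+(hypothetical) resources and reports every failure rather than just the
+first::
+
+  import sys
+
+  from testtools import MultipleExceptions
+
+  def shut_down_all(resources):
+      errors = []
+      for resource in resources:
+          try:
+              resource.shut_down()
+          except Exception:
+              errors.append(sys.exc_info())
+      if errors:
+          raise MultipleExceptions(*errors)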
+
+
+Fixtures
+--------
+
+Tests often depend on a system being set up in a certain way, or having
+certain resources available to them.  Perhaps a test needs a connection to the
+database or access to a running external server.
+
+One common way of doing this is to do::
+
+  class SomeTest(TestCase):
+      def setUp(self):
+          super(SomeTest, self).setUp()
+          self.server = Server()
+          self.server.setUp()
+          self.addCleanup(self.server.tearDown)
+
+testtools provides a more convenient, declarative way to do the same thing::
+
+  class SomeTest(TestCase):
+      def setUp(self):
+          super(SomeTest, self).setUp()
+          self.server = self.useFixture(Server())
+
+``useFixture(fixture)`` calls ``setUp`` on the fixture, schedules a clean up
+to clean it up, and schedules a clean up to attach all details_ held by the
+fixture to the test case.  The fixture object must meet the
+``fixtures.Fixture`` protocol (version 0.3.4 or newer, see fixtures_).
+
+If you have anything beyond the most simple test set up, we recommend that
+you put this set up into a ``Fixture`` class.  Once there, the fixture can be
+easily re-used by other tests and can be combined with other fixtures to make
+more complex resources.
+
+
+Skipping tests
+--------------
+
+Many reasons exist to skip a test: a dependency might be missing; a test might
+be too expensive and thus should not be run while on battery power; or perhaps
+the test is testing an incomplete feature.
+
+``TestCase.skipTest`` is a simple way to have a test stop running and be
+reported as a skipped test, rather than a success, error or failure.  For
+example::
+
+  def test_make_symlink(self):
+      symlink = getattr(os, 'symlink', None)
+      if symlink is None:
+          self.skipTest("No symlink support")
+      symlink(whatever, something_else)
+
+Using ``skipTest`` means that you can make decisions about what tests to run
+as late as possible, and close to the actual tests.  Without it, you might be
+forced to use convoluted logic during test loading, which is a bit of a mess.
+
+
+Legacy skip support
+~~~~~~~~~~~~~~~~~~~
+
+If you are using this feature when running your test suite with a legacy
+``TestResult`` object that is missing the ``addSkip`` method, then the
+``addError`` method will be invoked instead.  If you are using a test result
+from testtools, you do not have to worry about this.
+
+In older versions of testtools, ``skipTest`` was known as ``skip``. Since
+Python 2.7 added ``skipTest`` support, the ``skip`` name is now deprecated.
+No warning is emitted yet; some time in the future we may start emitting one.
+
+
+addOnException
+--------------
+
+Sometimes, you might wish to do something only when a test fails.  Perhaps you
+need to run expensive diagnostic routines or some such.
+``TestCase.addOnException`` allows you to easily do just this.  For example::
+
+  class SomeTest(TestCase):
+      def setUp(self):
+          super(SomeTest, self).setUp()
+          self.server = self.useFixture(SomeServer())
+          self.addOnException(self.attach_server_diagnostics)
+
+      def attach_server_diagnostics(self, exc_info):
+          self.server.prep_for_diagnostics() # Expensive!
+          self.addDetail('server-diagnostics', self.server.get_diagnostics)
+
+      def test_a_thing(self):
+          self.assertEqual('cheese', 'chalk')
+
+In this example, ``attach_server_diagnostics`` will only be called when a test
+fails.  It is given the exc_info tuple of the error raised by the test, just
+in case it is needed.
+
+
+Twisted support
+---------------
+
+testtools provides *highly experimental* support for running Twisted tests –
+tests that return a Deferred_ and rely on the Twisted reactor.  You should not
+use this feature right now.  We reserve the right to change the API and
+behaviour without telling you first.
+
+However, if you are going to, here's how you do it::
+
+  from testtools import TestCase
+  from testtools.deferredruntest import AsynchronousDeferredRunTest
+
+  class MyTwistedTests(TestCase):
+
+      run_tests_with = AsynchronousDeferredRunTest
+
+      def test_foo(self):
+          # ...
+          return d
+
+In particular, note that you do *not* have to use a special base ``TestCase``
+in order to run Twisted tests.
+
+You can also run individual tests within a test case class using the Twisted
+test runner::
+
+   class MyTestsSomeOfWhichAreTwisted(TestCase):
+
+       def test_normal(self):
+           pass
+
+       @run_test_with(AsynchronousDeferredRunTest)
+       def test_twisted(self):
+           # ...
+           return d
+
+Here are some tips for converting your Trial tests into testtools tests.
+
+* Use the ``AsynchronousDeferredRunTest`` runner
+* Make sure to upcall to ``setUp`` and ``tearDown``
+* Don't use ``setUpClass`` or ``tearDownClass``
+* Don't expect setting .todo, .timeout or .skip attributes to do anything
+* ``flushLoggedErrors`` is ``testtools.deferredruntest.flush_logged_errors``
+* ``assertFailure`` is ``testtools.deferredruntest.assert_fails_with`` (see
+  the sketch after this list)
+* Trial spins the reactor a couple of times before cleaning it up,
+  ``AsynchronousDeferredRunTest`` does not.  If you rely on this behaviour, use
+  ``AsynchronousDeferredRunTestForBrokenTwisted``.
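+
+For example, here is a sketch of ``assert_fails_with`` (``make_async_call``
+is a hypothetical API that returns a Deferred)::
+
+  from testtools import TestCase
+  from testtools.deferredruntest import (
+      AsynchronousDeferredRunTest,
+      assert_fails_with,
+      )
+
+  class TestFailures(TestCase):
+
+      run_tests_with = AsynchronousDeferredRunTest
+
+      def test_bad_input_fails(self):
+          d = make_async_call('bad input')
+          return assert_fails_with(d, ValueError)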
+
+force_failure
+-------------
+
+Setting the ``testtools.TestCase.force_failure`` instance variable to ``True``
+will cause the test to be marked as a failure, but won't stop the test code
+from running (see :ref:`force_failure`).
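+
+A minimal sketch (``check_optional_feature`` is a hypothetical helper)::
+
+  def test_reports_but_continues(self):
+      if not check_optional_feature():
+          self.force_failure = True
+      # These checks still run and their results are still reported.
+      self.assertEqual(4, 2 + 2)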
+
+
+Test helpers
+============
+
+testtools comes with a few little things that make it a little bit easier to
+write tests.
+
+
+TestCase.patch
+--------------
+
+``patch`` is a convenient way to monkey-patch a Python object for the duration
+of your test.  It's especially useful for testing legacy code.  e.g.::
+
+  def test_foo(self):
+      my_stream = StringIO()
+      self.patch(sys, 'stderr', my_stream)
+      run_some_code_that_prints_to_stderr()
+      self.assertEqual('', my_stream.getvalue())
+
+The call to ``patch`` above masks ``sys.stderr`` with ``my_stream`` so that
+anything printed to stderr will be captured in a StringIO variable that can be
+actually tested. Once the test is done, the real ``sys.stderr`` is restored to
+its rightful place.
+
+
+Creation methods
+----------------
+
+Often when writing unit tests, you want to create an object that is a
+completely normal instance of its type.  You don't want there to be anything
+special about its properties, because you are testing generic behaviour rather
+than specific conditions.
+
+A lot of the time, test authors do this by making up silly strings and numbers
+and passing them to constructors (e.g. 42, 'foo', "bar" etc), and that's
+fine.  However, sometimes it's useful to be able to create arbitrary objects
+at will, without having to make up silly sample data.
+
+To help with this, ``testtools.TestCase`` implements creation methods called
+``getUniqueString`` and ``getUniqueInteger``.  They return strings and
+integers that are unique within the context of the test that can be used to
+assemble more complex objects.  Here's a basic example where
+``getUniqueString`` is used instead of saying "foo" or "bar" or whatever::
+
+  class SomeTest(TestCase):
+
+      def test_full_name(self):
+          first_name = self.getUniqueString()
+          last_name = self.getUniqueString()
+          p = Person(first_name, last_name)
+          self.assertEqual(p.full_name, "%s %s" % (first_name, last_name))
+
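+``getUniqueInteger`` works the same way for integers.  A sketch, assuming a
+hypothetical ``age`` attribute on ``Person``::
+
+  def test_age_storage(self):
+      age = self.getUniqueInteger()
+      p = Person(self.getUniqueString(), self.getUniqueString())
+      p.age = age
+      self.assertEqual(p.age, age)
+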
+
+And here's how creation methods could be used to make a complicated test::
+
+  class TestCoupleLogic(TestCase):
+
+      def make_arbitrary_person(self):
+          return Person(self.getUniqueString(), self.getUniqueString())
+
+      def test_get_invitation(self):
+          a = self.make_arbitrary_person()
+          b = self.make_arbitrary_person()
+          couple = Couple(a, b)
+          event_name = self.getUniqueString()
+          invitation = couple.get_invitation(event_name)
+          self.assertEqual(
+              invitation,
+              "We invite %s and %s to %s" % (
+                  a.full_name, b.full_name, event_name))
+
+Essentially, creation methods like these are a way of reducing the number of
+assumptions in your tests and communicating to test readers that the exact
+details of certain variables don't actually matter.
+
+See pages 419-423 of `xUnit Test Patterns`_ by Gerard Meszaros for a detailed
+discussion of creation methods.
+
+Test attributes
+---------------
+
+Inspired by the ``nosetests`` ``attr`` plugin, testtools provides support for
+marking up test methods with attributes, which are then exposed in the test
+id and can be used when filtering tests by id (e.g. via ``--load-list``)::
+
+  from testtools.testcase import attr, WithAttributes
+
+  class AnnotatedTests(WithAttributes, TestCase):
+
+      @attr('simple')
+      def test_one(self):
+          pass
+
+      @attr('more', 'than', 'one')
+      def test_two(self):
+          pass
+
+      @attr('or')
+      @attr('stacked')
+      def test_three(self):
+          pass
+
+General helpers
+===============
+
+Conditional imports
+-------------------
+
+Lots of the time we would like to conditionally import modules.  testtools
+uses the small ``extras`` library to do this; this functionality used to be
+part of testtools itself.
+
+Instead of::
+
+  try:
+      from twisted.internet import defer
+  except ImportError:
+      defer = None
+
+You can do::
+
+  defer = try_import('twisted.internet.defer')
+
+
+Instead of::
+
+  try:
+      from StringIO import StringIO
+  except ImportError:
+      from io import StringIO
+
+You can do::
+
+  StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+
+Safe attribute testing
+----------------------
+
+``hasattr`` is broken_ on many versions of Python. The helper ``safe_hasattr``
+can be used to safely test whether an object has a particular attribute. Like
+``try_import`` this used to be in testtools but is now in extras.
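+
+For example (``obj`` and ``do_something_with`` are placeholders)::
+
+  from extras import safe_hasattr
+
+  if safe_hasattr(obj, 'fileno'):
+      do_something_with(obj.fileno())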
+
+
+Nullary callables
+-----------------
+
+Sometimes you want to be able to pass around a function with the arguments
+already specified.  The normal way of doing this in Python is::
+
+  nullary = lambda: f(*args, **kwargs)
+  nullary()
+
+Which is mostly good enough, but loses a bit of debugging information.  If you
+take the ``repr()`` of ``nullary``, you're only told that it's a lambda, and
+you get none of the juicy meaning that you'd get from the ``repr()`` of ``f``.
+
+The solution is to use ``Nullary`` instead::
+
+  nullary = Nullary(f, *args, **kwargs)
+  nullary()
+
+Here, ``repr(nullary)`` will be the same as ``repr(f)``.
+
+
+.. _testrepository: https://launchpad.net/testrepository
+.. _Trial: http://twistedmatrix.com/documents/current/core/howto/testing.html
+.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
+.. _unittest2: http://pypi.python.org/pypi/unittest2
+.. _zope.testrunner: http://pypi.python.org/pypi/zope.testrunner/
+.. _xUnit test patterns: http://xunitpatterns.com/
+.. _fixtures: http://pypi.python.org/pypi/fixtures
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _doctest: http://docs.python.org/library/doctest.html
+.. _Deferred: http://twistedmatrix.com/documents/current/core/howto/defer.html
+.. _discover: http://pypi.python.org/pypi/discover
+.. _Distutils: http://docs.python.org/library/distutils.html
+.. _`setup configuration`: http://docs.python.org/distutils/configfile.html
+.. _broken: http://chipaca.com/post/3210673069/hasattr-17-less-harmful
diff --git a/third_party/testtools/doc/hacking.rst b/third_party/testtools/doc/hacking.rst
new file mode 100644
index 0000000..b25461f
--- /dev/null
+++ b/third_party/testtools/doc/hacking.rst
@@ -0,0 +1,194 @@
+=========================
+Contributing to testtools
+=========================
+
+Bugs and patches
+----------------
+
+`File bugs <https://bugs.launchpad.net/testtools/+filebug>`_ on Launchpad, and
+`send patches <https://github.com/testing-cabal/testtools/>`_ on Github.
+
+
+Coding style
+------------
+
+In general, follow `PEP 8`_ except where consistency with the standard
+library's unittest_ module would suggest otherwise.
+
+testtools currently supports Python 2.6 and later, including Python 3.
+
+Copyright assignment
+--------------------
+
+Part of testtools' raison d'être is to provide Python with improvements to the
+testing code it ships.  For that reason we require all non-trivial
+contributions to meet one of the following rules:
+
+* be inapplicable for inclusion in Python.
+* be able to be included in Python without further contact with the contributor.
+* be copyright assigned to Jonathan M. Lange.
+
+Please pick one of these and specify it when contributing code to testtools.
+
+
+Licensing
+---------
+
+All code that is not copyright assigned to Jonathan M. Lange (see Copyright
+Assignment above) needs to be licensed under the `MIT license`_ that testtools
+uses, so that testtools can ship it.
+
+
+Testing
+-------
+
+Please write tests for every feature.  This project ought to be a model
+example of well-tested Python code!
+
+Take particular care to make sure the *intent* of each test is clear.
+
+You can run tests with ``make check``.
+
+By default, testtools hides many levels of its own stack when running tests.
+This is for the convenience of users, who do not care about how, say, assert
+methods are implemented. However, when writing tests for testtools itself, it
+is often useful to see all levels of the stack. To do this, add
+``run_tests_with = FullStackRunTest`` to the top of a test's class definition.
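+
+A sketch (assuming ``FullStackRunTest`` lives in ``testtools.tests.helpers``)::
+
+  from testtools import TestCase
+  from testtools.tests.helpers import FullStackRunTest
+
+  class TestSomethingInternal(TestCase):
+
+      run_tests_with = FullStackRunTest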
+
+
+Discussion
+----------
+
+When submitting a patch, it will help the review process a lot if there's a
+clear explanation of what the change does and why you think the change is a
+good idea.  For crasher bugs, this is generally a no-brainer, but for UI bugs
+& API tweaks, the reason something is an improvement might not be obvious, so
+it's worth spelling out.
+
+If you are thinking of implementing a new feature, you might want to have that
+discussion on the mailing list (testtools-dev@lists.launchpad.net) before the
+patch goes up for review.  This is not at all mandatory, but getting feedback
+early can help avoid dead ends.
+
+
+Documentation
+-------------
+
+Documents are written using the Sphinx_ variant of reStructuredText_.  All
+public methods, functions, classes and modules must have API documentation.
+When changing code, be sure to check the API documentation to see if it could
+be improved.  Before submitting changes to trunk, look over them and see if
+the manuals ought to be updated.
+
+
+Source layout
+-------------
+
+The top-level directory contains the ``testtools/`` package directory, and
+miscellaneous files like ``README.rst`` and ``setup.py``.
+
+The ``testtools/`` directory is the Python package itself.  It is separated
+into submodules for internal clarity, but all public APIs should be “promoted”
+into the top-level package by importing them in ``testtools/__init__.py``.
+Users of testtools should never import a submodule in order to use a stable
+API.  Unstable APIs like ``testtools.matchers`` and
+``testtools.deferredruntest`` should be exported as submodules.
+
+Tests belong in ``testtools/tests/``.
+
+
+Committing to trunk
+-------------------
+
+Testtools is maintained using git, with its master repo at
+https://github.com/testing-cabal/testtools. This gives every contributor the
+ability to commit their work to their own branches. However permission must be
+granted to allow contributors to commit to the trunk branch.
+
+Commit access to trunk is obtained by joining the `testing-cabal`_, either as an
+Owner or a Committer. Commit access is contingent on obeying the testtools
+contribution policy, see `Copyright Assignment`_ above.
+
+
+Code Review
+-----------
+
+All code must be reviewed before landing on trunk. The process is to create a
+branch on Github and make a pull request into trunk. Before the pull request
+can be merged, it must be reviewed by someone who is:
+
+* not the author
+* a committer
+
+As a special exception, since there are few testtools committers and thus
+reviews are prone to blocking, a pull request from a committer that has not been
+reviewed after 24 hours may be merged by that committer. When the team is larger
+this policy will be revisited.
+
+Code reviewers should look for the quality of what is being submitted,
+including conformance with this HACKING file.
+
+Changes which all users should be made aware of should be documented in NEWS.
+
+We are now in full backwards compatibility mode: no more releases < 1.0.0, and
+breaking compatibility will require consensus on the testtools-dev mailing list.
+Exactly what constitutes a backwards incompatible change is vague, but coarsely:
+
+* adding required arguments or required calls to something that used to work
+* removing keyword or positional arguments, removing methods, functions or modules
+* changing behaviour someone may have reasonably depended on
+
+Some things are not compatibility issues:
+
+* changes to _ prefixed methods, functions, modules, packages.
+
+
+NEWS management
+---------------
+
+The file NEWS is structured as a sorted list of releases. Each release can have
+a free-form description and one or more sections with bullet point items.
+Sections in use today are 'Improvements' and 'Changes'. To ease merging between
+branches, the bullet points are kept alphabetically sorted. The release NEXT is
+permanently present at the top of the list.
+
+
+Releasing
+---------
+
+Prerequisites
++++++++++++++
+
+Membership in the testing-cabal org on github as committer.
+
+Membership in the pypi testtools project as maintainer.
+
+Membership in the https://launchpad.net/~testtools-committers.
+
+Tasks
++++++
+
+#. Choose a version number, say X.Y.Z
+#. In trunk, ensure ``__version__`` in ``__init__.py`` is ``(X, Y, Z, 'final', 0)``
+#. Under NEXT in NEWS add a heading with the version number X.Y.Z.
+#. Possibly write a blurb into NEWS.
+#. Commit the changes.
+#. Tag the release, ``git tag -s testtools-X.Y.Z``
+#. Run ``make release``, which:
+   #. Creates a source distribution and uploads to PyPI
+   #. Ensures all Fix Committed bugs are in the release milestone
+   #. Makes a release on Launchpad and uploads the tarball
+   #. Marks all the Fix Committed bugs as Fix Released
+   #. Creates a new milestone
+#. Change __version__ in __init__.py to the probable next version.
+   e.g. to ``(X, Y, Z+1, 'dev', 0)``.
+#. Commit 'Opening X.Y.Z+1 for development.'
+#. If a new series has been created (e.g. 0.10.0), make the series on Launchpad.
+#. Push trunk to Github, ``git push --tags origin master``
+
+.. _PEP 8: http://www.python.org/dev/peps/pep-0008/
+.. _unittest: http://docs.python.org/library/unittest.html
+.. _MIT license: http://www.opensource.org/licenses/mit-license.php
+.. _Sphinx: http://sphinx.pocoo.org/
+.. _restructuredtext: http://docutils.sourceforge.net/rst.html
+.. _testing-cabal: https://github.com/organizations/testing-cabal/
diff --git a/third_party/testtools/doc/index.rst b/third_party/testtools/doc/index.rst
new file mode 100644
index 0000000..67bb017
--- /dev/null
+++ b/third_party/testtools/doc/index.rst
@@ -0,0 +1,35 @@
+.. testtools documentation master file, created by
+   sphinx-quickstart on Sun Nov 28 13:45:40 2010.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+testtools: tasteful testing for Python
+======================================
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework. These extensions have been derived from many years of experience
+with unit testing in Python and come from many different sources. testtools
+also ports recent unittest changes to all the Python versions it supports
+(currently Python 2.6 and later, including Python 3), to allow the use of
+modern language features within testtools.
+
+
+Contents:
+
+.. toctree::
+   :maxdepth: 1
+
+   overview
+   for-test-authors
+   for-framework-folk
+   hacking
+   Changes to testtools <news>
+   API reference documentation <api>
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/third_party/testtools/doc/make.bat b/third_party/testtools/doc/make.bat
new file mode 100644
index 0000000..f8c1fd5
--- /dev/null
+++ b/third_party/testtools/doc/make.bat
@@ -0,0 +1,113 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html      to make standalone HTML files
+	echo.  dirhtml   to make HTML files named index.html in directories
+	echo.  pickle    to make pickle files
+	echo.  json      to make JSON files
+	echo.  htmlhelp  to make HTML files and a HTML help project
+	echo.  qthelp    to make HTML files and a qthelp project
+	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  changes   to make an overview over all changed/added/deprecated items
+	echo.  linkcheck to check all external links for integrity
+	echo.  doctest   to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\testtools.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\testtools.ghc
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+:end
diff --git a/third_party/testtools/doc/overview.rst b/third_party/testtools/doc/overview.rst
new file mode 100644
index 0000000..a01dc3d
--- /dev/null
+++ b/third_party/testtools/doc/overview.rst
@@ -0,0 +1,101 @@
+======================================
+testtools: tasteful testing for Python
+======================================
+
+testtools is a set of extensions to the Python standard library's unit testing
+framework. These extensions have been derived from many years of experience
+with unit testing in Python and come from many different sources. testtools
+supports Python versions all the way back to Python 2.6.
+
+What better way to start than with a contrived code snippet?::
+
+  from testtools import TestCase
+  from testtools.content import Content
+  from testtools.content_type import UTF8_TEXT
+  from testtools.matchers import Equals
+
+  from myproject import SillySquareServer
+
+  class TestSillySquareServer(TestCase):
+
+      def setUp(self):
+          super(TestSillySquareServer, self).setUp()
+          self.server = self.useFixture(SillySquareServer())
+          self.addCleanup(self.attach_log_file)
+
+      def attach_log_file(self):
+          self.addDetail(
+              'log-file',
+              Content(UTF8_TEXT,
+                      lambda: open(self.server.logfile, 'r').readlines()))
+
+      def test_server_is_cool(self):
+          self.assertThat(self.server.temperature, Equals("cool"))
+
+      def test_square(self):
+          self.assertThat(self.server.silly_square_of(7), Equals(49))
+
+
+Why use testtools?
+==================
+
+Better assertion methods
+------------------------
+
+The standard assertion methods that come with unittest aren't as helpful as
+they could be, and there aren't quite enough of them.  testtools adds
+``assertIn``, ``assertIs``, ``assertIsInstance`` and negative forms such as
+``assertNotIn`` and ``assertIsNot``.
+
+
+Matchers: better than assertion methods
+---------------------------------------
+
+Of course, in any serious project you want to be able to have assertions that
+are specific to that project and the particular problem that it is addressing.
+Rather than forcing you to define your own assertion methods and maintain your
+own inheritance hierarchy of ``TestCase`` classes, testtools lets you write
+your own "matchers", custom predicates that can be plugged into a unit test::
+
+  def test_response_has_bold(self):
+     # The response has bold text.
+     response = self.server.getResponse()
+     self.assertThat(response, HTMLContains(Tag('bold', 'b')))
+
+
+More debugging info, when you need it
+--------------------------------------
+
+testtools makes it easy to add arbitrary data to your test result.  If you
+want to know what's in a log file when a test fails, or what the load was on
+the computer when a test started, or what files were open, you can add that
+information with ``TestCase.addDetail``, and it will appear in the test
+results if that test fails.
+
+
+Extend unittest, but stay compatible and re-usable
+--------------------------------------------------
+
+testtools goes to great lengths to allow serious test authors and test
+*framework* authors to do whatever they like with their tests and their
+extensions while staying compatible with the standard library's unittest.
+
+testtools has completely parametrized how exceptions raised in tests are
+mapped to ``TestResult`` methods and how tests are actually executed (ever
+wanted ``tearDown`` to be called regardless of whether ``setUp`` succeeds?).
+
+It also provides many simple but handy utilities, like the ability to clone a
+test, a ``MultiTestResult`` object that lets many result objects get the
+results from one test suite, and adapters to bring legacy ``TestResult``
+into our new golden age.
+
+
+Cross-Python compatibility
+--------------------------
+
+testtools gives you the very latest in unit testing technology in a way that
+will work with Python 2.6, 2.7, 3.1 and 3.2.
+
+If you wish to use testtools with Python 2.4 or 2.5, please use testtools
+0.9.15, the last release to support those versions.  We found the constraints
+of avoiding newer language features onerous as we added more support for
+Python 3.
diff --git a/third_party/testtools/scripts/README b/third_party/testtools/scripts/README
new file mode 100644
index 0000000..648f105
--- /dev/null
+++ b/third_party/testtools/scripts/README
@@ -0,0 +1,3 @@
+These are scripts to help with building, maintaining and releasing testtools.
+
+There is little here for anyone except a testtools contributor.
diff --git a/third_party/testtools/scripts/_lp_release.py b/third_party/testtools/scripts/_lp_release.py
new file mode 100644
index 0000000..ac27e47
--- /dev/null
+++ b/third_party/testtools/scripts/_lp_release.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+
+"""Release testtools on Launchpad.
+
+Steps:
+ 1. Make sure all "Fix committed" bugs are assigned to 'next'
+ 2. Rename 'next' to the new version
+ 3. Release the milestone
+ 4. Upload the tarball
+ 5. Create a new 'next' milestone
+ 6. Mark all "Fix committed" bugs in the milestone as "Fix released"
+
+Assumes that NEWS is in the parent directory, that the release sections are
+underlined with '~' and the subsections are underlined with '-'.
+
+Assumes that this file is in the 'scripts' directory of a testtools tree that
+has already had a tarball built and uploaded with 'python setup.py sdist
+upload --sign'.
+"""
+
+from datetime import datetime, timedelta, tzinfo
+import logging
+import os
+import sys
+
+from launchpadlib.launchpad import Launchpad
+from launchpadlib import uris
+
+
+APP_NAME = 'testtools-lp-release'
+CACHE_DIR = os.path.expanduser('~/.launchpadlib/cache')
+SERVICE_ROOT = uris.LPNET_SERVICE_ROOT
+
+FIX_COMMITTED = u"Fix Committed"
+FIX_RELEASED = u"Fix Released"
+
+# Launchpad file type for a tarball upload.
+CODE_RELEASE_TARBALL = 'Code Release Tarball'
+
+PROJECT_NAME = 'testtools'
+NEXT_MILESTONE_NAME = 'next'
+
+
+class _UTC(tzinfo):
+    """UTC"""
+
+    def utcoffset(self, dt):
+        return timedelta(0)
+
+    def tzname(self, dt):
+        return "UTC"
+
+    def dst(self, dt):
+        return timedelta(0)
+
+UTC = _UTC()
+
+
+def configure_logging():
+    level = logging.INFO
+    log = logging.getLogger(APP_NAME)
+    log.setLevel(level)
+    handler = logging.StreamHandler()
+    handler.setLevel(level)
+    formatter = logging.Formatter("%(levelname)s: %(message)s")
+    handler.setFormatter(formatter)
+    log.addHandler(handler)
+    return log
+LOG = configure_logging()
+
+
+def get_path(relpath):
+    """Get the absolute path for something relative to this file."""
+    return os.path.abspath(
+        os.path.join(
+            os.path.dirname(os.path.dirname(__file__)), relpath))
+
+
+def assign_fix_committed_to_next(testtools, next_milestone):
+    """Find all 'Fix Committed' and make sure they are in 'next'."""
+    fixed_bugs = list(testtools.searchTasks(status=FIX_COMMITTED))
+    for task in fixed_bugs:
+        LOG.debug("%s" % (task.title,))
+        if task.milestone != next_milestone:
+            task.milestone = next_milestone
+            LOG.info("Re-assigning %s" % (task.title,))
+            task.lp_save()
+
+
+def rename_milestone(next_milestone, new_name):
+    """Rename 'next_milestone' to 'new_name'."""
+    LOG.info("Renaming %s to %s" % (next_milestone.name, new_name))
+    next_milestone.name = new_name
+    next_milestone.lp_save()
+
+
+def get_release_notes_and_changelog(news_path):
+    release_notes = []
+    changelog = []
+    state = None
+    last_line = None
+
+    def is_heading_marker(line, marker_char):
+        return line and line == marker_char * len(line)
+
+    LOG.debug("Loading NEWS from %s" % (news_path,))
+    with open(news_path, 'r') as news:
+        for line in news:
+            line = line.strip()
+            if state is None:
+                if (is_heading_marker(line, '~') and
+                    not last_line.startswith('NEXT')):
+                    milestone_name = last_line
+                    state = 'release-notes'
+                else:
+                    last_line = line
+            elif state == 'title':
+                # The line after the title is a heading marker line, so we
+                # ignore it and change state. That which follows are the
+                # release notes.
+                state = 'release-notes'
+            elif state == 'release-notes':
+                if is_heading_marker(line, '-'):
+                    state = 'changelog'
+                    # Last line in the release notes is actually the first
+                    # line of the changelog.
+                    changelog = [release_notes.pop(), line]
+                else:
+                    release_notes.append(line)
+            elif state == 'changelog':
+                if is_heading_marker(line, '~'):
+                    # Last line in changelog is actually the first line of the
+                    # next section.
+                    changelog.pop()
+                    break
+                else:
+                    changelog.append(line)
+            else:
+                raise ValueError("Couldn't parse NEWS")
+
+    release_notes = '\n'.join(release_notes).strip() + '\n'
+    changelog = '\n'.join(changelog).strip() + '\n'
+    return milestone_name, release_notes, changelog
+
+
+def release_milestone(milestone, release_notes, changelog):
+    date_released = datetime.now(tz=UTC)
+    LOG.info(
+        "Releasing milestone: %s, date %s" % (milestone.name, date_released))
+    release = milestone.createProductRelease(
+        date_released=date_released,
+        changelog=changelog,
+        release_notes=release_notes,
+        )
+    milestone.is_active = False
+    milestone.lp_save()
+    return release
+
+
+def create_milestone(series, name):
+    """Create a new milestone in the same series as 'release_milestone'."""
+    LOG.info("Creating milestone %s in series %s" % (name, series.name))
+    return series.newMilestone(name=name)
+
+
+def close_fixed_bugs(milestone):
+    tasks = list(milestone.searchTasks())
+    for task in tasks:
+        LOG.debug("Found %s" % (task.title,))
+        if task.status == FIX_COMMITTED:
+            LOG.info("Closing %s" % (task.title,))
+            task.status = FIX_RELEASED
+        else:
+            LOG.warning(
+                "Bug not fixed, removing from milestone: %s" % (task.title,))
+            task.milestone = None
+        task.lp_save()
+
+
+def upload_tarball(release, tarball_path):
+    with open(tarball_path) as tarball:
+        tarball_content = tarball.read()
+    sig_path = tarball_path + '.asc'
+    with open(sig_path) as sig:
+        sig_content = sig.read()
+    tarball_name = os.path.basename(tarball_path)
+    LOG.info("Uploading tarball: %s" % (tarball_path,))
+    release.add_file(
+        file_type=CODE_RELEASE_TARBALL,
+        file_content=tarball_content, filename=tarball_name,
+        signature_content=sig_content,
+        signature_filename=sig_path,
+        content_type="application/x-gzip; charset=binary")
+
+
+def release_project(launchpad, project_name, next_milestone_name):
+    testtools = launchpad.projects[project_name]
+    next_milestone = testtools.getMilestone(name=next_milestone_name)
+    release_name, release_notes, changelog = get_release_notes_and_changelog(
+        get_path('NEWS'))
+    LOG.info("Releasing %s %s" % (project_name, release_name))
+    # Since reversing these operations is hard, and inspecting errors from
+    # Launchpad is also difficult, do some looking before leaping.
+    errors = []
+    tarball_path = get_path('dist/%s-%s.tar.gz' % (project_name, release_name,))
+    if not os.path.isfile(tarball_path):
+        errors.append("%s does not exist" % (tarball_path,))
+    if not os.path.isfile(tarball_path + '.asc'):
+        errors.append("%s does not exist" % (tarball_path + '.asc',))
+    if testtools.getMilestone(name=release_name):
+        errors.append("Milestone %s exists on %s" % (release_name, project_name))
+    if errors:
+        for error in errors:
+            LOG.error(error)
+        return 1
+    assign_fix_committed_to_next(testtools, next_milestone)
+    rename_milestone(next_milestone, release_name)
+    release = release_milestone(next_milestone, release_notes, changelog)
+    upload_tarball(release, tarball_path)
+    create_milestone(next_milestone.series_target, next_milestone_name)
+    close_fixed_bugs(next_milestone)
+    return 0
+
+
+def main(args):
+    launchpad = Launchpad.login_with(
+        APP_NAME, SERVICE_ROOT, CACHE_DIR, credentials_file='.lp_creds')
+    return release_project(launchpad, PROJECT_NAME, NEXT_MILESTONE_NAME)
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/third_party/testtools/scripts/all-pythons b/third_party/testtools/scripts/all-pythons
new file mode 100755
index 0000000..10fd6de
--- /dev/null
+++ b/third_party/testtools/scripts/all-pythons
@@ -0,0 +1,93 @@
+#!/usr/bin/python
+
+"""Run the testtools test suite for all supported Pythons.
+
+Prints output as a subunit test suite. If anything goes to stderr, that is
+treated as a test error. If a Python is not available, then it is skipped.
+"""
+
+from datetime import datetime
+import os
+import subprocess
+import sys
+
+import subunit
+from subunit import (
+    iso8601,
+    _make_stream_binary,
+    TestProtocolClient,
+    TestProtocolServer,
+    )
+from testtools import (
+    PlaceHolder,
+    TestCase,
+    )
+from testtools.compat import BytesIO
+from testtools.content import text_content
+
+
+ROOT = os.path.dirname(os.path.dirname(__file__))
+
+
+def run_for_python(version, result, tests):
+    if not tests:
+        tests = ['testtools.tests.test_suite']
+    # XXX: This could probably be broken up and put into subunit.
+    python = 'python%s' % (version,)
+    # XXX: Correct API, but subunit doesn't support it. :(
+    # result.tags(set(python), set())
+    result.time(now())
+    test = PlaceHolder(''.join(c for c in python if c != '.'))
+    process = subprocess.Popen(
+        '%s -c pass' % (python,), shell=True,
+        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    process.communicate()
+
+    if process.returncode:
+        result.startTest(test)
+        result.addSkip(test, reason='%s not available' % (python,))
+        result.stopTest(test)
+        return
+
+    env = os.environ.copy()
+    if env.get('PYTHONPATH', None):
+        env['PYTHONPATH'] = os.pathsep.join([ROOT, env['PYTHONPATH']])
+    else:
+        env['PYTHONPATH'] = ROOT
+    result.time(now())
+    protocol = TestProtocolServer(result)
+    subunit_path = os.path.join(os.path.dirname(subunit.__file__), 'run.py')
+    cmd = [
+        python,
+        '-W', 'ignore:Module testtools was already imported',
+        subunit_path]
+    cmd.extend(tests)
+    process = subprocess.Popen(
+        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
+    _make_stream_binary(process.stdout)
+    _make_stream_binary(process.stderr)
+    # XXX: This buffers everything. Bad for memory, bad for getting progress
+    # on jenkins.
+    output, error = process.communicate()
+    protocol.readFrom(BytesIO(output))
+    if error:
+        result.startTest(test)
+        result.addError(test, details={
+            'stderr': text_content(error),
+            })
+        result.stopTest(test)
+    result.time(now())
+    # XXX: Correct API, but subunit doesn't support it. :(
+    #result.tags(set(), set(python))
+
+
+def now():
+    return datetime.utcnow().replace(tzinfo=iso8601.Utc())
+
+
+if __name__ == '__main__':
+    sys.path.append(ROOT)
+    result = TestProtocolClient(sys.stdout)
+    for version in '2.6 2.7 3.0 3.1 3.2'.split():
+        run_for_python(version, result, sys.argv[1:])
diff --git a/third_party/testtools/scripts/update-rtfd b/third_party/testtools/scripts/update-rtfd
new file mode 100755
index 0000000..92a19da
--- /dev/null
+++ b/third_party/testtools/scripts/update-rtfd
@@ -0,0 +1,11 @@
+#!/usr/bin/python
+
+from urllib2 import urlopen
+
+
+WEB_HOOK = 'http://readthedocs.org/build/588'
+
+
+if __name__ == '__main__':
+    urlopen(WEB_HOOK, data='  ')
diff --git a/third_party/testtools/setup.cfg b/third_party/testtools/setup.cfg
new file mode 100644
index 0000000..9f95add
--- /dev/null
+++ b/third_party/testtools/setup.cfg
@@ -0,0 +1,4 @@
+[test]
+test_module = testtools.tests
+buffer=1
+catch=1
diff --git a/third_party/testtools/setup.py b/third_party/testtools/setup.py
new file mode 100755
index 0000000..dacbf91
--- /dev/null
+++ b/third_party/testtools/setup.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+"""Distutils installer for testtools."""
+
+from setuptools import setup
+from distutils.command.build_py import build_py
+import email
+import os
+import sys
+
+import testtools
+cmd_class = {}
+if getattr(testtools, 'TestCommand', None) is not None:
+    cmd_class['test'] = testtools.TestCommand
+
+
+class testtools_build_py(build_py):
+    def build_module(self, module, module_file, package):
+        if sys.version_info >= (3,) and module == '_compat2x':
+            return
+        return build_py.build_module(self, module, module_file, package)
+cmd_class['build_py'] = testtools_build_py
+
+
+def get_version_from_pkg_info():
+    """Get the version from PKG-INFO file if we can."""
+    pkg_info_path = os.path.join(os.path.dirname(__file__), 'PKG-INFO')
+    try:
+        pkg_info_file = open(pkg_info_path, 'r')
+    except (IOError, OSError):
+        return None
+    try:
+        pkg_info = email.message_from_file(pkg_info_file)
+    except email.MessageError:
+        return None
+    return pkg_info.get('Version', None)
+
+
+def get_version():
+    """Return the version of testtools that we are building."""
+    version = '.'.join(
+        str(component) for component in testtools.__version__[0:3])
+    phase = testtools.__version__[3]
+    if phase == 'final':
+        return version
+    pkg_info_version = get_version_from_pkg_info()
+    if pkg_info_version:
+        return pkg_info_version
+    # Apparently if we just say "snapshot" then distribute won't accept it
+    # as satisfying versioned dependencies. This is a problem for the
+    # daily build version.
+    return "snapshot-%s" % (version,)
+
+
+def get_long_description():
+    manual_path = os.path.join(
+        os.path.dirname(__file__), 'doc/overview.rst')
+    return open(manual_path).read()
+
+
+setup(name='testtools',
+      author='Jonathan M. Lange',
+      author_email='jml+testtools at mumak.net',
+      url='https://github.com/testing-cabal/testtools',
+      description=('Extensions to the Python standard library unit testing '
+                   'framework'),
+      long_description=get_long_description(),
+      version=get_version(),
+      classifiers=["License :: OSI Approved :: MIT License",
+        "Programming Language :: Python :: 3",
+        ],
+      packages=[
+        'testtools',
+        'testtools.matchers',
+        'testtools.testresult',
+        'testtools.tests',
+        'testtools.tests.matchers',
+        ],
+      cmdclass=cmd_class,
+      zip_safe=False,
+      install_requires=[
+        'extras',
+        # 'mimeparse' has not been uploaded by the maintainer with Python3 compat
+        # but someone kindly uploaded a fixed version as 'python-mimeparse'.
+        'python-mimeparse',
+        ],
+      )
diff --git a/third_party/testtools/testtools/__init__.py b/third_party/testtools/testtools/__init__.py
new file mode 100644
index 0000000..973083a
--- /dev/null
+++ b/third_party/testtools/testtools/__init__.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Extensions to the standard Python unittest library."""
+
+__all__ = [
+    'clone_test_with_new_id',
+    'CopyStreamResult',
+    'ConcurrentTestSuite',
+    'ConcurrentStreamTestSuite',
+    'DecorateTestCaseResult',
+    'ErrorHolder',
+    'ExpectedException',
+    'ExtendedToOriginalDecorator',
+    'ExtendedToStreamDecorator',
+    'FixtureSuite',
+    'iterate_tests',
+    'MultipleExceptions',
+    'MultiTestResult',
+    'PlaceHolder',
+    'run_test_with',
+    'Tagger',
+    'TestCase',
+    'TestCommand',
+    'TestByTestResult',
+    'TestResult',
+    'TestResultDecorator',
+    'TextTestResult',
+    'RunTest',
+    'skip',
+    'skipIf',
+    'skipUnless',
+    'StreamFailFast',
+    'StreamResult',
+    'StreamResultRouter',
+    'StreamSummary',
+    'StreamTagger',
+    'StreamToDict',
+    'StreamToExtendedDecorator',
+    'StreamToQueue',
+    'TestControl',
+    'ThreadsafeForwardingResult',
+    'TimestampingStreamResult',
+    'try_import',
+    'try_imports',
+    ]
+
+# Compat - removal announced in 0.9.25.
+try:
+    from extras import (
+        try_import,
+        try_imports,
+        )
+except ImportError:
+    # Support reading __init__ for __version__ without extras, because pip does
+    # not support setup_requires.
+    pass
+else:
+
+    from testtools.matchers._impl import (
+        Matcher,
+        )
+    # Shut up, pyflakes. We are importing for documentation, not for
+    # namespacing.
+    Matcher
+
+    from testtools.runtest import (
+        MultipleExceptions,
+        RunTest,
+        )
+    from testtools.testcase import (
+        DecorateTestCaseResult,
+        ErrorHolder,
+        ExpectedException,
+        PlaceHolder,
+        TestCase,
+        clone_test_with_new_id,
+        run_test_with,
+        skip,
+        skipIf,
+        skipUnless,
+        )
+    from testtools.testresult import (
+        CopyStreamResult,
+        ExtendedToOriginalDecorator,
+        ExtendedToStreamDecorator,
+        MultiTestResult,
+        StreamFailFast,
+        StreamResult,
+        StreamResultRouter,
+        StreamSummary,
+        StreamTagger,
+        StreamToDict,
+        StreamToExtendedDecorator,
+        StreamToQueue,
+        Tagger,
+        TestByTestResult,
+        TestControl,
+        TestResult,
+        TestResultDecorator,
+        TextTestResult,
+        ThreadsafeForwardingResult,
+        TimestampingStreamResult,
+        )
+    from testtools.testsuite import (
+        ConcurrentTestSuite,
+        ConcurrentStreamTestSuite,
+        FixtureSuite,
+        iterate_tests,
+        )
+    from testtools.distutilscmd import (
+        TestCommand,
+        )
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)."  Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (1, 1, 0, 'final', 0)
diff --git a/third_party/testtools/testtools/_compat2x.py b/third_party/testtools/testtools/_compat2x.py
new file mode 100644
index 0000000..61aba67
--- /dev/null
+++ b/third_party/testtools/testtools/_compat2x.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+"""Compatibility helpers that are valid syntax in Python 2.x.
+
+Only add things here if they *only* work in Python 2.x or are Python 2
+alternatives to things that *only* work in Python 3.x.
+"""
+
+__all__ = [
+    'reraise',
+    ]
+
+
+def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
+    """Re-raise an exception received from sys.exc_info() or similar."""
+    raise exc_class, exc_obj, exc_tb
diff --git a/third_party/testtools/testtools/_compat3x.py b/third_party/testtools/testtools/_compat3x.py
new file mode 100644
index 0000000..2bfdefd
--- /dev/null
+++ b/third_party/testtools/testtools/_compat3x.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+"""Compatibility helpers that are valid syntax in Python 3.x.
+
+Only add things here if they *only* work in Python 3.x or are Python 3
+alternatives to things that *only* work in Python 2.x.
+"""
+
+__all__ = [
+    'reraise',
+    ]
+
+
+def reraise(exc_class, exc_obj, exc_tb, _marker=object()):
+    """Re-raise an exception received from sys.exc_info() or similar."""
+    raise exc_obj.with_traceback(exc_tb)
diff --git a/third_party/testtools/testtools/_spinner.py b/third_party/testtools/testtools/_spinner.py
new file mode 100644
index 0000000..baf455a
--- /dev/null
+++ b/third_party/testtools/testtools/_spinner.py
@@ -0,0 +1,316 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Evil reactor-spinning logic for running Twisted tests.
+
+This code is highly experimental, liable to change and not to be trusted.  If
+you couldn't write this yourself, you should not be using it.
+"""
+
+__all__ = [
+    'DeferredNotFired',
+    'extract_result',
+    'NoResultError',
+    'not_reentrant',
+    'ReentryError',
+    'Spinner',
+    'StaleJunkError',
+    'TimeoutError',
+    'trap_unhandled_errors',
+    ]
+
+import signal
+
+from testtools.monkey import MonkeyPatcher
+
+from twisted.internet import defer
+from twisted.internet.base import DelayedCall
+from twisted.internet.interfaces import IReactorThreads
+from twisted.python.failure import Failure
+from twisted.python.util import mergeFunctionMetadata
+
+
+class ReentryError(Exception):
+    """Raised when we try to re-enter a function that forbids it."""
+
+    def __init__(self, function):
+        Exception.__init__(self,
+            "%r in not re-entrant but was called within a call to itself."
+            % (function,))
+
+
+def not_reentrant(function, _calls={}):
+    """Decorates a function as not being re-entrant.
+
+    The decorated function will raise an error if called from within itself.
+    """
+    def decorated(*args, **kwargs):
+        if _calls.get(function, False):
+            raise ReentryError(function)
+        _calls[function] = True
+        try:
+            return function(*args, **kwargs)
+        finally:
+            _calls[function] = False
+    return mergeFunctionMetadata(function, decorated)
+
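+# Example (sketch): guard a function against accidental recursion.
+#
+#     @not_reentrant
+#     def spin():
+#         spin()  # raises ReentryError instead of recursing forever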
+
+class DeferredNotFired(Exception):
+    """Raised when we extract a result from a Deferred that's not fired yet."""
+
+
+def extract_result(deferred):
+    """Extract the result from a fired deferred.
+
+    It can happen that you have an API that returns Deferreds for
+    compatibility with Twisted code, but is in fact synchronous, i.e. the
+    Deferreds it returns have always fired by the time it returns.  In this
+    case, you can use this function to convert the result back into the usual
+    form for a synchronous API, i.e. the result itself or a raised exception.
+
+    It would be very bad form to use this as some way of checking if a
+    Deferred has fired.
+    """
+    failures = []
+    successes = []
+    deferred.addCallbacks(successes.append, failures.append)
+    if len(failures) == 1:
+        failures[0].raiseException()
+    elif len(successes) == 1:
+        return successes[0]
+    else:
+        raise DeferredNotFired("%r has not fired yet." % (deferred,))
+
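+# Example (sketch): a synchronous API that returns already-fired Deferreds.
+#
+#     d = defer.succeed(42)
+#     assert extract_result(d) == 42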
+
+def trap_unhandled_errors(function, *args, **kwargs):
+    """Run a function, trapping any unhandled errors in Deferreds.
+
+    Assumes that 'function' will have handled any errors in Deferreds by the
+    time it is complete.  This is almost never true of any Twisted code, since
+    you can never tell when someone has added an errback to a Deferred.
+
+    If 'function' raises, then don't bother doing any unhandled error
+    jiggery-pokery, since something horrible has probably happened anyway.
+
+    :return: A tuple of '(result, error)', where 'result' is the value
+        returned by 'function' and 'error' is a list of 'defer.DebugInfo'
+        objects that have unhandled errors in Deferreds.
+    """
+    real_DebugInfo = defer.DebugInfo
+    debug_infos = []
+    def DebugInfo():
+        info = real_DebugInfo()
+        debug_infos.append(info)
+        return info
+    defer.DebugInfo = DebugInfo
+    try:
+        result = function(*args, **kwargs)
+    finally:
+        defer.DebugInfo = real_DebugInfo
+    errors = []
+    for info in debug_infos:
+        if info.failResult is not None:
+            errors.append(info)
+            # Disable the destructor that logs to error. We are already
+            # catching the error here.
+            info.__del__ = lambda: None
+    return result, errors
+
+
+class TimeoutError(Exception):
+    """Raised when run_in_reactor takes too long to run a function."""
+
+    def __init__(self, function, timeout):
+        Exception.__init__(self,
+            "%r took longer than %s seconds" % (function, timeout))
+
+
+class NoResultError(Exception):
+    """Raised when the reactor has stopped but we don't have any result."""
+
+    def __init__(self):
+        Exception.__init__(self,
+            "Tried to get test's result from Deferred when no result is "
+            "available.  Probably means we received SIGINT or similar.")
+
+
+class StaleJunkError(Exception):
+    """Raised when there's junk in the spinner from a previous run."""
+
+    def __init__(self, junk):
+        Exception.__init__(self,
+            "There was junk in the spinner from a previous run. "
+            "Use clear_junk() to clear it out: %r" % (junk,))
+
+
+class Spinner(object):
+    """Spin the reactor until a function is done.
+
+    This class emulates the behaviour of twisted.trial in that it grotesquely
+    and horribly spins the Twisted reactor while a function is running, and
+    then kills the reactor when that function is complete and all the
+    callbacks in its chains are done.
+    """
+
+    _UNSET = object()
+
+    # Signals that we save and restore for each spin.
+    _PRESERVED_SIGNALS = [
+        'SIGINT',
+        'SIGTERM',
+        'SIGCHLD',
+        ]
+
+    # There are many APIs within Twisted itself where a Deferred fires but
+    # leaves cleanup work scheduled for the reactor to do.  Arguably, many of
+    # these are bugs.  As such, we provide a facility to iterate the reactor
+    # event loop a number of times after every call, in order to shake out
+    # these buggy-but-commonplace events.  The default is 0, because that is
+    # the ideal, and it actually works for many cases.
+    _OBLIGATORY_REACTOR_ITERATIONS = 0
+
+    def __init__(self, reactor, debug=False):
+        """Construct a Spinner.
+
+        :param reactor: A Twisted reactor.
+        :param debug: Whether or not to enable Twisted's debugging.  Defaults
+            to False.
+        """
+        self._reactor = reactor
+        self._timeout_call = None
+        self._success = self._UNSET
+        self._failure = self._UNSET
+        self._saved_signals = []
+        self._junk = []
+        self._debug = debug
+
+    def _cancel_timeout(self):
+        if self._timeout_call:
+            self._timeout_call.cancel()
+
+    def _get_result(self):
+        if self._failure is not self._UNSET:
+            self._failure.raiseException()
+        if self._success is not self._UNSET:
+            return self._success
+        raise NoResultError()
+
+    def _got_failure(self, result):
+        self._cancel_timeout()
+        self._failure = result
+
+    def _got_success(self, result):
+        self._cancel_timeout()
+        self._success = result
+
+    def _stop_reactor(self, ignored=None):
+        """Stop the reactor!"""
+        self._reactor.crash()
+
+    def _timed_out(self, function, timeout):
+        e = TimeoutError(function, timeout)
+        self._failure = Failure(e)
+        self._stop_reactor()
+
+    def _clean(self):
+        """Clean up any junk in the reactor.
+
+        Will always iterate the reactor a number of times equal to
+        ``Spinner._OBLIGATORY_REACTOR_ITERATIONS``.  This is to work around
+        bugs in various Twisted APIs where a Deferred fires but still leaves
+        work (e.g. cancelling a call, actually closing a connection) for the
+        reactor to do.
+        """
+        for i in range(self._OBLIGATORY_REACTOR_ITERATIONS):
+            self._reactor.iterate(0)
+        junk = []
+        for delayed_call in self._reactor.getDelayedCalls():
+            delayed_call.cancel()
+            junk.append(delayed_call)
+        for selectable in self._reactor.removeAll():
+            # Twisted sends a 'KILL' signal to selectables that provide
+            # IProcessTransport.  Since only _dumbwin32proc processes do this,
+            # we aren't going to bother.
+            junk.append(selectable)
+        if IReactorThreads.providedBy(self._reactor):
+            if self._reactor.threadpool is not None:
+                self._reactor._stopThreadPool()
+        self._junk.extend(junk)
+        return junk
+
+    def clear_junk(self):
+        """Clear out our recorded junk.
+
+        :return: Whatever junk was there before.
+        """
+        junk = self._junk
+        self._junk = []
+        return junk
+
+    def get_junk(self):
+        """Return any junk that has been found on the reactor."""
+        return self._junk
+
+    def _save_signals(self):
+        available_signals = [
+            getattr(signal, name, None) for name in self._PRESERVED_SIGNALS]
+        self._saved_signals = [
+            (sig, signal.getsignal(sig)) for sig in available_signals if sig]
+
+    def _restore_signals(self):
+        for sig, hdlr in self._saved_signals:
+            signal.signal(sig, hdlr)
+        self._saved_signals = []
+
+    @not_reentrant
+    def run(self, timeout, function, *args, **kwargs):
+        """Run 'function' in a reactor.
+
+        If 'function' returns a Deferred, the reactor will keep spinning until
+        the Deferred fires and its chain completes or until the timeout is
+        reached -- whichever comes first.
+
+        :raise TimeoutError: If 'timeout' is reached before the Deferred
+            returned by 'function' has completed its callback chain.
+        :raise NoResultError: If the reactor is somehow interrupted before
+            the Deferred returned by 'function' has completed its callback
+            chain.
+        :raise StaleJunkError: If there's junk in the spinner from a previous
+            run.
+        :return: Whatever is at the end of the function's callback chain.  If
+            it's an error, then raise that.
+        """
+        debug = MonkeyPatcher()
+        if self._debug:
+            debug.add_patch(defer.Deferred, 'debug', True)
+            debug.add_patch(DelayedCall, 'debug', True)
+        debug.patch()
+        try:
+            junk = self.get_junk()
+            if junk:
+                raise StaleJunkError(junk)
+            self._save_signals()
+            self._timeout_call = self._reactor.callLater(
+                timeout, self._timed_out, function, timeout)
+            # Calling 'stop' on the reactor will make it impossible to
+            # re-start the reactor.  Since the default signal handlers for
+            # TERM, BREAK and INT all call reactor.stop(), we'll patch it over
+            # with crash.  XXX: It might be a better idea to either install
+            # custom signal handlers or to override the methods that are
+            # Twisted's signal handlers.
+            stop, self._reactor.stop = self._reactor.stop, self._reactor.crash
+            def run_function():
+                d = defer.maybeDeferred(function, *args, **kwargs)
+                d.addCallbacks(self._got_success, self._got_failure)
+                d.addBoth(self._stop_reactor)
+            try:
+                self._reactor.callWhenRunning(run_function)
+                self._reactor.run()
+            finally:
+                self._reactor.stop = stop
+                self._restore_signals()
+            try:
+                return self._get_result()
+            finally:
+                self._clean()
+        finally:
+            debug.restore()
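+
+
+# Example (sketch): run a Deferred-returning function to completion, with a
+# five-second timeout ('make_deferred' is hypothetical).
+#
+#     from twisted.internet import reactor
+#     spinner = Spinner(reactor)
+#     result = spinner.run(5.0, make_deferred)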
diff --git a/third_party/testtools/testtools/assertions.py b/third_party/testtools/testtools/assertions.py
new file mode 100644
index 0000000..87fa74b
--- /dev/null
+++ b/third_party/testtools/testtools/assertions.py
@@ -0,0 +1,22 @@
+from testtools.matchers import (
+    Annotate,
+    MismatchError,
+    )
+
+
+def assert_that(matchee, matcher, message='', verbose=False):
+    """Assert that matchee is matched by matcher.
+
+    This should only be used when you need a function-based assertion;
+    ``assertThat`` on ``testtools.TestCase`` is preferred and has more
+    features.
+
+    :param matchee: An object to match with matcher.
+    :param matcher: An object meeting the testtools.Matcher protocol.
+    :raises MismatchError: When matcher does not match matchee.
+    """
+    matcher = Annotate.if_message(message, matcher)
+    mismatch = matcher.match(matchee)
+    if not mismatch:
+        return
+    raise MismatchError(matchee, matcher, mismatch, verbose)
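+
+
+# Example (sketch): usable outside of any TestCase.
+#
+#     from testtools.matchers import Equals
+#     assert_that(2 + 2, Equals(4))  # passes silently
+#     assert_that(2 + 2, Equals(5))  # raises MismatchError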
diff --git a/third_party/testtools/testtools/compat.py b/third_party/testtools/testtools/compat.py
new file mode 100644
index 0000000..d0a00d1
--- /dev/null
+++ b/third_party/testtools/testtools/compat.py
@@ -0,0 +1,386 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+"""Compatibility support for python 2 and 3."""
+
+__metaclass__ = type
+__all__ = [
+    '_b',
+    '_u',
+    'advance_iterator',
+    'all',
+    'BytesIO',
+    'classtypes',
+    'isbaseexception',
+    'istext',
+    'str_is_unicode',
+    'StringIO',
+    'reraise',
+    'unicode_output_stream',
+    ]
+
+import codecs
+import io
+import linecache
+import locale
+import os
+import re
+import sys
+import traceback
+import unicodedata
+
+from extras import try_imports
+
+BytesIO = try_imports(['StringIO.StringIO', 'io.BytesIO'])
+StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
+
+try:
+    from testtools import _compat2x as _compat
+except (SyntaxError, ImportError):
+    from testtools import _compat3x as _compat
+
+reraise = _compat.reraise
+
+
+__u_doc = """A function version of the 'u' prefix.
+
+This is needed becayse the u prefix is not usable in Python 3 but is required
+in Python 2 to get a unicode object.
+
+To migrate code that was written as u'\u1234' in Python 2 to 2+3 change
+it to be _u('\u1234'). The Python 3 interpreter will decode it
+appropriately and the no-op _u for Python 3 lets it through, in Python
+2 we then call unicode-escape in the _u function.
+"""
+
+if sys.version_info > (3, 0):
+    import builtins
+    def _u(s):
+        return s
+    _r = ascii
+    def _b(s):
+        """A byte literal."""
+        return s.encode("latin-1")
+    advance_iterator = next
+    # GZ 2011-08-24: Seems istext() is easy to misuse and makes for bad code.
+    def istext(x):
+        return isinstance(x, str)
+    def classtypes():
+        return (type,)
+    str_is_unicode = True
+else:
+    import __builtin__ as builtins
+    def _u(s):
+        # The double replace mangling going on prepares the string for
+        # unicode-escape - \foo is preserved, \u and \U are decoded.
+        return (s.replace("\\", "\\\\").replace("\\\\u", "\\u")
+            .replace("\\\\U", "\\U").decode("unicode-escape"))
+    _r = repr
+    def _b(s):
+        return s
+    advance_iterator = lambda it: it.next()
+    def istext(x):
+        return isinstance(x, basestring)
+    def classtypes():
+        import types
+        return (type, types.ClassType)
+    str_is_unicode = sys.platform == "cli"
+
+_u.__doc__ = __u_doc
+
+
+# GZ 2011-08-24: Using isinstance checks like this encourages bad interfaces,
+#                there should be better ways to write code needing this.
+if not issubclass(getattr(builtins, "bytes", str), str):
+    def _isbytes(x):
+        return isinstance(x, bytes)
+else:
+    # Never return True on Pythons that provide the name but not the real type
+    def _isbytes(x):
+        return False
+
+
+def _slow_escape(text):
+    """Escape unicode ``text`` leaving printable characters unmodified
+
+    The behaviour emulates the Python 3 implementation of repr, see
+    unicode_repr in unicodeobject.c and isprintable definition.
+
+    Because this iterates over the input a codepoint at a time, it's slow, and
+    does not handle astral characters correctly on Python builds with 16 bit
+    rather than 32 bit unicode type.
+    """
+    output = []
+    for c in text:
+        o = ord(c)
+        if o < 256:
+            if o < 32 or 126 < o < 161:
+                output.append(c.encode("unicode-escape"))
+            elif o == 92:
+                # Separate due to bug in unicode-escape codec in Python 2.4
+                output.append("\\\\")
+            else:
+                output.append(c)
+        else:
+            # To get correct behaviour would need to pair up surrogates here
+            if unicodedata.category(c)[0] in "CZ":
+                output.append(c.encode("unicode-escape"))
+            else:
+                output.append(c)
+    return "".join(output)
+
+
+def text_repr(text, multiline=None):
+    """Rich repr for ``text`` returning unicode, triple quoted if ``multiline``.
+    """
+    is_py3k = sys.version_info > (3, 0)
+    nl = _isbytes(text) and bytes((0xA,)) or "\n"
+    if multiline is None:
+        multiline = nl in text
+    if not multiline and (is_py3k or not str_is_unicode and type(text) is str):
+        # Use normal repr for single line of unicode on Python 3 or bytes
+        return repr(text)
+    prefix = repr(text[:0])[:-2]
+    if multiline:
+        # To escape multiline strings, split and process each line in turn,
+        # making sure that quotes are not escaped.
+        if is_py3k:
+            offset = len(prefix) + 1
+            lines = []
+            for l in text.split(nl):
+                r = repr(l)
+                q = r[-1]
+                lines.append(r[offset:-1].replace("\\" + q, q))
+        elif not str_is_unicode and isinstance(text, str):
+            lines = [l.encode("string-escape").replace("\\'", "'")
+                for l in text.split("\n")]
+        else:
+            lines = [_slow_escape(l) for l in text.split("\n")]
+        # Combine the escaped lines and append two of the closing quotes,
+        # then iterate over the result to escape triple quotes correctly.
+        _semi_done = "\n".join(lines) + "''"
+        p = 0
+        while True:
+            p = _semi_done.find("'''", p)
+            if p == -1:
+                break
+            _semi_done = "\\".join([_semi_done[:p], _semi_done[p:]])
+            p += 2
+        return "".join([prefix, "'''\\\n", _semi_done, "'"])
+    escaped_text = _slow_escape(text)
+    # Determine which quote character to use and if one gets prefixed with a
+    # backslash following the same logic Python uses for repr() on strings
+    quote = "'"
+    if "'" in text:
+        if '"' in text:
+            escaped_text = escaped_text.replace("'", "\\'")
+        else:
+            quote = '"'
+    return "".join([prefix, quote, escaped_text, quote])
+
+
+def unicode_output_stream(stream):
+    """Get wrapper for given stream that writes any unicode without exception
+
+    Characters that can't be coerced to the encoding of the stream, or 'ascii'
+    if valid encoding is not found, will be replaced. The original stream may
+    be returned in situations where a wrapper is determined unneeded.
+
+    The wrapper only allows unicode to be written, not non-ascii bytestrings,
+    which is a good thing to ensure sanity and sanitation.
+    """
+    if (sys.platform == "cli" or
+        isinstance(stream, (io.TextIOWrapper, io.StringIO))):
+        # Best to never encode before writing in IronPython, or if it is
+        # already a TextIO (which in the io library has no encoding
+        # attribute).
+        return stream
+    try:
+        writer = codecs.getwriter(stream.encoding or "")
+    except (AttributeError, LookupError):
+        return codecs.getwriter("ascii")(stream, "replace")
+    if writer.__module__.rsplit(".", 1)[1].startswith("utf"):
+        # The current stream has a unicode encoding so no error handler is needed
+        if sys.version_info > (3, 0):
+            return stream
+        return writer(stream)
+    if sys.version_info > (3, 0):
+        # Python 3 doesn't seem to make this easy, handle a common case
+        try:
+            return stream.__class__(stream.buffer, stream.encoding, "replace",
+                stream.newlines, stream.line_buffering)
+        except AttributeError:
+            pass
+    return writer(stream, "replace")
+
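+# Example (sketch): write arbitrary unicode to a possibly-ascii stdout.
+#
+#     out = unicode_output_stream(sys.stdout)
+#     out.write(_u('\u263a'))  # unencodable characters are replaced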
+
+# The default source encoding is actually "iso-8859-1" until Python 2.5 but
+# using non-ascii causes a deprecation warning in 2.4 and it's cleaner to
+# treat all versions the same way
+_default_source_encoding = "ascii"
+
+# Pattern specified in <http://www.python.org/dev/peps/pep-0263/>
+_cookie_search = re.compile(r"coding[:=]\s*([-\w.]+)").search
+
+def _detect_encoding(lines):
+    """Get the encoding of a Python source file from a list of lines as bytes
+
+    This function does less than tokenize.detect_encoding added in Python 3 as
+    it does not attempt to raise a SyntaxError when the interpreter would, it
+    just wants the encoding of a source file Python has already compiled and
+    determined is valid.
+    """
+    if not lines:
+        return _default_source_encoding
+    if lines[0].startswith("\xef\xbb\xbf"):
+        # Source starting with UTF-8 BOM is either UTF-8 or a SyntaxError
+        return "utf-8"
+    # Only the first two lines of the source file are examined
+    magic = _cookie_search("".join(lines[:2]))
+    if magic is None:
+        return _default_source_encoding
+    encoding = magic.group(1)
+    try:
+        codecs.lookup(encoding)
+    except LookupError:
+        # Some codecs raise something other than LookupError if they don't
+        # support the given error handler, but not the text ones that could
+        # actually be used for Python source code
+        return _default_source_encoding
+    return encoding
+
+
+class _EncodingTuple(tuple):
+    """A tuple type that can have an encoding attribute smuggled on"""
+
+
+def _get_source_encoding(filename):
+    """Detect, cache and return the encoding of Python source at filename"""
+    try:
+        return linecache.cache[filename].encoding
+    except (AttributeError, KeyError):
+        encoding = _detect_encoding(linecache.getlines(filename))
+        if filename in linecache.cache:
+            newtuple = _EncodingTuple(linecache.cache[filename])
+            newtuple.encoding = encoding
+            linecache.cache[filename] = newtuple
+        return encoding
+
+
+def _get_exception_encoding():
+    """Return the encoding we expect messages from the OS to be encoded in"""
+    if os.name == "nt":
+        # GZ 2010-05-24: Really want the codepage number instead, the error
+        #                handling of standard codecs is more deterministic
+        return "mbcs"
+    # GZ 2010-05-23: We need this call to be after initialisation, but there's
+    #                no benefit in asking more than once as it's a global
+    #                setting that can change after the message is formatted.
+    return locale.getlocale(locale.LC_MESSAGES)[1] or "ascii"
+
+
+def _exception_to_text(evalue):
+    """Try hard to get a sensible text value out of an exception instance"""
+    try:
+        return unicode(evalue)
+    except KeyboardInterrupt:
+        raise
+    except:
+        # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
+        pass
+    try:
+        return str(evalue).decode(_get_exception_encoding(), "replace")
+    except KeyboardInterrupt:
+        raise
+    except:
+        # Apparently this is what traceback._some_str does. Sigh - RBC 20100623
+        pass
+    # Okay, out of ideas, let higher level handle it
+    return None
+
+
+def _format_stack_list(stack_lines):
+    """Format 'stack_lines' and return a list of unicode strings.
+
+    :param stack_lines: A list of filename, lineno, name, and line variables,
+        probably obtained by calling traceback.extract_tb or
+        traceback.extract_stack.
+    """
+    fs_enc = sys.getfilesystemencoding()
+    extracted_list = []
+    for filename, lineno, name, line in stack_lines:
+        extracted_list.append((
+            filename.decode(fs_enc, "replace"),
+            lineno,
+            name.decode("ascii", "replace"),
+            line and line.decode(
+                _get_source_encoding(filename), "replace")))
+    return traceback.format_list(extracted_list)
+
+
+def _format_exception_only(eclass, evalue):
+    """Format the excption part of a traceback.
+
+    :param eclass: The type of the exception being formatted.
+    :param evalue: The exception instance.
+    :returns: A list of unicode strings.
+    """
+    list = []
+    if evalue is None:
+        # Is a (deprecated) string exception
+        list.append((eclass + "\n").decode("ascii", "replace"))
+        return list
+    if isinstance(evalue, SyntaxError):
+        # Avoid duplicating the special formatting for SyntaxError here,
+        # instead create a new instance with unicode filename and line
+        # Potentially gives duff spacing, but that's a pre-existing issue
+        try:
+            msg, (filename, lineno, offset, line) = evalue
+        except (TypeError, ValueError):
+            pass # Strange exception instance, fall through to generic code
+        else:
+            # Errors during parsing give the line from buffer encoded as
+            # latin-1 or utf-8 or the encoding of the file depending on the
+            # coding and whether the patch for issue #1031213 is applied, so
+            # give up on trying to decode it and just read the file again
+            if line:
+                bytestr = linecache.getline(filename, lineno)
+                if bytestr:
+                    if lineno == 1 and bytestr.startswith("\xef\xbb\xbf"):
+                        bytestr = bytestr[3:]
+                    line = bytestr.decode(
+                        _get_source_encoding(filename), "replace")
+                    del linecache.cache[filename]
+                else:
+                    line = line.decode("ascii", "replace")
+            if filename:
+                fs_enc = sys.getfilesystemencoding()
+                filename = filename.decode(fs_enc, "replace")
+            evalue = eclass(msg, (filename, lineno, offset, line))
+            list.extend(traceback.format_exception_only(eclass, evalue))
+            return list
+    sclass = eclass.__name__
+    svalue = _exception_to_text(evalue)
+    if svalue:
+        list.append("%s: %s\n" % (sclass, svalue))
+    elif svalue is None:
+        # GZ 2010-05-24: Not a great fallback message, but keep for the moment
+        list.append(_u("%s: <unprintable %s object>\n" % (sclass, sclass)))
+    else:
+        list.append(_u("%s\n" % sclass))
+    return list
+
+
+_TB_HEADER = _u('Traceback (most recent call last):\n')
+
+
+def _format_exc_info(eclass, evalue, tb, limit=None):
+    """Format a stack trace and the exception information as unicode
+
+    Compatibility function for Python 2 which ensures each component of a
+    traceback is correctly decoded according to its origins.
+
+    Based on traceback.format_exception and related functions.
+    """
+    return [_TB_HEADER] \
+        + _format_stack_list(traceback.extract_tb(tb, limit)) \
+        + _format_exception_only(eclass, evalue)
diff --git a/third_party/testtools/testtools/content.py b/third_party/testtools/testtools/content.py
new file mode 100644
index 0000000..101b631
--- /dev/null
+++ b/third_party/testtools/testtools/content.py
@@ -0,0 +1,383 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Content - a MIME-like Content object."""
+
+__all__ = [
+    'attach_file',
+    'Content',
+    'content_from_file',
+    'content_from_stream',
+    'json_content',
+    'text_content',
+    'TracebackContent',
+    ]
+
+import codecs
+import inspect
+import json
+import os
+import sys
+import traceback
+
+from extras import try_import
+
+from testtools.compat import (
+    _b,
+    _format_exception_only,
+    _format_stack_list,
+    _TB_HEADER,
+    _u,
+    istext,
+    str_is_unicode,
+)
+from testtools.content_type import ContentType, JSON, UTF8_TEXT
+
+
+functools = try_import('functools')
+
+_join_b = _b("").join
+
+
+DEFAULT_CHUNK_SIZE = 4096
+
+STDOUT_LINE = '\nStdout:\n%s'
+STDERR_LINE = '\nStderr:\n%s'
+
+
+def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
+    """Read 'stream' in chunks of 'chunk_size'.
+
+    :param stream: A file-like object to read from.
+    :param chunk_size: The size of each read from 'stream'.
+    :param seek_offset: If non-None, seek before iterating.
+    :param seek_whence: Pass through to the seek call, if seeking.
+    """
+    if seek_offset is not None:
+        stream.seek(seek_offset, seek_whence)
+    chunk = stream.read(chunk_size)
+    while chunk:
+        yield chunk
+        chunk = stream.read(chunk_size)
+
+
+class Content(object):
+    """A MIME-like Content object.
+
+    'Content' objects can be serialised to bytes using the iter_bytes method.
+    If the 'Content-Type' is recognised by other code, they are welcome to
+    look for richer contents than mere byte serialisation - for example in
+    memory object graphs etc. However, such code MUST be prepared to receive
+    a generic 'Content' object that has been reconstructed from a byte stream.
+
+    :ivar content_type: The content type of this Content.
+    """
+
+    def __init__(self, content_type, get_bytes):
+        """Create a ContentType."""
+        if None in (content_type, get_bytes):
+            raise ValueError("None not permitted in %r, %r" % (
+                content_type, get_bytes))
+        self.content_type = content_type
+        self._get_bytes = get_bytes
+
+    def __eq__(self, other):
+        return (self.content_type == other.content_type and
+            _join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
+
+    def as_text(self):
+        """Return all of the content as text.
+
+        This is only valid where ``iter_text`` is.  It will load all of the
+        content into memory.  Where this is a concern, use ``iter_text``
+        instead.
+        """
+        return _u('').join(self.iter_text())
+
+    def iter_bytes(self):
+        """Iterate over bytestrings of the serialised content."""
+        return self._get_bytes()
+
+    def iter_text(self):
+        """Iterate over the text of the serialised content.
+
+        This is only valid for text MIME types, and will use ISO-8859-1 if
+        no charset parameter is present in the MIME type. (This is somewhat
+        arbitrary, but consistent with RFC 2616 3.7.1).
+
+        :raises ValueError: If the content type is not text/\*.
+        """
+        if self.content_type.type != "text":
+            raise ValueError("Not a text type %r" % self.content_type)
+        return self._iter_text()
+
+    def _iter_text(self):
+        """Worker for iter_text - does the decoding."""
+        encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
+        decoder = codecs.getincrementaldecoder(encoding)()
+        for bytes in self.iter_bytes():
+            yield decoder.decode(bytes)
+        final = decoder.decode(_b(''), True)
+        if final:
+            yield final
+
+    def __repr__(self):
+        return "<Content type=%r, value=%r>" % (
+            self.content_type, _join_b(self.iter_bytes()))
+
+
+class StackLinesContent(Content):
+    """Content object for stack lines.
+
+    This adapts a list of "preprocessed" stack lines into a 'Content' object.
+    The stack lines are most likely produced from ``traceback.extract_stack``
+    or ``traceback.extract_tb``.
+
+    text/x-traceback;language=python is used for the mime type, in order to
+    provide room for other languages to format their tracebacks differently.
+    """
+
+    # Whether or not to hide layers of the stack trace that are
+    # unittest/testtools internal code.  Defaults to True since the
+    # system-under-test is rarely unittest or testtools.
+    HIDE_INTERNAL_STACK = True
+
+    def __init__(self, stack_lines, prefix_content="", postfix_content=""):
+        """Create a StackLinesContent for ``stack_lines``.
+
+        :param stack_lines: A list of preprocessed stack lines, probably
+            obtained by calling ``traceback.extract_stack`` or
+            ``traceback.extract_tb``.
+        :param prefix_content: If specified, a unicode string to prepend to the
+            text content.
+        :param postfix_content: If specified, a unicode string to append to the
+            text content.
+        """
+        content_type = ContentType('text', 'x-traceback',
+            {"language": "python", "charset": "utf8"})
+        value = prefix_content + \
+            self._stack_lines_to_unicode(stack_lines) + \
+            postfix_content
+        super(StackLinesContent, self).__init__(
+            content_type, lambda: [value.encode("utf8")])
+
+    def _stack_lines_to_unicode(self, stack_lines):
+        """Converts a list of pre-processed stack lines into a unicode string.
+        """
+
+        # testtools customization. When str is unicode (e.g. IronPython,
+        # Python 3), traceback.format_exception returns unicode. For Python 2,
+        # it returns bytes. We need to guarantee unicode.
+        if str_is_unicode:
+            format_stack_lines = traceback.format_list
+        else:
+            format_stack_lines = _format_stack_list
+
+        msg_lines = format_stack_lines(stack_lines)
+
+        return ''.join(msg_lines)
+
+
+def TracebackContent(err, test):
+    """Content object for tracebacks.
+
+    This adapts an exc_info tuple to the 'Content' interface.
+    'text/x-traceback;language=python' is used for the mime type, in order to
+    provide room for other languages to format their tracebacks differently.
+    """
+    if err is None:
+        raise ValueError("err may not be None")
+
+    exctype, value, tb = err
+    # Skip test runner traceback levels
+    if StackLinesContent.HIDE_INTERNAL_STACK:
+        while tb and '__unittest' in tb.tb_frame.f_globals:
+            tb = tb.tb_next
+
+    # testtools customization. When str is unicode (e.g. IronPython,
+    # Python 3), traceback.format_exception_only returns unicode. For Python 2,
+    # it returns bytes. We need to guarantee unicode.
+    if str_is_unicode:
+        format_exception_only = traceback.format_exception_only
+    else:
+        format_exception_only = _format_exception_only
+
+    limit = None
+    # Disabled due to https://bugs.launchpad.net/testtools/+bug/1188420
+    if (False
+        and StackLinesContent.HIDE_INTERNAL_STACK
+        and test.failureException
+        and isinstance(value, test.failureException)):
+        # Skip assert*() traceback levels
+        limit = 0
+        while tb and not self._is_relevant_tb_level(tb):
+            limit += 1
+            tb = tb.tb_next
+
+    prefix = _TB_HEADER
+    stack_lines = traceback.extract_tb(tb, limit)
+    postfix = ''.join(format_exception_only(exctype, value))
+
+    return StackLinesContent(stack_lines, prefix, postfix)
+
+
+def StacktraceContent(prefix_content="", postfix_content=""):
+    """Content object for stack traces.
+
+    This function will create and return a 'Content' object that contains a
+    stack trace.
+
+    The mime type is set to 'text/x-traceback;language=python', so other
+    languages can format their stack traces differently.
+
+    :param prefix_content: A unicode string to add before the stack lines.
+    :param postfix_content: A unicode string to add after the stack lines.
+    """
+    stack = inspect.stack()[1:]
+
+    if StackLinesContent.HIDE_INTERNAL_STACK:
+        limit = 1
+        while limit < len(stack) and '__unittest' not in stack[limit][0].f_globals:
+            limit += 1
+    else:
+        limit = -1
+
+    frames_only = [line[0] for line in stack[:limit]]
+    processed_stack = []
+    for frame in reversed(frames_only):
+        filename, line, function, context, _ = inspect.getframeinfo(frame)
+        context = ''.join(context)
+        processed_stack.append((filename, line, function, context))
+    return StackLinesContent(processed_stack, prefix_content, postfix_content)
+
+
+def json_content(json_data):
+    """Create a JSON Content object from JSON-encodeable data."""
+    data = json.dumps(json_data)
+    if str_is_unicode:
+        # The json module perversely returns native str not bytes
+        data = data.encode('utf8')
+    return Content(JSON, lambda: [data])
+
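+# Example (sketch): attach structured data as a detail from within a test.
+#
+#     self.addDetail('request', json_content({'verb': 'GET', 'path': '/'}))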
+
+def text_content(text):
+    """Create a Content object from some text.
+
+    This is useful for adding details which are short strings.
+    """
+    if not istext(text):
+        raise TypeError(
+            "text_content must be given text, not '%s'." % type(text).__name__
+        )
+    return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
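+
+# Illustrative sketch (detail name made up):
+#
+#   case.addDetail('reason', text_content(u'flaky backend, retrying'))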
+
+
+def maybe_wrap(wrapper, func):
+    """Merge metadata for func into wrapper if functools is present."""
+    if functools is not None:
+        wrapper = functools.update_wrapper(wrapper, func)
+    return wrapper
+
+
+def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
+                      buffer_now=False, seek_offset=None, seek_whence=0):
+    """Create a Content object from a file on disk.
+
+    Note that unless ``buffer_now`` is explicitly passed in as True, the file
+    will only be read from when ``iter_bytes`` is called.
+
+    :param path: The path to the file to be used as content.
+    :param content_type: The type of content.  If not specified, defaults
+        to UTF8-encoded text/plain.
+    :param chunk_size: The size of chunks to read from the file.
+        Defaults to ``DEFAULT_CHUNK_SIZE``.
+    :param buffer_now: If True, read the file from disk now and keep it in
+        memory. Otherwise, only read when the content is serialized.
+    :param seek_offset: If non-None, seek within the stream before reading it.
+    :param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
+    """
+    if content_type is None:
+        content_type = UTF8_TEXT
+    def reader():
+        with open(path, 'rb') as stream:
+            for chunk in _iter_chunks(stream,
+                                      chunk_size,
+                                      seek_offset,
+                                      seek_whence):
+                yield chunk
+    return content_from_reader(reader, content_type, buffer_now)
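+
+# Illustrative sketch (path and detail name made up): with the default
+# buffer_now=False the file is read only when the detail is serialized.
+#
+#   case.addDetail('server-log', content_from_file('/tmp/server.log'))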
+
+
+def content_from_stream(stream, content_type=None,
+                        chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
+                        seek_offset=None, seek_whence=0):
+    """Create a Content object from a file-like stream.
+
+    Note that unless ``buffer_now`` is explicitly passed in as True, the stream
+    will only be read from when ``iter_bytes`` is called.
+
+    :param stream: A file-like object to read the content from. The stream
+        is not closed by this function or the 'Content' object it returns.
+    :param content_type: The type of content. If not specified, defaults
+        to UTF8-encoded text/plain.
+    :param chunk_size: The size of chunks to read from the file.
+        Defaults to ``DEFAULT_CHUNK_SIZE``.
+    :param buffer_now: If True, reads from the stream right now. Otherwise,
+        only reads when the content is serialized. Defaults to False.
+    :param seek_offset: If non-None, seek within the stream before reading it.
+    :param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
+    """
+    if content_type is None:
+        content_type = UTF8_TEXT
+    reader = lambda: _iter_chunks(stream, chunk_size, seek_offset, seek_whence)
+    return content_from_reader(reader, content_type, buffer_now)
+
+
+def content_from_reader(reader, content_type, buffer_now):
+    """Create a Content object that will obtain the content from reader.
+
+    :param reader: A callback to read the content. Should return an iterable of
+        bytestrings.
+    :param content_type: The content type to create.
+    :param buffer_now: If True the reader is evaluated immediately and
+        buffered.
+    """
+    if content_type is None:
+        content_type = UTF8_TEXT
+    if buffer_now:
+        contents = list(reader())
+        reader = lambda: contents
+    return Content(content_type, reader)
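+
+# Illustrative sketch: with buffer_now=True the reader runs once, up front,
+# so later iterations see a stable snapshot of the data.
+#
+#   content = content_from_reader(lambda: [b'chunk'], UTF8_TEXT, True)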
+
+
+def attach_file(detailed, path, name=None, content_type=None,
+                chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
+    """Attach a file to this test as a detail.
+
+    This is a convenience method wrapping around ``addDetail``.
+
+    Note that by default the contents of the file will be read immediately. If
+    ``buffer_now`` is False, then the file *must* exist when the test result is
+    called with the results of this test, after the test has been torn down.
+
+    :param detailed: An object with details
+    :param path: The path to the file to attach.
+    :param name: The name to give to the detail for the attached file.
+    :param content_type: The content type of the file.  If not provided,
+        defaults to UTF8-encoded text/plain.
+    :param chunk_size: The size of chunks to read from the file.  Defaults
+        to something sensible.
+    :param buffer_now: If False, the file content is read when the content
+        object is evaluated rather than when attach_file is called.
+        Note that this may be after any cleanups that ``detailed`` has, so
+        if the file is a temporary file, disabling buffer_now may cause the
+        file to be read after it is deleted. To handle those cases, using
+        attach_file as a cleanup is recommended because it guarantees a
+        sequence for when the attach_file call is made::
+
+            detailed.addCleanup(attach_file, detailed, 'foo.txt')
+    """
+    if name is None:
+        name = os.path.basename(path)
+    content_object = content_from_file(
+        path, content_type, chunk_size, buffer_now)
+    detailed.addDetail(name, content_object)
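+
+# Illustrative sketch (path and name made up): attach a file to a test,
+# buffered immediately by default:
+#
+#   attach_file(case, '/tmp/render.log', name='render-log')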
diff --git a/third_party/testtools/testtools/content_type.py b/third_party/testtools/testtools/content_type.py
new file mode 100644
index 0000000..bbf314b
--- /dev/null
+++ b/third_party/testtools/testtools/content_type.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""ContentType - a MIME Content Type."""
+
+
+class ContentType(object):
+    """A content type from http://www.iana.org/assignments/media-types/
+
+    :ivar type: The primary type, e.g. "text" or "application"
+    :ivar subtype: The subtype, e.g. "plain" or "octet-stream"
+    :ivar parameters: A dict of additional parameters specific to the
+        content type.
+    """
+
+    def __init__(self, primary_type, sub_type, parameters=None):
+        """Create a ContentType."""
+        if None in (primary_type, sub_type):
+            raise ValueError("None not permitted in %r, %r" % (
+                primary_type, sub_type))
+        self.type = primary_type
+        self.subtype = sub_type
+        self.parameters = parameters or {}
+
+    def __eq__(self, other):
+        if type(other) != ContentType:
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __repr__(self):
+        if self.parameters:
+            params = '; '
+            params += '; '.join(
+                sorted('%s="%s"' % (k, v) for k, v in self.parameters.items()))
+        else:
+            params = ''
+        return "%s/%s%s" % (self.type, self.subtype, params)
+
+
+JSON = ContentType('application', 'json')
+
+UTF8_TEXT = ContentType('text', 'plain', {'charset': 'utf8'})
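+
+# Illustrative sketch of the repr format defined above:
+#
+#   >>> repr(UTF8_TEXT)
+#   'text/plain; charset="utf8"'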
diff --git a/third_party/testtools/testtools/deferredruntest.py b/third_party/testtools/testtools/deferredruntest.py
new file mode 100644
index 0000000..d22c79f
--- /dev/null
+++ b/third_party/testtools/testtools/deferredruntest.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Individual test case execution for tests that return Deferreds.
+
+This module is highly experimental and is liable to change in ways that cause
+subtle failures in tests.  Use at your own peril.
+"""
+
+__all__ = [
+    'assert_fails_with',
+    'AsynchronousDeferredRunTest',
+    'AsynchronousDeferredRunTestForBrokenTwisted',
+    'SynchronousDeferredRunTest',
+    ]
+
+import sys
+
+from testtools.compat import StringIO
+from testtools.content import (
+    Content,
+    text_content,
+    )
+from testtools.content_type import UTF8_TEXT
+from testtools.runtest import RunTest
+from testtools._spinner import (
+    extract_result,
+    NoResultError,
+    Spinner,
+    TimeoutError,
+    trap_unhandled_errors,
+    )
+
+from twisted.internet import defer
+from twisted.python import log
+from twisted.trial.unittest import _LogObserver
+
+
+class _DeferredRunTest(RunTest):
+    """Base for tests that return Deferreds."""
+
+    def _got_user_failure(self, failure, tb_label='traceback'):
+        """We got a failure from user code."""
+        return self._got_user_exception(
+            (failure.type, failure.value, failure.getTracebackObject()),
+            tb_label=tb_label)
+
+
+class SynchronousDeferredRunTest(_DeferredRunTest):
+    """Runner for tests that return synchronous Deferreds."""
+
+    def _run_user(self, function, *args):
+        d = defer.maybeDeferred(function, *args)
+        d.addErrback(self._got_user_failure)
+        result = extract_result(d)
+        return result
+
+
+def run_with_log_observers(observers, function, *args, **kwargs):
+    """Run 'function' with the given Twisted log observers."""
+    real_observers = list(log.theLogPublisher.observers)
+    for observer in real_observers:
+        log.theLogPublisher.removeObserver(observer)
+    for observer in observers:
+        log.theLogPublisher.addObserver(observer)
+    try:
+        return function(*args, **kwargs)
+    finally:
+        for observer in observers:
+            log.theLogPublisher.removeObserver(observer)
+        for observer in real_observers:
+            log.theLogPublisher.addObserver(observer)
+
+
+# Observer of the Twisted log that we install during tests.
+_log_observer = _LogObserver()
+
+
+class AsynchronousDeferredRunTest(_DeferredRunTest):
+    """Runner for tests that return Deferreds that fire asynchronously.
+
+    That is, this test runner assumes that the Deferreds will only fire if the
+    reactor is left to spin for a while.
+
+    Do not rely too heavily on the nuances of the behaviour of this class.
+    What it does to the reactor is black magic, and if we can find nicer ways
+    of doing it we will gladly break backwards compatibility.
+
+    This is highly experimental code.  Use at your own risk.
+    """
+
+    def __init__(self, case, handlers=None, last_resort=None, reactor=None,
+                 timeout=0.005, debug=False):
+        """Construct an `AsynchronousDeferredRunTest`.
+
+        Please be sure to always use keyword syntax, not positional, as the
+        base class may add arguments in future; to remain compatible with
+        core code, such new arguments have to be inserted before the local
+        parameters.
+
+        :param case: The `TestCase` to run.
+        :param handlers: A list of exception handlers (ExceptionType, handler)
+            where 'handler' is a callable that takes a `TestCase`, a
+            ``testtools.TestResult`` and the exception raised.
+        :param last_resort: Handler to call before re-raising uncatchable
+            exceptions (those for which there is no handler).
+        :param reactor: The Twisted reactor to use.  If not given, we use the
+            default reactor.
+        :param timeout: The maximum time allowed for running a test.  The
+            default is 0.005s.
+        :param debug: Whether or not to enable Twisted's debugging.  Use this
+            to get information about unhandled Deferreds and left-over
+            DelayedCalls.  Defaults to False.
+        """
+        super(AsynchronousDeferredRunTest, self).__init__(
+            case, handlers, last_resort)
+        if reactor is None:
+            from twisted.internet import reactor
+        self._reactor = reactor
+        self._timeout = timeout
+        self._debug = debug
+
+    @classmethod
+    def make_factory(cls, reactor=None, timeout=0.005, debug=False):
+        """Make a factory that conforms to the RunTest factory interface."""
+        # This is horrible, but it means that the return value of the method
+        # will be able to be assigned to a class variable *and* also be
+        # invoked directly.
+        class AsynchronousDeferredRunTestFactory:
+            def __call__(self, case, handlers=None, last_resort=None):
+                return cls(case, handlers, last_resort, reactor, timeout, debug)
+        return AsynchronousDeferredRunTestFactory()
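+
+    # Illustrative sketch: the returned factory is typically assigned as a
+    # class attribute on a TestCase, e.g.
+    #
+    #   run_tests_with = AsynchronousDeferredRunTest.make_factory(timeout=2)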
+
+    @defer.deferredGenerator
+    def _run_cleanups(self):
+        """Run the cleanups on the test case.
+
+        We expect that the cleanups on the test case can also return
+        asynchronous Deferreds.  As such, we take the responsibility for
+        running the cleanups, rather than letting TestCase do it.
+        """
+        last_exception = None
+        while self.case._cleanups:
+            f, args, kwargs = self.case._cleanups.pop()
+            d = defer.maybeDeferred(f, *args, **kwargs)
+            thing = defer.waitForDeferred(d)
+            yield thing
+            try:
+                thing.getResult()
+            except Exception:
+                exc_info = sys.exc_info()
+                self.case._report_traceback(exc_info)
+                last_exception = exc_info[1]
+        yield last_exception
+
+    def _make_spinner(self):
+        """Make the `Spinner` to be used to run the tests."""
+        return Spinner(self._reactor, debug=self._debug)
+
+    def _run_deferred(self):
+        """Run the test, assuming everything in it is Deferred-returning.
+
+        This should return a Deferred that fires with True if the test was
+        successful and False if the test was not successful.  It should *not*
+        call addSuccess on the result, because there's reactor clean up that
+        needs to be done afterwards.
+        """
+        fails = []
+
+        def fail_if_exception_caught(exception_caught):
+            if self.exception_caught == exception_caught:
+                fails.append(None)
+
+        def clean_up(ignored=None):
+            """Run the cleanups."""
+            d = self._run_cleanups()
+            def clean_up_done(result):
+                if result is not None:
+                    self._exceptions.append(result)
+                    fails.append(None)
+            return d.addCallback(clean_up_done)
+
+        def set_up_done(exception_caught):
+            """Set up is done, either clean up or run the test."""
+            if self.exception_caught == exception_caught:
+                fails.append(None)
+                return clean_up()
+            else:
+                d = self._run_user(self.case._run_test_method, self.result)
+                d.addCallback(fail_if_exception_caught)
+                d.addBoth(tear_down)
+                return d
+
+        def tear_down(ignored):
+            d = self._run_user(self.case._run_teardown, self.result)
+            d.addCallback(fail_if_exception_caught)
+            d.addBoth(clean_up)
+            return d
+
+        d = self._run_user(self.case._run_setup, self.result)
+        d.addCallback(set_up_done)
+        d.addBoth(lambda ignored: len(fails) == 0)
+        return d
+
+    def _log_user_exception(self, e):
+        """Raise 'e' and report it as a user exception."""
+        try:
+            raise e
+        except e.__class__:
+            self._got_user_exception(sys.exc_info())
+
+    def _blocking_run_deferred(self, spinner):
+        try:
+            return trap_unhandled_errors(
+                spinner.run, self._timeout, self._run_deferred)
+        except NoResultError:
+            # We didn't get a result at all!  This could be for any number of
+            # reasons, but most likely someone hit Ctrl-C during the test.
+            raise KeyboardInterrupt
+        except TimeoutError:
+            # The function took too long to run.
+            self._log_user_exception(TimeoutError(self.case, self._timeout))
+            return False, []
+
+    def _run_core(self):
+        # Add an observer to trap all logged errors.
+        self.case.reactor = self._reactor
+        error_observer = _log_observer
+        full_log = StringIO()
+        full_observer = log.FileLogObserver(full_log)
+        spinner = self._make_spinner()
+        successful, unhandled = run_with_log_observers(
+            [error_observer.gotEvent, full_observer.emit],
+            self._blocking_run_deferred, spinner)
+
+        self.case.addDetail(
+            'twisted-log', Content(UTF8_TEXT, full_log.readlines))
+
+        logged_errors = error_observer.flushErrors()
+        for logged_error in logged_errors:
+            successful = False
+            self._got_user_failure(logged_error, tb_label='logged-error')
+
+        if unhandled:
+            successful = False
+            for debug_info in unhandled:
+                f = debug_info.failResult
+                info = debug_info._getDebugTracebacks()
+                if info:
+                    self.case.addDetail(
+                        'unhandled-error-in-deferred-debug',
+                        text_content(info))
+                self._got_user_failure(f, 'unhandled-error-in-deferred')
+
+        junk = spinner.clear_junk()
+        if junk:
+            successful = False
+            self._log_user_exception(UncleanReactorError(junk))
+
+        if successful:
+            self.result.addSuccess(self.case, details=self.case.getDetails())
+
+    def _run_user(self, function, *args):
+        """Run a user-supplied function.
+
+        This just makes sure that it returns a Deferred, regardless of how the
+        user wrote it.
+        """
+        d = defer.maybeDeferred(function, *args)
+        return d.addErrback(self._got_user_failure)
+
+
+class AsynchronousDeferredRunTestForBrokenTwisted(AsynchronousDeferredRunTest):
+    """Test runner that works around Twisted brokenness re reactor junk.
+
+    There are many APIs within Twisted itself where a Deferred fires but
+    leaves cleanup work scheduled for the reactor to do.  Arguably, many of
+    these are bugs.  This runner iterates the reactor event loop a number of
+    times after every test, in order to shake out these buggy-but-commonplace
+    events.
+    """
+
+    def _make_spinner(self):
+        spinner = super(
+            AsynchronousDeferredRunTestForBrokenTwisted, self)._make_spinner()
+        spinner._OBLIGATORY_REACTOR_ITERATIONS = 2
+        return spinner
+
+
+def assert_fails_with(d, *exc_types, **kwargs):
+    """Assert that 'd' will fail with one of 'exc_types'.
+
+    The normal way to use this is to return the result of 'assert_fails_with'
+    from your unit test.
+
+    Note that this function is experimental and unstable.  Use at your own
+    peril; expect the API to change.
+
+    :param d: A Deferred that is expected to fail.
+    :param exc_types: The exception types that the Deferred is expected to
+        fail with.
+    :param failureException: An optional keyword argument.  If provided, will
+        raise that exception instead of
+        ``testtools.TestCase.failureException``.
+    :return: A Deferred that will fail with an ``AssertionError`` if 'd' does
+        not fail with one of the exception types.
+    """
+    failureException = kwargs.pop('failureException', None)
+    if failureException is None:
+        # Avoid circular imports.
+        from testtools import TestCase
+        failureException = TestCase.failureException
+    expected_names = ", ".join(exc_type.__name__ for exc_type in exc_types)
+    def got_success(result):
+        raise failureException(
+            "%s not raised (%r returned)" % (expected_names, result))
+    def got_failure(failure):
+        if failure.check(*exc_types):
+            return failure.value
+        raise failureException("%s raised instead of %s:\n %s" % (
+            failure.type.__name__, expected_names, failure.getTraceback()))
+    return d.addCallbacks(got_success, got_failure)
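+
+# Illustrative sketch (test and method names made up), returning the
+# assertion's Deferred from the test as the docstring suggests:
+#
+#   def test_lookup_missing_key(self):
+#       d = self.store.lookup('missing')
+#       return assert_fails_with(d, KeyError)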
+
+
+def flush_logged_errors(*error_types):
+    return _log_observer.flushErrors(*error_types)
+
+
+class UncleanReactorError(Exception):
+    """Raised when the reactor has junk in it."""
+
+    def __init__(self, junk):
+        Exception.__init__(self,
+            "The reactor still thinks it needs to do things. Close all "
+            "connections, kill all processes and make sure all delayed "
+            "calls have either fired or been cancelled:\n%s"
+            % ''.join(map(self._get_junk_info, junk)))
+
+    def _get_junk_info(self, junk):
+        from twisted.internet.base import DelayedCall
+        if isinstance(junk, DelayedCall):
+            ret = str(junk)
+        else:
+            ret = repr(junk)
+        return '  %s\n' % (ret,)
diff --git a/third_party/testtools/testtools/distutilscmd.py b/third_party/testtools/testtools/distutilscmd.py
new file mode 100644
index 0000000..a4d79dc
--- /dev/null
+++ b/third_party/testtools/testtools/distutilscmd.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+"""Extensions to the standard Python unittest library."""
+
+import sys
+
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+
+from testtools.run import TestProgram, TestToolsTestRunner
+
+
+class TestCommand(Command):
+    """Command to run unit tests with testtools"""
+
+    description = "run unit tests with testtools"
+
+    user_options = [
+        ('catch', 'c', "Catch ctrl-C and display results so far"),
+        ('buffer', 'b', "Buffer stdout and stderr during tests"),
+        ('failfast', 'f', "Stop on first fail or error"),
+        ('test-module=', 'm', "Run 'test_suite' in specified module"),
+        ('test-suite=', 's',
+         "Test suite to run (e.g. 'some_module.test_suite')")
+    ]
+
+    def __init__(self, dist):
+        Command.__init__(self, dist)
+        self.runner = TestToolsTestRunner(stdout=sys.stdout)
+
+    def initialize_options(self):
+        self.test_suite = None
+        self.test_module = None
+        self.catch = None
+        self.buffer = None
+        self.failfast = None
+
+    def finalize_options(self):
+        if self.test_suite is None:
+            if self.test_module is None:
+                raise DistutilsOptionError(
+                    "You must specify a module or a suite to run tests from")
+            else:
+                self.test_suite = self.test_module+".test_suite"
+        elif self.test_module:
+            raise DistutilsOptionError(
+                "You may specify a module or a suite, but not both")
+        self.test_args = [self.test_suite]
+        if self.verbose:
+            self.test_args.insert(0, '--verbose')
+        if self.buffer:
+            self.test_args.insert(0, '--buffer')
+        if self.catch:
+            self.test_args.insert(0, '--catch')
+        if self.failfast:
+            self.test_args.insert(0, '--failfast')
+
+    def run(self):
+        self.program = TestProgram(
+            argv=self.test_args, testRunner=self.runner, stdout=sys.stdout,
+            exit=False)
diff --git a/third_party/testtools/testtools/helpers.py b/third_party/testtools/testtools/helpers.py
new file mode 100644
index 0000000..401d2cc
--- /dev/null
+++ b/third_party/testtools/testtools/helpers.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+    'safe_hasattr',
+    'try_import',
+    'try_imports',
+    ]
+
+import sys
+
+# Compat - removal announced in 0.9.25.
+from extras import (
+    safe_hasattr,
+    try_import,
+    try_imports,
+    )
+
+
+def map_values(function, dictionary):
+    """Map ``function`` across the values of ``dictionary``.
+
+    :return: A dict with the same keys as ``dictionary``, where the value
+        of each key ``k`` is ``function(dictionary[k])``.
+    """
+    return dict((k, function(dictionary[k])) for k in dictionary)
+
+
+def filter_values(function, dictionary):
+    """Filter ``dictionary`` by its values using ``function``."""
+    return dict((k, v) for k, v in dictionary.items() if function(v))
+
+
+def dict_subtract(a, b):
+    """Return the part of ``a`` that's not in ``b``."""
+    return dict((k, a[k]) for k in set(a) - set(b))
+
+
+def list_subtract(a, b):
+    """Return a list ``a`` without the elements of ``b``.
+
+    If a particular value is in ``a`` twice and in ``b`` once, then that
+    value will appear once in the returned list.
+    """
+    a_only = list(a)
+    for x in b:
+        if x in a_only:
+            a_only.remove(x)
+    return a_only
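+
+# Illustrative sketches of the helpers above:
+#
+#   >>> dict_subtract({'a': 1, 'b': 2}, {'a': 1})
+#   {'b': 2}
+#   >>> list_subtract([1, 1, 2], [1])
+#   [1, 2]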
diff --git a/third_party/testtools/testtools/matchers/__init__.py b/third_party/testtools/testtools/matchers/__init__.py
new file mode 100644
index 0000000..771d814
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/__init__.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""All the matchers.
+
+Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print(testtools.matchers.__all__)'
+"""
+
+__all__ = [
+    'AfterPreprocessing',
+    'AllMatch',
+    'Annotate',
+    'AnyMatch',
+    'Contains',
+    'ContainsAll',
+    'ContainedByDict',
+    'ContainsDict',
+    'DirContains',
+    'DirExists',
+    'DocTestMatches',
+    'EndsWith',
+    'Equals',
+    'FileContains',
+    'FileExists',
+    'GreaterThan',
+    'HasLength',
+    'HasPermissions',
+    'Is',
+    'IsInstance',
+    'KeysEqual',
+    'LessThan',
+    'MatchesAll',
+    'MatchesAny',
+    'MatchesDict',
+    'MatchesException',
+    'MatchesListwise',
+    'MatchesPredicate',
+    'MatchesPredicateWithParams',
+    'MatchesRegex',
+    'MatchesSetwise',
+    'MatchesStructure',
+    'NotEquals',
+    'Not',
+    'PathExists',
+    'Raises',
+    'raises',
+    'SamePath',
+    'StartsWith',
+    'TarballContains',
+    ]
+
+from ._basic import (
+    Contains,
+    EndsWith,
+    Equals,
+    GreaterThan,
+    HasLength,
+    Is,
+    IsInstance,
+    LessThan,
+    MatchesRegex,
+    NotEquals,
+    StartsWith,
+    )
+from ._datastructures import (
+    ContainsAll,
+    MatchesListwise,
+    MatchesSetwise,
+    MatchesStructure,
+    )
+from ._dict import (
+    ContainedByDict,
+    ContainsDict,
+    KeysEqual,
+    MatchesDict,
+    )
+from ._doctest import (
+    DocTestMatches,
+    )
+from ._exception import (
+    MatchesException,
+    Raises,
+    raises,
+    )
+from ._filesystem import (
+    DirContains,
+    DirExists,
+    FileContains,
+    FileExists,
+    HasPermissions,
+    PathExists,
+    SamePath,
+    TarballContains,
+    )
+from ._higherorder import (
+    AfterPreprocessing,
+    AllMatch,
+    Annotate,
+    AnyMatch,
+    MatchesAll,
+    MatchesAny,
+    MatchesPredicate,
+    MatchesPredicateWithParams,
+    Not,
+    )
+
+# XXX: These are not explicitly included in __all__.  It's unclear how much a
+# part of the public interface they really are.
+from ._impl import (
+    Matcher,
+    Mismatch,
+    MismatchError,
+    )
diff --git a/third_party/testtools/testtools/matchers/_basic.py b/third_party/testtools/testtools/matchers/_basic.py
new file mode 100644
index 0000000..2d9f143
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_basic.py
@@ -0,0 +1,326 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+    'Contains',
+    'EndsWith',
+    'Equals',
+    'GreaterThan',
+    'HasLength',
+    'Is',
+    'IsInstance',
+    'LessThan',
+    'MatchesRegex',
+    'NotEquals',
+    'StartsWith',
+    ]
+
+import operator
+from pprint import pformat
+import re
+
+from ..compat import (
+    _isbytes,
+    istext,
+    str_is_unicode,
+    text_repr,
+    )
+from ..helpers import list_subtract
+from ._higherorder import (
+    MatchesPredicateWithParams,
+    PostfixedMismatch,
+    )
+from ._impl import (
+    Matcher,
+    Mismatch,
+    )
+
+
+def _format(thing):
+    """
+    Blocks of text with newlines are formatted as triple-quote
+    strings. Everything else is pretty-printed.
+    """
+    if istext(thing) or _isbytes(thing):
+        return text_repr(thing)
+    return pformat(thing)
+
+
+class _BinaryComparison(object):
+    """Matcher that compares an object to another object."""
+
+    def __init__(self, expected):
+        self.expected = expected
+
+    def __str__(self):
+        return "%s(%r)" % (self.__class__.__name__, self.expected)
+
+    def match(self, other):
+        if self.comparator(other, self.expected):
+            return None
+        return _BinaryMismatch(self.expected, self.mismatch_string, other)
+
+    def comparator(self, expected, other):
+        raise NotImplementedError(self.comparator)
+
+
+class _BinaryMismatch(Mismatch):
+    """Two things did not match."""
+
+    def __init__(self, expected, mismatch_string, other):
+        self.expected = expected
+        self._mismatch_string = mismatch_string
+        self.other = other
+
+    def describe(self):
+        left = repr(self.expected)
+        right = repr(self.other)
+        if len(left) + len(right) > 70:
+            return "%s:\nreference = %s\nactual    = %s\n" % (
+                self._mismatch_string, _format(self.expected),
+                _format(self.other))
+        else:
+            return "%s %s %s" % (left, self._mismatch_string, right)
+
+
+class Equals(_BinaryComparison):
+    """Matches if the items are equal."""
+
+    comparator = operator.eq
+    mismatch_string = '!='
+
+
+class NotEquals(_BinaryComparison):
+    """Matches if the items are not equal.
+
+    In most cases, this is equivalent to ``Not(Equals(foo))``. The difference
+    only matters when testing ``__ne__`` implementations.
+    """
+
+    comparator = operator.ne
+    mismatch_string = '=='
+
+
+class Is(_BinaryComparison):
+    """Matches if the items are identical."""
+
+    comparator = operator.is_
+    mismatch_string = 'is not'
+
+
+class LessThan(_BinaryComparison):
+    """Matches if the item is less than the matchers reference object."""
+
+    comparator = operator.__lt__
+    mismatch_string = 'is not >'
+
+
+class GreaterThan(_BinaryComparison):
+    """Matches if the item is greater than the matchers reference object."""
+
+    comparator = operator.__gt__
+    mismatch_string = 'is not <'
+
+
+class SameMembers(Matcher):
+    """Matches if two iterators have the same members.
+
+    This is not the same as set equivalence.  The two iterators must be of the
+    same length and have the same repetitions.
+    """
+
+    def __init__(self, expected):
+        super(SameMembers, self).__init__()
+        self.expected = expected
+
+    def __str__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.expected)
+
+    def match(self, observed):
+        expected_only = list_subtract(self.expected, observed)
+        observed_only = list_subtract(observed, self.expected)
+        if expected_only == observed_only == []:
+            return
+        return PostfixedMismatch(
+            "\nmissing:    %s\nextra:      %s" % (
+                _format(expected_only), _format(observed_only)),
+            _BinaryMismatch(self.expected, 'elements differ', observed))
+
+
+class DoesNotStartWith(Mismatch):
+
+    def __init__(self, matchee, expected):
+        """Create a DoesNotStartWith Mismatch.
+
+        :param matchee: the string that did not match.
+        :param expected: the string that 'matchee' was expected to start with.
+        """
+        self.matchee = matchee
+        self.expected = expected
+
+    def describe(self):
+        return "%s does not start with %s." % (
+            text_repr(self.matchee), text_repr(self.expected))
+
+
+class StartsWith(Matcher):
+    """Checks whether one string starts with another."""
+
+    def __init__(self, expected):
+        """Create a StartsWith Matcher.
+
+        :param expected: the string that matchees should start with.
+        """
+        self.expected = expected
+
+    def __str__(self):
+        return "StartsWith(%r)" % (self.expected,)
+
+    def match(self, matchee):
+        if not matchee.startswith(self.expected):
+            return DoesNotStartWith(matchee, self.expected)
+        return None
+
+
+class DoesNotEndWith(Mismatch):
+
+    def __init__(self, matchee, expected):
+        """Create a DoesNotEndWith Mismatch.
+
+        :param matchee: the string that did not match.
+        :param expected: the string that 'matchee' was expected to end with.
+        """
+        self.matchee = matchee
+        self.expected = expected
+
+    def describe(self):
+        return "%s does not end with %s." % (
+            text_repr(self.matchee), text_repr(self.expected))
+
+
+class EndsWith(Matcher):
+    """Checks whether one string ends with another."""
+
+    def __init__(self, expected):
+        """Create a EndsWith Matcher.
+
+        :param expected: the string that matchees should end with.
+        """
+        self.expected = expected
+
+    def __str__(self):
+        return "EndsWith(%r)" % (self.expected,)
+
+    def match(self, matchee):
+        if not matchee.endswith(self.expected):
+            return DoesNotEndWith(matchee, self.expected)
+        return None
+
+
+class IsInstance(object):
+    """Matcher that wraps isinstance."""
+
+    def __init__(self, *types):
+        self.types = tuple(types)
+
+    def __str__(self):
+        return "%s(%s)" % (self.__class__.__name__,
+                ', '.join(type.__name__ for type in self.types))
+
+    def match(self, other):
+        if isinstance(other, self.types):
+            return None
+        return NotAnInstance(other, self.types)
+
+
+class NotAnInstance(Mismatch):
+
+    def __init__(self, matchee, types):
+        """Create a NotAnInstance Mismatch.
+
+        :param matchee: the thing which is not an instance of any of types.
+        :param types: A tuple of the types which were expected.
+        """
+        self.matchee = matchee
+        self.types = types
+
+    def describe(self):
+        if len(self.types) == 1:
+            typestr = self.types[0].__name__
+        else:
+            typestr = 'any of (%s)' % ', '.join(type.__name__ for type in
+                    self.types)
+        return "'%s' is not an instance of %s" % (self.matchee, typestr)
+
+
+class DoesNotContain(Mismatch):
+
+    def __init__(self, matchee, needle):
+        """Create a DoesNotContain Mismatch.
+
+        :param matchee: the object that did not contain needle.
+        :param needle: the needle that 'matchee' was expected to contain.
+        """
+        self.matchee = matchee
+        self.needle = needle
+
+    def describe(self):
+        return "%r not in %r" % (self.needle, self.matchee)
+
+
+class Contains(Matcher):
+    """Checks whether something is contained in another thing."""
+
+    def __init__(self, needle):
+        """Create a Contains Matcher.
+
+        :param needle: the thing that needs to be contained by matchees.
+        """
+        self.needle = needle
+
+    def __str__(self):
+        return "Contains(%r)" % (self.needle,)
+
+    def match(self, matchee):
+        try:
+            if self.needle not in matchee:
+                return DoesNotContain(matchee, self.needle)
+        except TypeError:
+            # e.g. 1 in 2 will raise TypeError
+            return DoesNotContain(matchee, self.needle)
+        return None
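+
+# Illustrative sketch: Contains works on any container, and a TypeError from
+# 'in' is reported as an ordinary mismatch:
+#
+#   Contains('a').match('cat')    # None: matched
+#   Contains(3).match([1, 2])     # DoesNotContain mismatch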
+
+
+class MatchesRegex(object):
+    """Matches if the matchee is matched by a regular expression."""
+
+    def __init__(self, pattern, flags=0):
+        self.pattern = pattern
+        self.flags = flags
+
+    def __str__(self):
+        args = ['%r' % self.pattern]
+        flag_arg = []
+        # dir() sorts the attributes for us, so we don't need to do it again.
+        for flag in dir(re):
+            if len(flag) == 1:
+                if self.flags & getattr(re, flag):
+                    flag_arg.append('re.%s' % flag)
+        if flag_arg:
+            args.append('|'.join(flag_arg))
+        return '%s(%s)' % (self.__class__.__name__, ', '.join(args))
+
+    def match(self, value):
+        if not re.match(self.pattern, value, self.flags):
+            pattern = self.pattern
+            if not isinstance(pattern, str_is_unicode and str or unicode):
+                pattern = pattern.decode("latin1")
+            pattern = pattern.encode("unicode_escape").decode("ascii")
+            return Mismatch("%r does not match /%s/" % (
+                    value, pattern.replace("\\\\", "\\")))
+
+
+def has_len(x, y):
+    return len(x) == y
+
+
+HasLength = MatchesPredicateWithParams(has_len, "len({0}) != {1}", "HasLength")
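+
+# Illustrative sketch: HasLength(2).match([1, 2]) returns None, while
+# HasLength(3).match([1, 2]) returns a mismatch described roughly as
+# "len([1, 2]) != 3".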
diff --git a/third_party/testtools/testtools/matchers/_datastructures.py b/third_party/testtools/testtools/matchers/_datastructures.py
new file mode 100644
index 0000000..70de790
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_datastructures.py
@@ -0,0 +1,228 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers that operate with knowledge of Python data structures."""
+
+__all__ = [
+    'ContainsAll',
+    'MatchesListwise',
+    'MatchesSetwise',
+    'MatchesStructure',
+    ]
+
+from ..helpers import map_values
+from ._higherorder import (
+    Annotate,
+    MatchesAll,
+    MismatchesAll,
+    )
+from ._impl import Mismatch
+
+
+def ContainsAll(items):
+    """Make a matcher that checks whether a list of things is contained
+    in another thing.
+
+    The matcher effectively checks that the provided sequence is a subset of
+    the matchee.
+    """
+    from ._basic import Contains
+    return MatchesAll(*map(Contains, items), first_only=False)
+
+
+class MatchesListwise(object):
+    """Matches if each matcher matches the corresponding value.
+
+    More easily explained by example than in words:
+
+    >>> from ._basic import Equals
+    >>> MatchesListwise([Equals(1)]).match([1])
+    >>> MatchesListwise([Equals(1), Equals(2)]).match([1, 2])
+    >>> print (MatchesListwise([Equals(1), Equals(2)]).match([2, 1]).describe())
+    Differences: [
+    1 != 2
+    2 != 1
+    ]
+    >>> matcher = MatchesListwise([Equals(1), Equals(2)], first_only=True)
+    >>> print (matcher.match([3, 4]).describe())
+    1 != 3
+    """
+
+    def __init__(self, matchers, first_only=False):
+        """Construct a MatchesListwise matcher.
+
+        :param matchers: A list of matchers that the matched values must match.
+        :param first_only: If True, then only report the first mismatch,
+            otherwise report all of them. Defaults to False.
+        """
+        self.matchers = matchers
+        self.first_only = first_only
+
+    def match(self, values):
+        from ._basic import Equals
+        mismatches = []
+        length_mismatch = Annotate(
+            "Length mismatch", Equals(len(self.matchers))).match(len(values))
+        if length_mismatch:
+            mismatches.append(length_mismatch)
+        for matcher, value in zip(self.matchers, values):
+            mismatch = matcher.match(value)
+            if mismatch:
+                if self.first_only:
+                    return mismatch
+                mismatches.append(mismatch)
+        if mismatches:
+            return MismatchesAll(mismatches)
+
+
+class MatchesStructure(object):
+    """Matcher that matches an object structurally.
+
+    'Structurally' here means that attributes of the object being matched are
+    compared against given matchers.
+
+    `fromExample` allows the creation of a matcher from a prototype object and
+    then modified versions can be created with `update`.
+
+    `byEquality` creates a matcher in much the same way as the constructor,
+    except that the matcher for each of the attributes is assumed to be
+    `Equals`.
+
+    `byMatcher` creates a similar matcher to `byEquality`, but you get to pick
+    the matcher, rather than just using `Equals`.
+    """
+
+    def __init__(self, **kwargs):
+        """Construct a `MatchesStructure`.
+
+        :param kwargs: A mapping of attributes to matchers.
+        """
+        self.kws = kwargs
+
+    @classmethod
+    def byEquality(cls, **kwargs):
+        """Matches an object where the attributes equal the keyword values.
+
+        Similar to the constructor, except that the matcher is assumed to be
+        Equals.
+        """
+        from ._basic import Equals
+        return cls.byMatcher(Equals, **kwargs)
+
+    @classmethod
+    def byMatcher(cls, matcher, **kwargs):
+        """Matches an object where the attributes match the keyword values.
+
+        Similar to the constructor, except that the provided matcher is used
+        to match all of the values.
+        """
+        return cls(**map_values(matcher, kwargs))
+
+    @classmethod
+    def fromExample(cls, example, *attributes):
+        from ._basic import Equals
+        kwargs = {}
+        for attr in attributes:
+            kwargs[attr] = Equals(getattr(example, attr))
+        return cls(**kwargs)
+
+    def update(self, **kws):
+        new_kws = self.kws.copy()
+        for attr, matcher in kws.items():
+            if matcher is None:
+                new_kws.pop(attr, None)
+            else:
+                new_kws[attr] = matcher
+        return type(self)(**new_kws)
+
+    def __str__(self):
+        kws = []
+        for attr, matcher in sorted(self.kws.items()):
+            kws.append("%s=%s" % (attr, matcher))
+        return "%s(%s)" % (self.__class__.__name__, ', '.join(kws))
+
+    def match(self, value):
+        matchers = []
+        values = []
+        for attr, matcher in sorted(self.kws.items()):
+            matchers.append(Annotate(attr, matcher))
+            values.append(getattr(value, attr))
+        return MatchesListwise(matchers).match(values)
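+
+# Illustrative sketch (object and attribute names made up):
+#
+#   matcher = MatchesStructure.byEquality(host='example.com', port=80)
+#   matcher.match(connection)   # None if connection.host/.port equal these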
+
+
+class MatchesSetwise(object):
+    """Matches if all the matchers match elements of the value being matched.
+
+    That is, each element in the 'observed' set must match exactly one matcher
+    from the set of matchers, with no matchers left over.
+
+    The difference compared to `MatchesListwise` is that the order of the
+    matchings does not matter.
+    """
+
+    def __init__(self, *matchers):
+        self.matchers = matchers
+
+    def match(self, observed):
+        remaining_matchers = set(self.matchers)
+        not_matched = []
+        for value in observed:
+            for matcher in remaining_matchers:
+                if matcher.match(value) is None:
+                    remaining_matchers.remove(matcher)
+                    break
+            else:
+                not_matched.append(value)
+        if not_matched or remaining_matchers:
+            remaining_matchers = list(remaining_matchers)
+            # There are various cases that all should be reported somewhat
+            # differently.
+
+            # There are two trivial cases:
+            # 1) There are just some matchers left over.
+            # 2) There are just some values left over.
+
+            # Then there are three more interesting cases:
+            # 3) There are the same number of matchers and values left over.
+            # 4) There are more matchers left over than values.
+            # 5) There are more values left over than matchers.
+
+            if len(not_matched) == 0:
+                if len(remaining_matchers) > 1:
+                    msg = "There were %s matchers left over: " % (
+                        len(remaining_matchers),)
+                else:
+                    msg = "There was 1 matcher left over: "
+                msg += ', '.join(map(str, remaining_matchers))
+                return Mismatch(msg)
+            elif len(remaining_matchers) == 0:
+                if len(not_matched) > 1:
+                    return Mismatch(
+                        "There were %s values left over: %s" % (
+                            len(not_matched), not_matched))
+                else:
+                    return Mismatch(
+                        "There was 1 value left over: %s" % (
+                            not_matched, ))
+            else:
+                common_length = min(len(remaining_matchers), len(not_matched))
+                if common_length == 0:
+                    raise AssertionError("common_length can't be 0 here")
+                if common_length > 1:
+                    msg = "There were %s mismatches" % (common_length,)
+                else:
+                    msg = "There was 1 mismatch"
+                if len(remaining_matchers) > len(not_matched):
+                    extra_matchers = remaining_matchers[common_length:]
+                    msg += " and %s extra matcher" % (len(extra_matchers), )
+                    if len(extra_matchers) > 1:
+                        msg += "s"
+                    msg += ': ' + ', '.join(map(str, extra_matchers))
+                elif len(not_matched) > len(remaining_matchers):
+                    extra_values = not_matched[common_length:]
+                    msg += " and %s extra value" % (len(extra_values), )
+                    if len(extra_values) > 1:
+                        msg += "s"
+                    msg += ': ' + str(extra_values)
+                return Annotate(
+                    msg, MatchesListwise(remaining_matchers[:common_length])
+                    ).match(not_matched[:common_length])
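+
+# Illustrative sketch: order is ignored, so this matches:
+#
+#   from ._basic import Equals
+#   MatchesSetwise(Equals(1), Equals(2)).match([2, 1])   # None: matched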
diff --git a/third_party/testtools/testtools/matchers/_dict.py b/third_party/testtools/testtools/matchers/_dict.py
new file mode 100644
index 0000000..b1ec915
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_dict.py
@@ -0,0 +1,259 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+    'KeysEqual',
+    ]
+
+from ..helpers import (
+    dict_subtract,
+    filter_values,
+    map_values,
+    )
+from ._higherorder import (
+    AnnotatedMismatch,
+    PrefixedMismatch,
+    MismatchesAll,
+    )
+from ._impl import Matcher, Mismatch
+
+
+def LabelledMismatches(mismatches, details=None):
+    """A collection of mismatches, each labelled."""
+    return MismatchesAll(
+        (PrefixedMismatch(k, v) for (k, v) in sorted(mismatches.items())),
+        wrap=False)
+
+
+class MatchesAllDict(Matcher):
+    """Matches if all of the matchers it is created with match.
+
+    A lot like ``MatchesAll``, but takes a dict of Matchers and labels any
+    mismatches with the key of the dictionary.
+    """
+
+    def __init__(self, matchers):
+        super(MatchesAllDict, self).__init__()
+        self.matchers = matchers
+
+    def __str__(self):
+        return 'MatchesAllDict(%s)' % (_format_matcher_dict(self.matchers),)
+
+    def match(self, observed):
+        mismatches = {}
+        for label in self.matchers:
+            mismatches[label] = self.matchers[label].match(observed)
+        return _dict_to_mismatch(
+            mismatches, result_mismatch=LabelledMismatches)
+
+
+class DictMismatches(Mismatch):
+    """A mismatch with a dict of child mismatches."""
+
+    def __init__(self, mismatches, details=None):
+        super(DictMismatches, self).__init__(None, details=details)
+        self.mismatches = mismatches
+
+    def describe(self):
+        lines = ['{']
+        lines.extend(
+            ['  %r: %s,' % (key, mismatch.describe())
+             for (key, mismatch) in sorted(self.mismatches.items())])
+        lines.append('}')
+        return '\n'.join(lines)
+
+
+def _dict_to_mismatch(data, to_mismatch=None,
+                      result_mismatch=DictMismatches):
+    if to_mismatch:
+        data = map_values(to_mismatch, data)
+    mismatches = filter_values(bool, data)
+    if mismatches:
+        return result_mismatch(mismatches)
+
+
+class _MatchCommonKeys(Matcher):
+    """Match on keys in a dictionary.
+
+    Given a dictionary where the values are matchers, this will look for
+    common keys in the matched dictionary and match if and only if all common
+    keys match the given matchers.
+
+    Thus::
+
+      >>> structure = {'a': Equals('x'), 'b': Equals('y')}
+      >>> _MatchCommonKeys(structure).match({'a': 'x', 'c': 'z'})
+      None
+    """
+
+    def __init__(self, dict_of_matchers):
+        super(_MatchCommonKeys, self).__init__()
+        self._matchers = dict_of_matchers
+
+    def _compare_dicts(self, expected, observed):
+        common_keys = set(expected.keys()) & set(observed.keys())
+        mismatches = {}
+        for key in common_keys:
+            mismatch = expected[key].match(observed[key])
+            if mismatch:
+                mismatches[key] = mismatch
+        return mismatches
+
+    def match(self, observed):
+        mismatches = self._compare_dicts(self._matchers, observed)
+        if mismatches:
+            return DictMismatches(mismatches)
+
+
+class _SubDictOf(Matcher):
+    """Matches if the matched dict only has keys that are in given dict."""
+
+    def __init__(self, super_dict, format_value=repr):
+        super(_SubDictOf, self).__init__()
+        self.super_dict = super_dict
+        self.format_value = format_value
+
+    def match(self, observed):
+        excess = dict_subtract(observed, self.super_dict)
+        return _dict_to_mismatch(
+            excess, lambda v: Mismatch(self.format_value(v)))
+
+
+class _SuperDictOf(Matcher):
+    """Matches if all of the keys in the given dict are in the matched dict.
+    """
+
+    def __init__(self, sub_dict, format_value=repr):
+        super(_SuperDictOf, self).__init__()
+        self.sub_dict = sub_dict
+        self.format_value = format_value
+
+    def match(self, super_dict):
+        return _SubDictOf(super_dict, self.format_value).match(self.sub_dict)
+
+
+def _format_matcher_dict(matchers):
+    return '{%s}' % (
+        ', '.join(sorted('%r: %s' % (k, v) for k, v in matchers.items())))
+
+
+class _CombinedMatcher(Matcher):
+    """Many matchers labelled and combined into one uber-matcher.
+
+    Subclass this and then specify a dict of matcher factories that take a
+    single 'expected' value and return a matcher.  The subclass will match
+    only if all of the matchers made from factories match.
+
+    Not **entirely** dissimilar from ``MatchesAll``.
+    """
+
+    matcher_factories = {}
+
+    def __init__(self, expected):
+        super(_CombinedMatcher, self).__init__()
+        self._expected = expected
+
+    def format_expected(self, expected):
+        return repr(expected)
+
+    def __str__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__, self.format_expected(self._expected))
+
+    def match(self, observed):
+        matchers = dict(
+            (k, v(self._expected)) for k, v in self.matcher_factories.items())
+        return MatchesAllDict(matchers).match(observed)
+
+
+class MatchesDict(_CombinedMatcher):
+    """Match a dictionary exactly, by its keys.
+
+    Specify a dictionary mapping keys (often strings) to matchers.  This is
+    the 'expected' dict.  Any dictionary that matches this must have exactly
+    the same keys, and the values must match the corresponding matchers in the
+    expected dict.
+    """
+
+    matcher_factories = {
+        'Extra': _SubDictOf,
+        'Missing': lambda m: _SuperDictOf(m, format_value=str),
+        'Differences': _MatchCommonKeys,
+        }
+
+    format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class ContainsDict(_CombinedMatcher):
+    """Match a dictionary for that contains a specified sub-dictionary.
+
+    Specify a dictionary mapping keys (often strings) to matchers.  This is
+    the 'expected' dict.  Any dictionary that matches this must have **at
+    least** these keys, and the values must match the corresponding matchers
+    in the expected dict.  Dictionaries that have more keys will also match.
+
+    In other words, any matching dictionary must contain the dictionary given
+    to the constructor.
+
+    Does not check for strict sub-dictionary.  That is, equal dictionaries
+    match.
+    """
+
+    matcher_factories = {
+        'Missing': lambda m: _SuperDictOf(m, format_value=str),
+        'Differences': _MatchCommonKeys,
+        }
+
+    format_expected = lambda self, expected: _format_matcher_dict(expected)
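+
+# Illustrative sketch: extra keys in the observed dict are allowed here:
+#
+#   from ._basic import Equals
+#   ContainsDict({'a': Equals(1)}).match({'a': 1, 'b': 2})   # None: matched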
+
+
+class ContainedByDict(_CombinedMatcher):
+    """Match a dictionary for which this is a super-dictionary.
+
+    Specify a dictionary mapping keys (often strings) to matchers.  This is
+    the 'expected' dict.  Any dictionary that matches this must have **only**
+    these keys, and the values must match the corresponding matchers in the
+    expected dict.  Dictionaries that have fewer keys can also match.
+
+    In other words, any matching dictionary must be contained by the
+    dictionary given to the constructor.
+
+    Does not check for strict super-dictionary.  That is, equal dictionaries
+    match.
+    """
+
+    matcher_factories = {
+        'Extra': _SubDictOf,
+        'Differences': _MatchCommonKeys,
+        }
+
+    format_expected = lambda self, expected: _format_matcher_dict(expected)
+
+
+class KeysEqual(Matcher):
+    """Checks whether a dict has particular keys."""
+
+    def __init__(self, *expected):
+        """Create a `KeysEqual` Matcher.
+
+        :param expected: The keys the dict is expected to have.  If a dict
+            is given, we use its keys; if a collection, we assume it is a
+            collection of expected keys.
+        """
+        super(KeysEqual, self).__init__()
+        try:
+            self.expected = expected[0].keys()
+        except AttributeError:
+            self.expected = list(expected)
+
+    def __str__(self):
+        return "KeysEqual(%s)" % ', '.join(map(repr, self.expected))
+
+    def match(self, matchee):
+        from ._basic import _BinaryMismatch, Equals
+        expected = sorted(self.expected)
+        matched = Equals(expected).match(sorted(matchee.keys()))
+        if matched:
+            return AnnotatedMismatch(
+                'Keys not equal',
+                _BinaryMismatch(expected, 'does not match', matchee))
+        return None
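+
+# Illustrative sketch:
+#
+#   KeysEqual('a', 'b').match({'a': 1, 'b': 2})   # None: matched
+#   KeysEqual('a').match({'a': 1, 'b': 2})        # mismatch: keys differ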
diff --git a/third_party/testtools/testtools/matchers/_doctest.py b/third_party/testtools/testtools/matchers/_doctest.py
new file mode 100644
index 0000000..41f3c00
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_doctest.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+    'DocTestMatches',
+    ]
+
+import doctest
+import re
+
+from ..compat import str_is_unicode
+from ._impl import Mismatch
+
+
+class _NonManglingOutputChecker(doctest.OutputChecker):
+    """Doctest checker that works with unicode rather than mangling strings
+
+    This is needed because current Python versions have tried to fix string
+    encoding related problems, but regressed the default behaviour with
+    unicode inputs in the process.
+
+    In Python 2.6 and 2.7 ``OutputChecker.output_difference`` was changed
+    to return a bytestring encoded as per ``sys.stdout.encoding``, or utf-8 if
+    that can't be determined. Worse, that encoding process happens in the
+    innocent looking `_indent` global function. Because the
+    `DocTestMismatch.describe` result may well not be destined for printing to
+    stdout, this is no good for us. To get a unicode return as before, the
+    method is monkey patched if ``doctest._encoding`` exists.
+
+    Python 3 has a different problem. For some reason both inputs are encoded
+    to ascii with 'backslashreplace', making an escaped string matches its
+    unescaped form. Overriding the offending ``OutputChecker._toAscii`` method
+    is sufficient to revert this.
+    """
+
+    def _toAscii(self, s):
+        """Return ``s`` unchanged rather than mangling it to ascii"""
+        return s
+
+    # Only do this overriding hackery if doctest has a broken _indent function
+    if getattr(doctest, "_encoding", None) is not None:
+        from types import FunctionType as __F
+        __f = doctest.OutputChecker.output_difference.im_func
+        __g = dict(__f.func_globals)
+        def _indent(s, indent=4, _pattern=re.compile("^(?!$)", re.MULTILINE)):
+            """Prepend non-empty lines in ``s`` with ``indent`` number of spaces"""
+            return _pattern.sub(indent*" ", s)
+        __g["_indent"] = _indent
+        output_difference = __F(__f.func_code, __g, "output_difference")
+        del __F, __f, __g, _indent
+
+
+class DocTestMatches(object):
+    """See if a string matches a doctest example."""
+
+    def __init__(self, example, flags=0):
+        """Create a DocTestMatches to match example.
+
+        :param example: The example to match e.g. 'foo bar baz'
+        :param flags: doctest comparison flags to match on. e.g.
+            doctest.ELLIPSIS.
+        """
+        if not example.endswith('\n'):
+            example += '\n'
+        self.want = example  # variable name required by doctest.
+        self.flags = flags
+        self._checker = _NonManglingOutputChecker()
+
+    def __str__(self):
+        if self.flags:
+            flagstr = ", flags=%d" % self.flags
+        else:
+            flagstr = ""
+        return 'DocTestMatches(%r%s)' % (self.want, flagstr)
+
+    def _with_nl(self, actual):
+        result = self.want.__class__(actual)
+        if not result.endswith('\n'):
+            result += '\n'
+        return result
+
+    def match(self, actual):
+        with_nl = self._with_nl(actual)
+        if self._checker.check_output(self.want, with_nl, self.flags):
+            return None
+        return DocTestMismatch(self, with_nl)
+
+    def _describe_difference(self, with_nl):
+        return self._checker.output_difference(self, with_nl, self.flags)
+
+
+class DocTestMismatch(Mismatch):
+    """Mismatch object for DocTestMatches."""
+
+    def __init__(self, matcher, with_nl):
+        self.matcher = matcher
+        self.with_nl = with_nl
+
+    def describe(self):
+        s = self.matcher._describe_difference(self.with_nl)
+        if str_is_unicode or isinstance(s, unicode):
+            return s
+        # GZ 2011-08-24: This is actually pretty bogus, most C0 codes should
+        #                be escaped, in addition to non-ascii bytes.
+        return s.decode("latin1").encode("ascii", "backslashreplace")
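+
+
+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   import doctest
+#   from testtools.matchers import DocTestMatches
+#   matcher = DocTestMatches("result: ...\n", doctest.ELLIPSIS)
+#   assert matcher.match("result: 42\n") is None   # '...' matches '42'
+#   assert matcher.match("oops") is not None       # a DocTestMismatch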
diff --git a/third_party/testtools/testtools/matchers/_exception.py b/third_party/testtools/testtools/matchers/_exception.py
new file mode 100644
index 0000000..cd4c90b
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_exception.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+    'MatchesException',
+    'Raises',
+    'raises',
+    ]
+
+import sys
+
+from testtools.compat import (
+    classtypes,
+    istext,
+    )
+from ._basic import MatchesRegex
+from ._higherorder import AfterPreproccessing
+from ._impl import (
+    Matcher,
+    Mismatch,
+    )
+
+
+_error_repr = BaseException.__repr__
+
+
+def _is_exception(exc):
+    return isinstance(exc, BaseException)
+
+
+def _is_user_exception(exc):
+    return isinstance(exc, Exception)
+
+
+class MatchesException(Matcher):
+    """Match an exc_info tuple against an exception instance or type."""
+
+    def __init__(self, exception, value_re=None):
+        """Create a MatchesException that will match exc_info's for exception.
+
+        :param exception: Either an exception instance or type.
+            If an instance is given, the type and arguments of the exception
+            are checked. If a type is given only the type of the exception is
+            checked. If a tuple is given, then as with isinstance, any of the
+            types in the tuple matching is sufficient to match.
+        :param value_re: If 'exception' is a type, and the matchee exception
+            is of the right type, then match against this.  If value_re is a
+            string, then assume value_re is a regular expression and match
+            the str() of the exception against it.  Otherwise, assume value_re
+            is a matcher, and match the exception against it.
+        """
+        Matcher.__init__(self)
+        self.expected = exception
+        if istext(value_re):
+            value_re = AfterPreproccessing(str, MatchesRegex(value_re), False)
+        self.value_re = value_re
+        expected_type = type(self.expected)
+        self._is_instance = not any(issubclass(expected_type, class_type)
+                for class_type in classtypes() + (tuple,))
+
+    def match(self, other):
+        if type(other) != tuple:
+            return Mismatch('%r is not an exc_info tuple' % other)
+        expected_class = self.expected
+        if self._is_instance:
+            expected_class = expected_class.__class__
+        if not issubclass(other[0], expected_class):
+            return Mismatch('%r is not a %r' % (other[0], expected_class))
+        if self._is_instance:
+            if other[1].args != self.expected.args:
+                return Mismatch('%s has different arguments to %s.' % (
+                        _error_repr(other[1]), _error_repr(self.expected)))
+        elif self.value_re is not None:
+            return self.value_re.match(other[1])
+
+    def __str__(self):
+        if self._is_instance:
+            return "MatchesException(%s)" % _error_repr(self.expected)
+        return "MatchesException(%s)" % repr(self.expected)
+
+
+class Raises(Matcher):
+    """Match if the matchee raises an exception when called.
+
+    Exceptions which are not subclasses of Exception propagate out of the
+    Raises.match call unless they are explicitly matched.
+    """
+
+    def __init__(self, exception_matcher=None):
+        """Create a Raises matcher.
+
+        :param exception_matcher: Optional validator for the exception raised
+            by matchee. If supplied the exc_info tuple for the exception raised
+            is passed into that matcher. If no exception_matcher is supplied
+            then the simple fact of raising an exception is considered enough
+            to match on.
+        """
+        self.exception_matcher = exception_matcher
+
+    def match(self, matchee):
+        try:
+            result = matchee()
+            return Mismatch('%r returned %r' % (matchee, result))
+        # Catch all exceptions: Raises() should be able to match a
+        # KeyboardInterrupt or SystemExit.
+        except:
+            exc_info = sys.exc_info()
+            if self.exception_matcher:
+                mismatch = self.exception_matcher.match(exc_info)
+                if not mismatch:
+                    del exc_info
+                    return
+            else:
+                mismatch = None
+            # The exception did not match, or no explicit matching logic was
+            # performed. If the exception is a non-user exception then
+            # propagate it.
+            exception = exc_info[1]
+            if _is_exception(exception) and not _is_user_exception(exception):
+                del exc_info
+                raise
+            return mismatch
+
+    def __str__(self):
+        return 'Raises()'
+
+
+def raises(exception):
+    """Make a matcher that checks that a callable raises an exception.
+
+    This is a convenience function, exactly equivalent to::
+
+        return Raises(MatchesException(exception))
+
+    See `Raises` and `MatchesException` for more information.
+    """
+    return Raises(MatchesException(exception))
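+
+
+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   from testtools.matchers import MatchesException, Raises, raises
+#   matcher = Raises(MatchesException(ZeroDivisionError))
+#   assert matcher.match(lambda: 1 / 0) is None
+#   # raises() is shorthand for the same composition:
+#   assert raises(ZeroDivisionError).match(lambda: 1 / 0) is None
+#   # A callable that returns normally produces a Mismatch instead:
+#   assert Raises().match(lambda: 42) is not None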
diff --git a/third_party/testtools/testtools/matchers/_filesystem.py b/third_party/testtools/testtools/matchers/_filesystem.py
new file mode 100644
index 0000000..54f749b
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_filesystem.py
@@ -0,0 +1,192 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers for things related to the filesystem."""
+
+__all__ = [
+    'FileContains',
+    'DirExists',
+    'FileExists',
+    'HasPermissions',
+    'PathExists',
+    'SamePath',
+    'TarballContains',
+    ]
+
+import os
+import tarfile
+
+from ._basic import Equals
+from ._higherorder import (
+    MatchesAll,
+    MatchesPredicate,
+    )
+from ._impl import (
+    Matcher,
+    )
+
+
+def PathExists():
+    """Matches if the given path exists.
+
+    Use like this::
+
+      assertThat('/some/path', PathExists())
+    """
+    return MatchesPredicate(os.path.exists, "%s does not exist.")
+
+
+def DirExists():
+    """Matches if the path exists and is a directory."""
+    return MatchesAll(
+        PathExists(),
+        MatchesPredicate(os.path.isdir, "%s is not a directory."),
+        first_only=True)
+
+
+def FileExists():
+    """Matches if the given path exists and is a file."""
+    return MatchesAll(
+        PathExists(),
+        MatchesPredicate(os.path.isfile, "%s is not a file."),
+        first_only=True)
+
+
+class DirContains(Matcher):
+    """Matches if the given directory contains files with the given names.
+
+    That is, is the directory listing exactly equal to the given files?
+    """
+
+    def __init__(self, filenames=None, matcher=None):
+        """Construct a ``DirContains`` matcher.
+
+        Can be used in a basic mode where the whole directory listing is
+        matched against an expected directory listing (by passing
+        ``filenames``).  Can also be used in a more advanced way where the
+        whole directory listing is matched against an arbitrary matcher (by
+        passing ``matcher`` instead).
+
+        :param filenames: If specified, match the sorted directory listing
+            against this list of filenames, sorted.
+        :param matcher: If specified, match the sorted directory listing
+            against this matcher.
+        """
+        if filenames is None and matcher is None:
+            raise AssertionError(
+                "Must provide one of `filenames` or `matcher`.")
+        if None not in (filenames, matcher):
+            raise AssertionError(
+                "Must provide either `filenames` or `matcher`, not both.")
+        if filenames is None:
+            self.matcher = matcher
+        else:
+            self.matcher = Equals(sorted(filenames))
+
+    def match(self, path):
+        mismatch = DirExists().match(path)
+        if mismatch is not None:
+            return mismatch
+        return self.matcher.match(sorted(os.listdir(path)))
+
+
+class FileContains(Matcher):
+    """Matches if the given file has the specified contents."""
+
+    def __init__(self, contents=None, matcher=None):
+        """Construct a ``FileContains`` matcher.
+
+        Can be used in a basic mode where the file contents are compared for
+        equality against the expected file contents (by passing ``contents``).
+        Can also be used in a more advanced way where the file contents are
+        matched against an arbitrary matcher (by passing ``matcher`` instead).
+
+        :param contents: If specified, match the contents of the file with
+            these contents.
+        :param matcher: If specified, match the contents of the file against
+            this matcher.
+        """
+        if contents is None and matcher is None:
+            raise AssertionError(
+                "Must provide one of `contents` or `matcher`.")
+        if None not in (contents, matcher):
+            raise AssertionError(
+                "Must provide either `contents` or `matcher`, not both.")
+        if matcher is None:
+            self.matcher = Equals(contents)
+        else:
+            self.matcher = matcher
+
+    def match(self, path):
+        mismatch = PathExists().match(path)
+        if mismatch is not None:
+            return mismatch
+        f = open(path)
+        try:
+            actual_contents = f.read()
+            return self.matcher.match(actual_contents)
+        finally:
+            f.close()
+
+    def __str__(self):
+        return "File at path exists and contains %s" % self.contents
+
+
+class HasPermissions(Matcher):
+    """Matches if a file has the given permissions.
+
+    Permissions are specified and matched as a four-digit octal string.
+    """
+
+    def __init__(self, octal_permissions):
+        """Construct a HasPermissions matcher.
+
+        :param octal_permissions: A four digit octal string, representing the
+            intended access permissions. e.g. '0775' for rwxrwxr-x.
+        """
+        super(HasPermissions, self).__init__()
+        self.octal_permissions = octal_permissions
+
+    def match(self, filename):
+        permissions = oct(os.stat(filename).st_mode)[-4:]
+        return Equals(self.octal_permissions).match(permissions)
+
+
+class SamePath(Matcher):
+    """Matches if two paths are the same.
+
+    That is, the paths are equal, or they point to the same file but in
+    different ways.  The paths do not have to exist.
+    """
+
+    def __init__(self, path):
+        super(SamePath, self).__init__()
+        self.path = path
+
+    def match(self, other_path):
+        f = lambda x: os.path.abspath(os.path.realpath(x))
+        return Equals(f(self.path)).match(f(other_path))
+
+
+class TarballContains(Matcher):
+    """Matches if the given tarball contains the given paths.
+
+    Uses TarFile.getnames() to get the paths out of the tarball.
+    """
+
+    def __init__(self, paths):
+        super(TarballContains, self).__init__()
+        self.paths = paths
+        self.path_matcher = Equals(sorted(self.paths))
+
+    def match(self, tarball_path):
+        # Open underlying file first to ensure it's always closed:
+        # <http://bugs.python.org/issue10233>
+        f = open(tarball_path, "rb")
+        try:
+            tarball = tarfile.open(tarball_path, fileobj=f)
+            try:
+                return self.path_matcher.match(sorted(tarball.getnames()))
+            finally:
+                tarball.close()
+        finally:
+            f.close()
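+
+
+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   import os, tempfile
+#   from testtools.matchers import DirExists, FileContains, PathExists
+#   tmp = tempfile.mkdtemp()
+#   assert PathExists().match(tmp) is None
+#   assert DirExists().match(tmp) is None
+#   path = os.path.join(tmp, 'greeting')
+#   with open(path, 'w') as f:
+#       f.write('hello')
+#   assert FileContains('hello').match(path) is None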
diff --git a/third_party/testtools/testtools/matchers/_higherorder.py b/third_party/testtools/testtools/matchers/_higherorder.py
new file mode 100644
index 0000000..3570f57
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_higherorder.py
@@ -0,0 +1,368 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+__all__ = [
+    'AfterPreprocessing',
+    'AllMatch',
+    'Annotate',
+    'AnyMatch',
+    'MatchesAny',
+    'MatchesAll',
+    'Not',
+    ]
+
+import types
+
+from ._impl import (
+    Matcher,
+    Mismatch,
+    MismatchDecorator,
+    )
+
+
+class MatchesAny(object):
+    """Matches if any of the matchers it is created with match."""
+
+    def __init__(self, *matchers):
+        self.matchers = matchers
+
+    def match(self, matchee):
+        results = []
+        for matcher in self.matchers:
+            mismatch = matcher.match(matchee)
+            if mismatch is None:
+                return None
+            results.append(mismatch)
+        return MismatchesAll(results)
+
+    def __str__(self):
+        return "MatchesAny(%s)" % ', '.join([
+            str(matcher) for matcher in self.matchers])
+
+
+class MatchesAll(object):
+    """Matches if all of the matchers it is created with match."""
+
+    def __init__(self, *matchers, **options):
+        """Construct a MatchesAll matcher.
+
+        Just list the component matchers as arguments in the ``*args``
+        style. If you want only the first mismatch to be reported, pass in
+        first_only=True as a keyword argument. By default, all mismatches are
+        reported.
+        """
+        self.matchers = matchers
+        self.first_only = options.get('first_only', False)
+
+    def __str__(self):
+        return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
+
+    def match(self, matchee):
+        results = []
+        for matcher in self.matchers:
+            mismatch = matcher.match(matchee)
+            if mismatch is not None:
+                if self.first_only:
+                    return mismatch
+                results.append(mismatch)
+        if results:
+            return MismatchesAll(results)
+        else:
+            return None
+
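+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   from testtools.matchers import Equals, MatchesAll, MatchesAny, Not
+#   assert MatchesAny(Equals(1), Equals(2)).match(2) is None
+#   assert MatchesAll(Not(Equals(1)), Not(Equals(2))).match(3) is None
+#   # On failure, MatchesAll returns a MismatchesAll describing every
+#   # component mismatch, or only the first when first_only=True is passed.
+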
+
+class MismatchesAll(Mismatch):
+    """A mismatch with many child mismatches."""
+
+    def __init__(self, mismatches, wrap=True):
+        self.mismatches = mismatches
+        self._wrap = wrap
+
+    def describe(self):
+        descriptions = []
+        if self._wrap:
+            descriptions = ["Differences: ["]
+        for mismatch in self.mismatches:
+            descriptions.append(mismatch.describe())
+        if self._wrap:
+            descriptions.append("]")
+        return '\n'.join(descriptions)
+
+
+class Not(object):
+    """Inverts a matcher."""
+
+    def __init__(self, matcher):
+        self.matcher = matcher
+
+    def __str__(self):
+        return 'Not(%s)' % (self.matcher,)
+
+    def match(self, other):
+        mismatch = self.matcher.match(other)
+        if mismatch is None:
+            return MatchedUnexpectedly(self.matcher, other)
+        else:
+            return None
+
+
+class MatchedUnexpectedly(Mismatch):
+    """A thing matched when it wasn't supposed to."""
+
+    def __init__(self, matcher, other):
+        self.matcher = matcher
+        self.other = other
+
+    def describe(self):
+        return "%r matches %s" % (self.other, self.matcher)
+
+
+class Annotate(object):
+    """Annotates a matcher with a descriptive string.
+
+    Mismatches are then described as '<mismatch>: <annotation>'.
+    """
+
+    def __init__(self, annotation, matcher):
+        self.annotation = annotation
+        self.matcher = matcher
+
+    @classmethod
+    def if_message(cls, annotation, matcher):
+        """Annotate ``matcher`` only if ``annotation`` is non-empty."""
+        if not annotation:
+            return matcher
+        return cls(annotation, matcher)
+
+    def __str__(self):
+        return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
+
+    def match(self, other):
+        mismatch = self.matcher.match(other)
+        if mismatch is not None:
+            return AnnotatedMismatch(self.annotation, mismatch)
+
+
+class PostfixedMismatch(MismatchDecorator):
+    """A mismatch annotated with a descriptive string."""
+
+    def __init__(self, annotation, mismatch):
+        super(PostfixedMismatch, self).__init__(mismatch)
+        self.annotation = annotation
+        self.mismatch = mismatch
+
+    def describe(self):
+        return '%s: %s' % (self.original.describe(), self.annotation)
+
+
+AnnotatedMismatch = PostfixedMismatch
+
+
+class PrefixedMismatch(MismatchDecorator):
+
+    def __init__(self, prefix, mismatch):
+        super(PrefixedMismatch, self).__init__(mismatch)
+        self.prefix = prefix
+
+    def describe(self):
+        return '%s: %s' % (self.prefix, self.original.describe())
+
+
+class AfterPreprocessing(object):
+    """Matches if the value matches after passing through a function.
+
+    This can be used to aid in creating trivial matchers as functions, for
+    example::
+
+      def PathHasFileContent(content):
+          def _read(path):
+              return open(path).read()
+          return AfterPreprocessing(_read, Equals(content))
+    """
+
+    def __init__(self, preprocessor, matcher, annotate=True):
+        """Create an AfterPreprocessing matcher.
+
+        :param preprocessor: A function called with the matchee before
+            matching.
+        :param matcher: What to match the preprocessed matchee against.
+        :param annotate: Whether or not to annotate the matcher with
+            something explaining how we transformed the matchee. Defaults
+            to True.
+        """
+        self.preprocessor = preprocessor
+        self.matcher = matcher
+        self.annotate = annotate
+
+    def _str_preprocessor(self):
+        if isinstance(self.preprocessor, types.FunctionType):
+            return '<function %s>' % self.preprocessor.__name__
+        return str(self.preprocessor)
+
+    def __str__(self):
+        return "AfterPreprocessing(%s, %s)" % (
+            self._str_preprocessor(), self.matcher)
+
+    def match(self, value):
+        after = self.preprocessor(value)
+        if self.annotate:
+            matcher = Annotate(
+                "after %s on %r" % (self._str_preprocessor(), value),
+                self.matcher)
+        else:
+            matcher = self.matcher
+        return matcher.match(after)
+
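+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   from testtools.matchers import AfterPreprocessing, Equals
+#   assert AfterPreprocessing(len, Equals(3)).match('abc') is None
+#   # A mismatch is annotated with the preprocessing step unless
+#   # annotate=False is passed:
+#   print(AfterPreprocessing(len, Equals(3)).match('ab').describe())
+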
+
+# This is the old, deprecated spelling of the name, kept for backwards
+# compatibility.
+AfterPreproccessing = AfterPreprocessing
+
+
+class AllMatch(object):
+    """Matches if all provided values match the given matcher."""
+
+    def __init__(self, matcher):
+        self.matcher = matcher
+
+    def __str__(self):
+        return 'AllMatch(%s)' % (self.matcher,)
+
+    def match(self, values):
+        mismatches = []
+        for value in values:
+            mismatch = self.matcher.match(value)
+            if mismatch:
+                mismatches.append(mismatch)
+        if mismatches:
+            return MismatchesAll(mismatches)
+
+
+class AnyMatch(object):
+    """Matches if any of the provided values match the given matcher."""
+
+    def __init__(self, matcher):
+        self.matcher = matcher
+
+    def __str__(self):
+        return 'AnyMatch(%s)' % (self.matcher,)
+
+    def match(self, values):
+        mismatches = []
+        for value in values:
+            mismatch = self.matcher.match(value)
+            if mismatch:
+                mismatches.append(mismatch)
+            else:
+                return None
+        return MismatchesAll(mismatches)
+
+
+class MatchesPredicate(Matcher):
+    """Match if a given function returns True.
+
+    It is reasonably common to want to make a very simple matcher based on a
+    function that you already have that returns True or False given a single
+    argument (i.e. a predicate function).  This matcher makes it very easy to
+    do so. e.g.::
+
+      IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even')
+      self.assertThat(4, IsEven)
+    """
+
+    def __init__(self, predicate, message):
+        """Create a ``MatchesPredicate`` matcher.
+
+        :param predicate: A function that takes a single argument and returns
+            a value that will be interpreted as a boolean.
+        :param message: A message to describe a mismatch.  It will be formatted
+            with '%' and be given whatever was passed to ``match()``. Thus, it
+            needs to contain exactly one thing like '%s', '%d' or '%f'.
+        """
+        self.predicate = predicate
+        self.message = message
+
+    def __str__(self):
+        return '%s(%r, %r)' % (
+            self.__class__.__name__, self.predicate, self.message)
+
+    def match(self, x):
+        if not self.predicate(x):
+            return Mismatch(self.message % x)
+
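+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   from testtools.matchers import MatchesPredicate
+#   IsEven = MatchesPredicate(lambda x: x % 2 == 0, '%s is not even')
+#   assert IsEven.match(4) is None
+#   assert IsEven.match(3).describe() == '3 is not even'
+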
+
+def MatchesPredicateWithParams(predicate, message, name=None):
+    """Match if a given parameterised function returns True.
+
+    It is reasonably common to want to make a very simple matcher based on a
+    function that you already have that returns True or False given some
+    arguments. This matcher makes it very easy to do so. e.g.::
+
+      HasLength = MatchesPredicateWithParams(
+          lambda x, y: len(x) == y, 'len({0}) is not {1}')
+      # This assertion will fail, as 'len([1, 2]) == 3' is False.
+      self.assertThat([1, 2], HasLength(3))
+
+    Note that unlike MatchesPredicate, MatchesPredicateWithParams returns a
+    factory: calling it with the extra parameters constructs the actual
+    matcher.
+
+    The predicate function should take the object to match as its first
+    parameter. Any additional parameters supplied when constructing a matcher
+    are supplied to the predicate as additional parameters when checking for a
+    match.
+
+    :param predicate: The predicate function.
+    :param message: A format string for describing mis-matches.
+    :param name: Optional replacement name for the matcher.
+    """
+    def construct_matcher(*args, **kwargs):
+        return _MatchesPredicateWithParams(
+            predicate, message, name, *args, **kwargs)
+    return construct_matcher
+
+
+class _MatchesPredicateWithParams(Matcher):
+
+    def __init__(self, predicate, message, name, *args, **kwargs):
+        """Create a ``MatchesPredicateWithParams`` matcher.
+
+        :param predicate: A function that takes an object to match and
+            additional params as given in ``*args`` and ``**kwargs``. The
+            result of the function will be interpreted as a boolean to
+            determine a match.
+        :param message: A message to describe a mismatch.  It will be
+            formatted with .format(), receiving the matchee followed by
+            ``*args`` as positional arguments and ``**kwargs`` as keyword
+            arguments.
+
+            For instance, to format a single parameter::
+
+                "{0} is not a {1}"
+
+            To format a keyword arg::
+
+                "{0} is not a {type_to_check}"
+        :param name: What name to use for the matcher class. Pass None to use
+            the default.
+        """
+        self.predicate = predicate
+        self.message = message
+        self.name = name
+        self.args = args
+        self.kwargs = kwargs
+
+    def __str__(self):
+        args = [str(arg) for arg in self.args]
+        kwargs = ["%s=%s" % item for item in self.kwargs.items()]
+        args = ", ".join(args + kwargs)
+        if self.name is None:
+            name = 'MatchesPredicateWithParams(%r, %r)' % (
+                self.predicate, self.message)
+        else:
+            name = self.name
+        return '%s(%s)' % (name, args)
+
+    def match(self, x):
+        if not self.predicate(x, *self.args, **self.kwargs):
+            return Mismatch(
+                self.message.format(*((x,) + self.args), **self.kwargs))
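+
+
+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   HasLength = MatchesPredicateWithParams(
+#       lambda x, y: len(x) == y, 'len({0}) is not {1}')
+#   assert HasLength(2).match([1, 2]) is None
+#   assert HasLength(3).match([1, 2]).describe() == 'len([1, 2]) is not 3'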
diff --git a/third_party/testtools/testtools/matchers/_impl.py b/third_party/testtools/testtools/matchers/_impl.py
new file mode 100644
index 0000000..19a93af
--- /dev/null
+++ b/third_party/testtools/testtools/matchers/_impl.py
@@ -0,0 +1,173 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Matchers, a way to express complex assertions outside the testcase.
+
+Inspired by 'hamcrest'.
+
+Matcher provides the abstract API that all matchers need to implement.
+
+Bundled matchers are listed in __all__: a list can be obtained by running
+$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
+"""
+
+__all__ = [
+    'Matcher',
+    'Mismatch',
+    'MismatchDecorator',
+    'MismatchError',
+    ]
+
+from testtools.compat import (
+    _isbytes,
+    istext,
+    str_is_unicode,
+    text_repr
+    )
+
+
+class Matcher(object):
+    """A pattern matcher.
+
+    A Matcher must implement match and __str__ to be used by
+    testtools.TestCase.assertThat. Matcher.match(thing) returns None when
+    thing is completely matched, and a Mismatch object otherwise.
+
+    Matchers can be useful outside of test cases, as they are simply a
+    pattern matching language expressed as objects.
+
+    testtools.matchers is inspired by hamcrest, but is pythonic rather than
+    a Java transcription.
+    """
+
+    def match(self, something):
+        """Return None if this matcher matches something, a Mismatch otherwise.
+        """
+        raise NotImplementedError(self.match)
+
+    def __str__(self):
+        """Get a sensible human representation of the matcher.
+
+        This should include the parameters given to the matcher and any
+        state that would affect the match operation.
+        """
+        raise NotImplementedError(self.__str__)
+
+
+class Mismatch(object):
+    """An object describing a mismatch detected by a Matcher."""
+
+    def __init__(self, description=None, details=None):
+        """Construct a `Mismatch`.
+
+        :param description: A description to use.  If not provided,
+            `Mismatch.describe` must be implemented.
+        :param details: Extra details about the mismatch.  Defaults
+            to the empty dict.
+        """
+        if description:
+            self._description = description
+        if details is None:
+            details = {}
+        self._details = details
+
+    def describe(self):
+        """Describe the mismatch.
+
+        This should be either a human-readable string or castable to a string.
+        In particular, it should either be plain ascii or unicode on Python 2,
+        and care should be taken to escape control characters.
+        """
+        try:
+            return self._description
+        except AttributeError:
+            raise NotImplementedError(self.describe)
+
+    def get_details(self):
+        """Get extra details about the mismatch.
+
+        This allows the mismatch to provide extra information beyond the basic
+        description, including large text or binary files, or debugging internals
+        without having to force it to fit in the output of 'describe'.
+
+        The testtools assertion assertThat will query get_details and attach
+        all its values to the test, permitting them to be reported in whatever
+        manner the test environment chooses.
+
+        :return: a dict mapping names to Content objects. name is a string to
+            name the detail, and the Content object is the detail to add
+            to the result. For more information see the API to which items from
+            this dict are passed, testtools.TestCase.addDetail.
+        """
+        return getattr(self, '_details', {})
+
+    def __repr__(self):
+        return  "<testtools.matchers.Mismatch object at %x attributes=%r>" % (
+            id(self), self.__dict__)
+
+
+class MismatchError(AssertionError):
+    """Raised when a mismatch occurs."""
+
+    # This class exists to work around
+    # <https://bugs.launchpad.net/testtools/+bug/804127>.  It provides a
+    # guaranteed way of getting a readable exception, no matter what crazy
+    # characters are in the matchee, matcher or mismatch.
+
+    def __init__(self, matchee, matcher, mismatch, verbose=False):
+        super(MismatchError, self).__init__()
+        self.matchee = matchee
+        self.matcher = matcher
+        self.mismatch = mismatch
+        self.verbose = verbose
+
+    def __str__(self):
+        difference = self.mismatch.describe()
+        if self.verbose:
+            # GZ 2011-08-24: Smelly API? Better to take any object and special
+            #                case text inside?
+            if istext(self.matchee) or _isbytes(self.matchee):
+                matchee = text_repr(self.matchee, multiline=False)
+            else:
+                matchee = repr(self.matchee)
+            return (
+                'Match failed. Matchee: %s\nMatcher: %s\nDifference: %s\n'
+                % (matchee, self.matcher, difference))
+        else:
+            return difference
+
+    if not str_is_unicode:
+
+        __unicode__ = __str__
+
+        def __str__(self):
+            return self.__unicode__().encode("ascii", "backslashreplace")
+
+
+class MismatchDecorator(object):
+    """Decorate a ``Mismatch``.
+
+    Forwards all messages to the original mismatch object.  Probably the best
+    way to use this is inherit from this class and then provide your own
+    custom decoration logic.
+    """
+
+    def __init__(self, original):
+        """Construct a `MismatchDecorator`.
+
+        :param original: A `Mismatch` object to decorate.
+        """
+        self.original = original
+
+    def __repr__(self):
+        return '<testtools.matchers.MismatchDecorator(%r)>' % (self.original,)
+
+    def describe(self):
+        return self.original.describe()
+
+    def get_details(self):
+        return self.original.get_details()
+
+
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
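+
+
+# Illustrative sketch of a custom matcher built on these base classes (an
+# editorial addition, not part of upstream testtools):
+#
+#   class IsPositive(Matcher):
+#       def match(self, value):
+#           if value <= 0:
+#               return Mismatch('%r is not positive' % (value,))
+#       def __str__(self):
+#           return 'IsPositive()'
+#
+#   assert IsPositive().match(3) is None
+#   assert IsPositive().match(-1).describe() == '-1 is not positive'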
diff --git a/third_party/testtools/testtools/monkey.py b/third_party/testtools/testtools/monkey.py
new file mode 100644
index 0000000..ba0ac8f
--- /dev/null
+++ b/third_party/testtools/testtools/monkey.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Helpers for monkey-patching Python code."""
+
+__all__ = [
+    'MonkeyPatcher',
+    'patch',
+    ]
+
+
+class MonkeyPatcher(object):
+    """A set of monkey-patches that can be applied and removed all together.
+
+    Use this to cover up attributes with new objects. Particularly useful for
+    testing difficult code.
+    """
+
+    # Marker used to indicate that the patched attribute did not exist on the
+    # object before we patched it.
+    _NO_SUCH_ATTRIBUTE = object()
+
+    def __init__(self, *patches):
+        """Construct a `MonkeyPatcher`.
+
+        :param patches: The patches to apply, each should be (obj, name,
+            new_value). Providing patches here is equivalent to calling
+            `add_patch`.
+        """
+        # List of patches to apply in (obj, name, value).
+        self._patches_to_apply = []
+        # List of the original values for things that have been patched.
+        # (obj, name, value) format.
+        self._originals = []
+        for patch in patches:
+            self.add_patch(*patch)
+
+    def add_patch(self, obj, name, value):
+        """Add a patch to overwrite 'name' on 'obj' with 'value'.
+
+        The attribute C{name} on C{obj} will be set to C{value} when
+        C{patch} is called or during C{run_with_patches}.
+
+        You can restore the original values with a call to restore().
+        """
+        self._patches_to_apply.append((obj, name, value))
+
+    def patch(self):
+        """Apply all of the patches that have been specified with `add_patch`.
+
+        Reverse this operation using L{restore}.
+        """
+        for obj, name, value in self._patches_to_apply:
+            original_value = getattr(obj, name, self._NO_SUCH_ATTRIBUTE)
+            self._originals.append((obj, name, original_value))
+            setattr(obj, name, value)
+
+    def restore(self):
+        """Restore all original values to any patched objects.
+
+        If the patched attribute did not exist on an object before it was
+        patched, `restore` will delete the attribute so as to return the
+        object to its original state.
+        """
+        while self._originals:
+            obj, name, value = self._originals.pop()
+            if value is self._NO_SUCH_ATTRIBUTE:
+                delattr(obj, name)
+            else:
+                setattr(obj, name, value)
+
+    def run_with_patches(self, f, *args, **kw):
+        """Run 'f' with the given args and kwargs with all patches applied.
+
+        Restores all objects to their original state when finished.
+        """
+        self.patch()
+        try:
+            return f(*args, **kw)
+        finally:
+            self.restore()
+
+
+def patch(obj, attribute, value):
+    """Set 'obj.attribute' to 'value' and return a callable to restore 'obj'.
+
+    If 'attribute' is not set on 'obj' already, then the returned callable
+    will delete the attribute when called.
+
+    :param obj: An object to monkey-patch.
+    :param attribute: The name of the attribute to patch.
+    :param value: The value to set 'obj.attribute' to.
+    :return: A nullary callable that, when run, will restore 'obj' to its
+        original state.
+    """
+    patcher = MonkeyPatcher((obj, attribute, value))
+    patcher.patch()
+    return patcher.restore
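+
+
+# Illustrative usage (an editorial sketch, not part of upstream testtools):
+#
+#   import time
+#   patcher = MonkeyPatcher((time, 'time', lambda: 0.0))
+#   # The lambda defers the attribute lookup so the patched value is seen:
+#   assert patcher.run_with_patches(lambda: time.time()) == 0.0
+#   restore = patch(time, 'time', lambda: 1.0)
+#   assert time.time() == 1.0
+#   restore()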
diff --git a/third_party/testtools/testtools/run.py b/third_party/testtools/testtools/run.py
new file mode 100755
index 0000000..8421f25
--- /dev/null
+++ b/third_party/testtools/testtools/run.py
@@ -0,0 +1,535 @@
+# Copyright (c) 2009 testtools developers. See LICENSE for details.
+
+"""python -m testtools.run testspec [testspec...]
+
+Run some tests with the testtools extended API.
+
+For instance, to run the testtools test suite:
+ $ python -m testtools.run testtools.tests.test_suite
+"""
+
+from functools import partial
+import os.path
+import unittest
+import sys
+
+from extras import safe_hasattr
+
+from testtools import TextTestResult, testcase
+from testtools.compat import classtypes, istext, unicode_output_stream
+from testtools.testsuite import filter_by_ids, iterate_tests, sorted_tests
+
+
+defaultTestLoader = unittest.defaultTestLoader
+defaultTestLoaderCls = unittest.TestLoader
+
+if getattr(defaultTestLoader, 'discover', None) is None:
+    try:
+        import discover
+        defaultTestLoader = discover.DiscoveringTestLoader()
+        defaultTestLoaderCls = discover.DiscoveringTestLoader
+        have_discover = True
+        discover_impl = discover
+    except ImportError:
+        have_discover = False
+else:
+    have_discover = True
+    discover_impl = unittest.loader
+discover_fixed = False
+
+
+def list_test(test):
+    """Return the test ids that would be run if test() was run.
+
+    When things fail to import they can be represented as well, though
+    we use an ugly hack (see http://bugs.python.org/issue19746 for details)
+    to determine that. The difference matters because if a user is
+    filtering tests to run on the returned ids, a failed import can reduce
+    the visible tests but it can be impossible to tell that the selected
+    test would have been one of the imported ones.
+
+    :return: A tuple of test ids that would run and error strings
+        describing things that failed to import.
+    """
+    unittest_import_strs = set([
+        'unittest.loader.ModuleImportFailure.', 'discover.ModuleImportFailure.'
+        ])
+    test_ids = []
+    errors = []
+    for test in iterate_tests(test):
+        # Much ugly.
+        for prefix in unittest_import_strs:
+            if test.id().startswith(prefix):
+                errors.append(test.id()[len(prefix):])
+                break
+        else:
+            test_ids.append(test.id())
+    return test_ids, errors
+
+
+class TestToolsTestRunner(object):
+    """ A thunk object to support unittest.TestProgram."""
+
+    def __init__(self, verbosity=None, failfast=None, buffer=None,
+        stdout=None):
+        """Create a TestToolsTestRunner.
+
+        :param verbosity: Ignored.
+        :param failfast: Stop running tests at the first failure.
+        :param buffer: Ignored.
+        :param stdout: Stream to use for stdout.
+        """
+        self.failfast = failfast
+        if stdout is None:
+            stdout = sys.stdout
+        self.stdout = stdout
+
+    def list(self, test):
+        """List the tests that would be run if test() was run."""
+        test_ids, errors = list_test(test)
+        for test_id in test_ids:
+            self.stdout.write('%s\n' % test_id)
+        if errors:
+            self.stdout.write('Failed to import\n')
+            for test_id in errors:
+                self.stdout.write('%s\n' % test_id)
+            sys.exit(2)
+
+    def run(self, test):
+        "Run the given test case or test suite."
+        result = TextTestResult(
+            unicode_output_stream(self.stdout), failfast=self.failfast)
+        result.startTestRun()
+        try:
+            return test.run(result)
+        finally:
+            result.stopTestRun()
+
+
+####################
+# Taken from python 2.7 and slightly modified for compatibility with
+# older versions. Delete when 2.7 is the oldest supported version.
+# Modifications:
+#  - Use have_discover to raise an error if the user tries to use
+#    discovery on an old version and doesn't have discover installed.
+#  - If --catch is given check that installHandler is available, as
+#    it won't be on old python versions.
+#  - print calls have been made single-source Python 3 compatible.
+#  - exception handling likewise.
+#  - The default help has been changed to USAGE_AS_MAIN and USAGE_FROM_MODULE
+#    removed.
+#  - A tweak has been added to detect 'python -m *.run' and use a
+#    better progName in that case.
+#  - self.module is more comprehensively set to None when being invoked from
+#    the commandline - __name__ is used as a sentinel value.
+#  - --list has been added which can list tests (should be upstreamed).
+#  - --load-list has been added which can reduce the tests used (should be
+#    upstreamed).
+#  - The limitation of using getopt is declared to the user.
+#  - http://bugs.python.org/issue16709 is worked around, by sorting tests when
+#    discover is used.
+#  - We monkey-patch the discover and unittest loaders to address
+#     http://bugs.python.org/issue16662 with the proposed upstream patch.
+
+FAILFAST     = "  -f, --failfast   Stop on first failure\n"
+CATCHBREAK   = "  -c, --catch      Catch control-C and display results\n"
+BUFFEROUTPUT = "  -b, --buffer     Buffer stdout and stderr during test runs\n"
+
+USAGE_AS_MAIN = """\
+Usage: %(progName)s [options] [tests]
+
+Options:
+  -h, --help       Show this message
+  -v, --verbose    Verbose output
+  -q, --quiet      Minimal output
+  -l, --list       List tests rather than executing them.
+  --load-list      Specifies a file containing test ids, only tests matching
+                   those ids are executed.
+%(failfast)s%(catchbreak)s%(buffer)s
+Examples:
+  %(progName)s test_module               - run tests from test_module
+  %(progName)s module.TestClass          - run tests from module.TestClass
+  %(progName)s module.Class.test_method  - run specified test method
+
+All options must come before [tests].  [tests] can be a list of any number of
+test modules, classes and test methods.
+
+Alternative Usage: %(progName)s discover [options]
+
+Options:
+  -v, --verbose    Verbose output
+%(failfast)s%(catchbreak)s%(buffer)s  -s directory     Directory to start discovery ('.' default)
+  -p pattern       Pattern to match test files ('test*.py' default)
+  -t directory     Top level directory of project (default to
+                   start directory)
+  -l, --list       List tests rather than executing them.
+  --load-list      Specifies a file containing test ids, only tests matching
+                   those ids are executed.
+
+For test discovery all test modules must be importable from the top
+level directory of the project.
+"""
+
+
+class TestProgram(object):
+    """A command-line program that runs a set of tests; this is primarily
+       for making test modules conveniently executable.
+    """
+    USAGE = USAGE_AS_MAIN
+
+    # defaults for testing
+    failfast = catchbreak = buffer = progName = None
+
+    def __init__(self, module=__name__, defaultTest=None, argv=None,
+                    testRunner=None, testLoader=defaultTestLoader,
+                    exit=True, verbosity=1, failfast=None, catchbreak=None,
+                    buffer=None, stdout=None):
+        if module == __name__:
+            self.module = None
+        elif istext(module):
+            self.module = __import__(module)
+            for part in module.split('.')[1:]:
+                self.module = getattr(self.module, part)
+        else:
+            self.module = module
+        if argv is None:
+            argv = sys.argv
+        if stdout is None:
+            stdout = sys.stdout
+        self.stdout = stdout
+
+        self.exit = exit
+        self.failfast = failfast
+        self.catchbreak = catchbreak
+        self.verbosity = verbosity
+        self.buffer = buffer
+        self.defaultTest = defaultTest
+        self.listtests = False
+        self.load_list = None
+        self.testRunner = testRunner
+        self.testLoader = testLoader
+        progName = argv[0]
+        if progName.endswith('%srun.py' % os.path.sep):
+            elements = progName.split(os.path.sep)
+            progName = '%s.run' % elements[-2]
+        else:
+            progName = os.path.basename(argv[0])
+        self.progName = progName
+        self.parseArgs(argv)
+        if self.load_list:
+            # TODO: preserve existing suites (like testresources does in
+            # OptimisingTestSuite.add, but with a standard protocol).
+            # This is needed because the load_tests hook allows arbitrary
+            # suites, even if that is rarely used.
+            source = open(self.load_list, 'rb')
+            try:
+                lines = source.readlines()
+            finally:
+                source.close()
+            test_ids = set(line.strip().decode('utf-8') for line in lines)
+            self.test = filter_by_ids(self.test, test_ids)
+        if not self.listtests:
+            self.runTests()
+        else:
+            runner = self._get_runner()
+            if safe_hasattr(runner, 'list'):
+                runner.list(self.test)
+            else:
+                for test in iterate_tests(self.test):
+                    self.stdout.write('%s\n' % test.id())
+
+    def usageExit(self, msg=None):
+        if msg:
+            print(msg)
+        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+                 'buffer': ''}
+        if self.failfast != False:
+            usage['failfast'] = FAILFAST
+        if self.catchbreak != False:
+            usage['catchbreak'] = CATCHBREAK
+        if self.buffer != False:
+            usage['buffer'] = BUFFEROUTPUT
+        print(self.USAGE % usage)
+        sys.exit(2)
+
+    def parseArgs(self, argv):
+        if len(argv) > 1 and argv[1].lower() == 'discover':
+            self._do_discovery(argv[2:])
+            return
+
+        import getopt
+        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer',
+            'list', 'load-list=']
+        try:
+            options, args = getopt.getopt(argv[1:], 'hHvqfcbl', long_opts)
+            for opt, value in options:
+                if opt in ('-h','-H','--help'):
+                    self.usageExit()
+                if opt in ('-q','--quiet'):
+                    self.verbosity = 0
+                if opt in ('-v','--verbose'):
+                    self.verbosity = 2
+                if opt in ('-f','--failfast'):
+                    if self.failfast is None:
+                        self.failfast = True
+                    # Should this raise an exception if -f is not valid?
+                if opt in ('-c','--catch'):
+                    if self.catchbreak is None:
+                        self.catchbreak = True
+                    # Should this raise an exception if -c is not valid?
+                if opt in ('-b','--buffer'):
+                    if self.buffer is None:
+                        self.buffer = True
+                    # Should this raise an exception if -b is not valid?
+                if opt in ('-l', '--list'):
+                    self.listtests = True
+                if opt == '--load-list':
+                    self.load_list = value
+            if len(args) == 0 and self.defaultTest is None:
+                # createTests will load tests from self.module
+                self.testNames = None
+            elif len(args) > 0:
+                self.testNames = args
+            else:
+                self.testNames = (self.defaultTest,)
+            self.createTests()
+        except getopt.error:
+            self.usageExit(sys.exc_info()[1])
+
+    def createTests(self):
+        if self.testNames is None:
+            self.test = self.testLoader.loadTestsFromModule(self.module)
+        else:
+            self.test = self.testLoader.loadTestsFromNames(self.testNames,
+                                                           self.module)
+
+    def _do_discovery(self, argv, Loader=defaultTestLoaderCls):
+        # handle command line args for test discovery
+        if not have_discover:
+            raise AssertionError("Unable to use discovery, must use python 2.7 "
+                    "or greater, or install the discover package.")
+        _fix_discovery()
+        self.progName = '%s discover' % self.progName
+        import optparse
+        parser = optparse.OptionParser()
+        parser.prog = self.progName
+        parser.add_option('-v', '--verbose', dest='verbose', default=False,
+                          help='Verbose output', action='store_true')
+        if self.failfast != False:
+            parser.add_option('-f', '--failfast', dest='failfast', default=False,
+                              help='Stop on first fail or error',
+                              action='store_true')
+        if self.catchbreak != False:
+            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
+                              help='Catch ctrl-C and display results so far',
+                              action='store_true')
+        if self.buffer != False:
+            parser.add_option('-b', '--buffer', dest='buffer', default=False,
+                              help='Buffer stdout and stderr during tests',
+                              action='store_true')
+        parser.add_option('-s', '--start-directory', dest='start', default='.',
+                          help="Directory to start discovery ('.' default)")
+        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
+                          help="Pattern to match tests ('test*.py' default)")
+        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
+                          help='Top level directory of project (defaults to start directory)')
+        parser.add_option('-l', '--list', dest='listtests', default=False, action="store_true",
+                          help='List tests rather than running them.')
+        parser.add_option('--load-list', dest='load_list', default=None,
+                          help='Specify a filename containing the test ids to use.')
+
+        options, args = parser.parse_args(argv)
+        if len(args) > 3:
+            self.usageExit()
+
+        for name, value in zip(('start', 'pattern', 'top'), args):
+            setattr(options, name, value)
+
+        # only set options from the parsing here
+        # if they weren't set explicitly in the constructor
+        if self.failfast is None:
+            self.failfast = options.failfast
+        if self.catchbreak is None:
+            self.catchbreak = options.catchbreak
+        if self.buffer is None:
+            self.buffer = options.buffer
+        self.listtests = options.listtests
+        self.load_list = options.load_list
+
+        if options.verbose:
+            self.verbosity = 2
+
+        start_dir = options.start
+        pattern = options.pattern
+        top_level_dir = options.top
+
+        loader = Loader()
+        # See http://bugs.python.org/issue16709
+        # While sorting here is intrusive, it's better than being random.
+        # Rules for the sort:
+        # - standard suites are flattened, and the resulting tests sorted by
+        #   id.
+        # - non-standard suites are preserved as-is, and sorted into position
+        #   by the first test found by iterating the suite.
+        # We do this by a DSU process: flatten and grab a key, sort, strip the
+        # keys.
+        loaded = loader.discover(start_dir, pattern, top_level_dir)
+        self.test = sorted_tests(loaded)
+
+    def runTests(self):
+        if (self.catchbreak
+            and getattr(unittest, 'installHandler', None) is not None):
+            unittest.installHandler()
+        testRunner = self._get_runner()
+        self.result = testRunner.run(self.test)
+        if self.exit:
+            sys.exit(not self.result.wasSuccessful())
+
+    def _get_runner(self):
+        if self.testRunner is None:
+            self.testRunner = TestToolsTestRunner
+        try:
+            testRunner = self.testRunner(verbosity=self.verbosity,
+                                         failfast=self.failfast,
+                                         buffer=self.buffer,
+                                         stdout=self.stdout)
+        except TypeError:
+            # didn't accept the verbosity, buffer, failfast or stdout arguments
+            # Try with the prior contract
+            try:
+                testRunner = self.testRunner(verbosity=self.verbosity,
+                                             failfast=self.failfast,
+                                             buffer=self.buffer)
+            except TypeError:
+                # Now try calling it with defaults
+                try:
+                    testRunner = self.testRunner()
+                except TypeError:
+                    # it is assumed to be a TestRunner instance
+                    testRunner = self.testRunner
+        return testRunner
+
+
+def _fix_discovery():
+    # Monkey patch in the bugfix from http://bugs.python.org/issue16662
+    # - the code here is a straight copy from the Python core tree
+    # with the patch applied.
+    global discover_fixed
+    if discover_fixed:
+        return
+    # Do we have a fixed Python?
+    # (not committed upstream yet - so we can't uncomment this code,
+    # but when it gets committed, the next version to be released won't
+    # need monkey patching.)
+    # if sys.version_info[:2] > (3, 4):
+    #     discover_fixed = True
+    #     return
+    if not have_discover:
+        return
+    if safe_hasattr(discover_impl, '_jython_aware_splitext'):
+        _jython_aware_splitext = discover_impl._jython_aware_splitext
+    else:
+        def _jython_aware_splitext(path):
+            if path.lower().endswith('$py.class'):
+                return path[:-9]
+            return os.path.splitext(path)[0]
+    def loadTestsFromModule(self, module, use_load_tests=True, pattern=None):
+        """Return a suite of all tests cases contained in the given module"""
+        # use_load_tests is preserved for compatibility though it was never
+        # an official API.
+        # pattern is not an official API either; it is used in discovery to
+        # chain the requested pattern down.
+        tests = []
+        for name in dir(module):
+            obj = getattr(module, name)
+            if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
+                tests.append(self.loadTestsFromTestCase(obj))
+
+        load_tests = getattr(module, 'load_tests', None)
+        tests = self.suiteClass(tests)
+        if use_load_tests and load_tests is not None:
+            try:
+                return load_tests(self, tests, pattern)
+            except Exception as e:
+                return discover_impl._make_failed_load_tests(
+                    module.__name__, e, self.suiteClass)
+        return tests
+    def _find_tests(self, start_dir, pattern, namespace=False):
+        """Used by discovery. Yields test suites it loads."""
+        paths = sorted(os.listdir(start_dir))
+
+        for path in paths:
+            full_path = os.path.join(start_dir, path)
+            if os.path.isfile(full_path):
+                if not discover_impl.VALID_MODULE_NAME.match(path):
+                    # valid Python identifiers only
+                    continue
+                if not self._match_path(path, full_path, pattern):
+                    continue
+                # if the test file matches, load it
+                name = self._get_name_from_path(full_path)
+                try:
+                    module = self._get_module_from_name(name)
+                except testcase.TestSkipped as e:
+                    yield discover_impl._make_skipped_test(
+                        name, e, self.suiteClass)
+                except:
+                    yield discover_impl._make_failed_import_test(
+                        name, self.suiteClass)
+                else:
+                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
+                    realpath = _jython_aware_splitext(
+                        os.path.realpath(mod_file))
+                    fullpath_noext = _jython_aware_splitext(
+                        os.path.realpath(full_path))
+                    if realpath.lower() != fullpath_noext.lower():
+                        module_dir = os.path.dirname(realpath)
+                        mod_name = _jython_aware_splitext(
+                            os.path.basename(full_path))
+                        expected_dir = os.path.dirname(full_path)
+                        msg = ("%r module incorrectly imported from %r. Expected %r. "
+                               "Is this module globally installed?")
+                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
+                    yield self.loadTestsFromModule(module, pattern=pattern)
+            elif os.path.isdir(full_path):
+                if (not namespace and
+                    not os.path.isfile(os.path.join(full_path, '__init__.py'))):
+                    continue
+
+                load_tests = None
+                tests = None
+                name = self._get_name_from_path(full_path)
+                try:
+                    package = self._get_module_from_name(name)
+                except testcase.TestSkipped as e:
+                    yield discover_impl._make_skipped_test(
+                        name, e, self.suiteClass)
+                except:
+                    yield discover_impl._make_failed_import_test(
+                        name, self.suiteClass)
+                else:
+                    load_tests = getattr(package, 'load_tests', None)
+                    tests = self.loadTestsFromModule(package, pattern=pattern)
+                    if tests is not None:
+                        # tests loaded from package file
+                        yield tests
+
+                    if load_tests is not None:
+                        # loadTestsFromModule(package) already invoked
+                        # load_tests for us, so don't recurse again.
+                        continue
+                    # recurse into the package
+                    pkg_tests = self._find_tests(
+                        full_path, pattern, namespace=namespace)
+                    for test in pkg_tests:
+                        yield test
+
+    defaultTestLoaderCls.loadTestsFromModule = loadTestsFromModule
+    defaultTestLoaderCls._find_tests = _find_tests
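+    # The two assignments above monkey-patch the stock loader class so that
+    # discovery reports skipped or unimportable modules as placeholder tests
+    # instead of aborting the whole discovery run.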
+
+################
+
+def main(argv, stdout):
+    program = TestProgram(
+        argv=argv, testRunner=partial(TestToolsTestRunner, stdout=stdout),
+        stdout=stdout)
+
+if __name__ == '__main__':
+    main(sys.argv, sys.stdout)
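+
+# Illustrative invocation (paths assumed): this entry point mirrors the
+# stdlib unittest one, so discovery can be driven the same way, e.g.:
+#
+#   python -m testtools.run discover -s ./tests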
diff --git a/third_party/testtools/testtools/runtest.py b/third_party/testtools/testtools/runtest.py
new file mode 100644
index 0000000..a29cdd6
--- /dev/null
+++ b/third_party/testtools/testtools/runtest.py
@@ -0,0 +1,227 @@
+# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
+
+"""Individual test case execution."""
+
+__all__ = [
+    'MultipleExceptions',
+    'RunTest',
+    ]
+
+import sys
+
+from testtools.testresult import ExtendedToOriginalDecorator
+
+
+class MultipleExceptions(Exception):
+    """Represents many exceptions raised from some operation.
+
+    :ivar args: The sys.exc_info() tuples for each exception.
+    """
+
+
+class RunTest(object):
+    """An object to run a test.
+
+    RunTest objects are used to implement the internal logic involved in
+    running a test.  TestCase.__init__ stores the RunTest class that will be
+    used to execute the test.  Passing the runTest= parameter to
+    TestCase.__init__ allows a different RunTest class to be used instead.
+
+    Subclassing or replacing RunTest can be useful to add functionality to the
+    way that tests are run in a given project.
+
+    :ivar case: The test case that is to be run.
+    :ivar result: The result object a case is reporting to.
+    :ivar handlers: A list of (ExceptionClass, handler_function) pairs for
+        exceptions that should be caught if raised from the user code.
+        Exceptions that are caught are checked against this list in first to
+        last order.  There is a catch-all of 'Exception' at the end of the
+        list, so to add a new exception to the list, insert it at the front
+        (which ensures that it will be checked before any existing base
+        classes in the list).  If you add multiple exceptions, some of which
+        are subclasses of each other, add the most specific exceptions last
+        (so they come before their parent classes in the list).
+    :ivar exception_caught: An object returned when _run_user catches an
+        exception.
+    :ivar _exceptions: A list of caught exceptions, used to do the single
+        reporting of error/failure/skip etc.
+    """
+
+    def __init__(self, case, handlers=None, last_resort=None):
+        """Create a RunTest to run a case.
+
+        :param case: A testtools.TestCase test case object.
+        :param handlers: Exception handlers for this RunTest. These are stored
+            in self.handlers and can be modified later if needed.
+        :param last_resort: A handler of last resort: any exception which is
+            not handled by handlers will cause the last resort handler to be
+            called as last_resort(exc_info), and then the exception will be
+            raised - aborting the test run as this is inside the runner
+            machinery rather than the confined context of the test.
+        """
+        self.case = case
+        self.handlers = handlers or []
+        self.exception_caught = object()
+        self._exceptions = []
+        self.last_resort = last_resort or (lambda case, result, exc: None)
+
+    def run(self, result=None):
+        """Run self.case reporting activity to result.
+
+        :param result: Optional testtools.TestResult to report activity to.
+        :return: The result object the test was run against.
+        """
+        if result is None:
+            actual_result = self.case.defaultTestResult()
+            actual_result.startTestRun()
+        else:
+            actual_result = result
+        try:
+            return self._run_one(actual_result)
+        finally:
+            if result is None:
+                actual_result.stopTestRun()
+
+    def _run_one(self, result):
+        """Run one test reporting to result.
+
+        :param result: A testtools.TestResult to report activity to.
+            This result object is decorated with an ExtendedToOriginalDecorator
+            to ensure that the latest TestResult API can be used with
+            confidence by client code.
+        :return: The result object the test was run against.
+        """
+        return self._run_prepared_result(ExtendedToOriginalDecorator(result))
+
+    def _run_prepared_result(self, result):
+        """Run one test reporting to result.
+
+        :param result: A testtools.TestResult to report activity to.
+        :return: The result object the test was run against.
+        """
+        result.startTest(self.case)
+        self.result = result
+        try:
+            self._exceptions = []
+            self._run_core()
+            if self._exceptions:
+                # One or more caught exceptions, now trigger the test's
+                # reporting method for just one.
+                e = self._exceptions.pop()
+                for exc_class, handler in self.handlers:
+                    if isinstance(e, exc_class):
+                        handler(self.case, self.result, e)
+                        break
+                else:
+                    self.last_resort(self.case, self.result, e)
+                    raise e
+        finally:
+            result.stopTest(self.case)
+        return result
+
+    def _run_core(self):
+        """Run the user supplied test code."""
+        test_method = self.case._get_test_method()
+        if getattr(test_method, '__unittest_skip__', False):
+            self.result.addSkip(
+                self.case,
+                reason=getattr(test_method, '__unittest_skip_why__', None)
+            )
+            return
+
+        if self.exception_caught == self._run_user(self.case._run_setup,
+            self.result):
+            # Don't run the test method if we failed getting here.
+            self._run_cleanups(self.result)
+            return
+        # Run everything from here on in. If any of the methods raise an
+        # exception we'll have failed.
+        failed = False
+        try:
+            if self.exception_caught == self._run_user(
+                self.case._run_test_method, self.result):
+                failed = True
+        finally:
+            try:
+                if self.exception_caught == self._run_user(
+                    self.case._run_teardown, self.result):
+                    failed = True
+            finally:
+                try:
+                    if self.exception_caught == self._run_user(
+                        self._run_cleanups, self.result):
+                        failed = True
+                finally:
+                    if getattr(self.case, 'force_failure', None):
+                        self._run_user(_raise_force_fail_error)
+                        failed = True
+                    if not failed:
+                        self.result.addSuccess(self.case,
+                            details=self.case.getDetails())
+
+    def _run_cleanups(self, result):
+        """Run the cleanups that have been added with addCleanup.
+
+        See the docstring for addCleanup for more information.
+
+        :return: None if all cleanups ran without error,
+            ``exception_caught`` if there was an error.
+        """
+        failing = False
+        while self.case._cleanups:
+            function, arguments, keywordArguments = self.case._cleanups.pop()
+            got_exception = self._run_user(
+                function, *arguments, **keywordArguments)
+            if got_exception == self.exception_caught:
+                failing = True
+        if failing:
+            return self.exception_caught
+
+    def _run_user(self, fn, *args, **kwargs):
+        """Run a user supplied function.
+
+        Exceptions are processed by `_got_user_exception`.
+
+        :return: Either whatever 'fn' returns or ``exception_caught`` if
+            'fn' raised an exception.
+        """
+        try:
+            return fn(*args, **kwargs)
+        except:
+            return self._got_user_exception(sys.exc_info())
+
+    def _got_user_exception(self, exc_info, tb_label='traceback'):
+        """Called when user code raises an exception.
+
+        If 'exc_info' is a `MultipleExceptions`, then we recurse into it,
+        unpacking the errors that it is made up of.
+
+        :param exc_info: A sys.exc_info() tuple for the user error.
+        :param tb_label: An optional string label for the error.  If
+            not specified, will default to 'traceback'.
+        :return: 'exception_caught' if we catch one of the exceptions that
+            have handlers in 'handlers', otherwise raise the error.
+        """
+        if exc_info[0] is MultipleExceptions:
+            for sub_exc_info in exc_info[1].args:
+                self._got_user_exception(sub_exc_info, tb_label)
+            return self.exception_caught
+        try:
+            e = exc_info[1]
+            self.case.onException(exc_info, tb_label=tb_label)
+        finally:
+            del exc_info
+        self._exceptions.append(e)
+        # Yes, this means we catch everything - we re-raise KeyboardInterrupt
+        # etc. later, after tearDown and cleanups run - since those may be
+        # cleaning up external processes.
+        return self.exception_caught
+
+
+def _raise_force_fail_error():
+    raise AssertionError("Forced Test Failure")
+
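+# A minimal sketch (MyTransientError is hypothetical): because handlers are
+# checked first-to-last with a catch-all Exception entry at the end, a custom
+# RunTest inserts new pairs at the front of self.handlers.
+#
+#   class SkipOnTransientError(RunTest):
+#       def __init__(self, case, handlers=None, last_resort=None):
+#           super(SkipOnTransientError, self).__init__(
+#               case, handlers, last_resort)
+#           self.handlers.insert(0, (
+#               MyTransientError,
+#               lambda case, result, exc: result.addSkip(
+#                   case, details=case.getDetails())))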
+
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
diff --git a/third_party/testtools/testtools/tags.py b/third_party/testtools/testtools/tags.py
new file mode 100644
index 0000000..b55bd38
--- /dev/null
+++ b/third_party/testtools/testtools/tags.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Tag support."""
+
+
+class TagContext(object):
+    """A tag context."""
+
+    def __init__(self, parent=None):
+        """Create a new TagContext.
+
+        :param parent: If provided, uses this as the parent context.  Any tags
+            that are current on the parent at the time of construction are
+            current in this context.
+        """
+        self.parent = parent
+        self._tags = set()
+        if parent:
+            self._tags.update(parent.get_current_tags())
+
+    def get_current_tags(self):
+        """Return any current tags."""
+        return set(self._tags)
+
+    def change_tags(self, new_tags, gone_tags):
+        """Change the tags on this context.
+
+        :param new_tags: A set of tags to add to this context.
+        :param gone_tags: A set of tags to remove from this context.
+        :return: The tags now current on this context.
+        """
+        self._tags.update(new_tags)
+        self._tags.difference_update(gone_tags)
+        return self.get_current_tags()
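+
+# Illustrative sketch (not part of the upstream module): contexts nest, so a
+# child context starts with its parent's tags and can then diverge without
+# affecting the parent.
+#
+#   root = TagContext()
+#   root.change_tags(set(['slow']), set())           # -> set(['slow'])
+#   child = TagContext(root)
+#   child.change_tags(set(['net']), set(['slow']))   # -> set(['net'])
+#   root.get_current_tags()                          # still set(['slow'])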
diff --git a/third_party/testtools/testtools/testcase.py b/third_party/testtools/testtools/testcase.py
new file mode 100644
index 0000000..b646f82
--- /dev/null
+++ b/third_party/testtools/testtools/testcase.py
@@ -0,0 +1,1022 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+"""Test case related stuff."""
+
+__metaclass__ = type
+__all__ = [
+    'attr',
+    'clone_test_with_new_id',
+    'ExpectedException',
+    'gather_details',
+    'run_test_with',
+    'skip',
+    'skipIf',
+    'skipUnless',
+    'TestCase',
+    ]
+
+import copy
+import functools
+import itertools
+import sys
+import types
+import unittest
+
+from extras import (
+    safe_hasattr,
+    try_import,
+    )
+
+from testtools import (
+    content,
+    )
+from testtools.compat import (
+    advance_iterator,
+    reraise,
+    )
+from testtools.matchers import (
+    Annotate,
+    Contains,
+    Equals,
+    MatchesAll,
+    MatchesException,
+    MismatchError,
+    Is,
+    IsInstance,
+    Not,
+    Raises,
+    )
+from testtools.monkey import patch
+from testtools.runtest import RunTest
+from testtools.testresult import (
+    ExtendedToOriginalDecorator,
+    TestResult,
+    )
+
+wraps = try_import('functools.wraps')
+
+class TestSkipped(Exception):
+    """Raised within TestCase.run() when a test is skipped."""
+TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
+TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
+
+
+class _UnexpectedSuccess(Exception):
+    """An unexpected success was raised.
+
+    Note that this exception is private plumbing in testtools' testcase
+    module.
+    """
+_UnexpectedSuccess = try_import(
+    'unittest2.case._UnexpectedSuccess', _UnexpectedSuccess)
+_UnexpectedSuccess = try_import(
+    'unittest.case._UnexpectedSuccess', _UnexpectedSuccess)
+
+class _ExpectedFailure(Exception):
+    """An expected failure occured.
+
+    Note that this exception is private plumbing in testtools' testcase
+    module.
+    """
+_ExpectedFailure = try_import(
+    'unittest2.case._ExpectedFailure', _ExpectedFailure)
+_ExpectedFailure = try_import(
+    'unittest.case._ExpectedFailure', _ExpectedFailure)
+
+
+# Copied from unittest before python 3.4 release. Used to maintain
+# compatibility with unittest sub-test feature. Users should not use this
+# directly.
+def _expectedFailure(func):
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        try:
+            func(*args, **kwargs)
+        except Exception:
+            raise _ExpectedFailure(sys.exc_info())
+        raise _UnexpectedSuccess
+    return wrapper
+
+
+def run_test_with(test_runner, **kwargs):
+    """Decorate a test as using a specific ``RunTest``.
+
+    e.g.::
+
+      @run_test_with(CustomRunner, timeout=42)
+      def test_foo(self):
+          self.assertTrue(True)
+
+    The returned decorator works by setting an attribute on the decorated
+    function.  `TestCase.__init__` looks for this attribute when deciding on a
+    ``RunTest`` factory.  If you wish to use multiple decorators on a test
+    method, then you must either make this one the top-most decorator, or you
+    must write your decorators so that they update the wrapping function with
+    the attributes of the wrapped function.  The latter is recommended style
+    anyway.  ``functools.wraps`` and
+    ``twisted.python.util.mergeFunctionMetadata`` can help you do this.
+
+    :param test_runner: A ``RunTest`` factory that takes a test case and an
+        optional list of exception handlers.  See ``RunTest``.
+    :param kwargs: Keyword arguments to pass on as extra arguments to
+        'test_runner'.
+    :return: A decorator to be used for marking a test as needing a special
+        runner.
+    """
+    def decorator(function):
+        # Set an attribute on 'function' which will inform TestCase how to
+        # make the runner.
+        def _run_test_with(case, handlers=None, last_resort=None):
+            try:
+                return test_runner(
+                    case, handlers=handlers, last_resort=last_resort,
+                    **kwargs)
+            except TypeError:
+                # Backwards compat: if we can't call the constructor
+                # with last_resort, try without that.
+                return test_runner(case, handlers=handlers, **kwargs)
+        function._run_test_with = _run_test_with
+        return function
+    return decorator
+
+
+def _copy_content(content_object):
+    """Make a copy of the given content object.
+
+    The content within ``content_object`` is iterated and saved. This is
+    useful when the source of the content is volatile, a log file in a
+    temporary directory for example.
+
+    :param content_object: A `content.Content` instance.
+    :return: A `content.Content` instance with the same mime-type as
+        ``content_object`` and a non-volatile copy of its content.
+    """
+    content_bytes = list(content_object.iter_bytes())
+    content_callback = lambda: content_bytes
+    return content.Content(content_object.content_type, content_callback)
+
+
+def gather_details(source_dict, target_dict):
+    """Merge the details from ``source_dict`` into ``target_dict``.
+
+    :param source_dict: A dictionary from which details will be gathered.
+    :param target_dict: A dictionary into which details will be gathered.
+    """
+    for name, content_object in source_dict.items():
+        new_name = name
+        disambiguator = itertools.count(1)
+        while new_name in target_dict:
+            new_name = '%s-%d' % (name, advance_iterator(disambiguator))
+        name = new_name
+        target_dict[name] = _copy_content(content_object)
+
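+# Illustrative sketch (the content objects are hypothetical): when both dicts
+# carry a detail named 'log', the incoming one is stored as 'log-1' (then
+# 'log-2', and so on) rather than overwriting the existing entry.
+#
+#   target = {'log': existing_content}
+#   gather_details({'log': new_content}, target)
+#   sorted(target)   # -> ['log', 'log-1']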
+
+class TestCase(unittest.TestCase):
+    """Extensions to the basic TestCase.
+
+    :ivar exception_handlers: Exceptions to catch from setUp, runTest and
+        tearDown. This list is able to be modified at any time and consists of
+        (exception_class, handler(case, result, exception_value)) pairs.
+    :ivar force_failure: Force testtools.RunTest to fail the test after the
+        test has completed.
+    :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
+        Defaults to ``RunTest``.  The factory is expected to take a test case
+        and an optional list of exception handlers.
+    """
+
+    skipException = TestSkipped
+
+    run_tests_with = RunTest
+
+    def __init__(self, *args, **kwargs):
+        """Construct a TestCase.
+
+        :param testMethod: The name of the method to run.
+        :keyword runTest: Optional class to use to execute the test. If not
+            supplied ``RunTest`` is used. The instance to be used is created
+            when run() is invoked, so will be fresh each time. Overrides
+            ``TestCase.run_tests_with`` if given.
+        """
+        runTest = kwargs.pop('runTest', None)
+        super(TestCase, self).__init__(*args, **kwargs)
+        self._cleanups = []
+        self._unique_id_gen = itertools.count(1)
+        # Generators to ensure unique traceback ids.  Maps traceback label to
+        # iterators.
+        self._traceback_id_gens = {}
+        self.__setup_called = False
+        self.__teardown_called = False
+        # __details is lazy-initialized so that a constructed-but-not-run
+        # TestCase is safe to use with clone_test_with_new_id.
+        self.__details = None
+        test_method = self._get_test_method()
+        if runTest is None:
+            runTest = getattr(
+                test_method, '_run_test_with', self.run_tests_with)
+        self.__RunTest = runTest
+        if getattr(test_method, '__unittest_expecting_failure__', False):
+            setattr(self, self._testMethodName, _expectedFailure(test_method))
+        # Used internally for onException processing - used to gather extra
+        # data from exceptions.
+        self.__exception_handlers = []
+        # Passed to RunTest to map exceptions to result actions
+        self.exception_handlers = [
+            (self.skipException, self._report_skip),
+            (self.failureException, self._report_failure),
+            (_ExpectedFailure, self._report_expected_failure),
+            (_UnexpectedSuccess, self._report_unexpected_success),
+            (Exception, self._report_error),
+            ]
+
+    def __eq__(self, other):
+        eq = getattr(unittest.TestCase, '__eq__', None)
+        if eq is not None and not unittest.TestCase.__eq__(self, other):
+            return False
+        return self.__dict__ == other.__dict__
+
+    def __repr__(self):
+        # We add id to the repr because it makes testing testtools easier.
+        return "<%s id=0x%0x>" % (self.id(), id(self))
+
+    def addDetail(self, name, content_object):
+        """Add a detail to be reported with this test's outcome.
+
+        For more details see pydoc testtools.TestResult.
+
+        :param name: The name to give this detail.
+        :param content_object: The content object for this detail. See
+            testtools.content for more detail.
+        """
+        if self.__details is None:
+            self.__details = {}
+        self.__details[name] = content_object
+
+    def getDetails(self):
+        """Get the details dict that will be reported with this test's outcome.
+
+        For more details see pydoc testtools.TestResult.
+        """
+        if self.__details is None:
+            self.__details = {}
+        return self.__details
+
+    def patch(self, obj, attribute, value):
+        """Monkey-patch 'obj.attribute' to 'value' while the test is running.
+
+        If 'obj' has no attribute, then the monkey-patch will still go ahead,
+        and the attribute will be deleted instead of restored to its original
+        value.
+
+        :param obj: The object to patch. Can be anything.
+        :param attribute: The attribute on 'obj' to patch.
+        :param value: The value to set 'obj.attribute' to.
+        """
+        self.addCleanup(patch(obj, attribute, value))
+
+    def shortDescription(self):
+        return self.id()
+
+    def skipTest(self, reason):
+        """Cause this test to be skipped.
+
+        This raises self.skipException(reason). skipException is raised
+        to permit a skip to be triggered at any point (during setUp or the
+        testMethod itself). The run() method catches skipException and
+        translates that into a call to the result objects addSkip method.
+
+        :param reason: The reason why the test is being skipped. This must
+            support being cast into a unicode string for reporting.
+        """
+        raise self.skipException(reason)
+
+    # skipTest is how python2.7 spells this. Sometime in the future
+    # this should be given a deprecation decorator - RBC 20100611.
+    skip = skipTest
+
+    def _formatTypes(self, classOrIterable):
+        """Format a class or a bunch of classes for display in an error."""
+        className = getattr(classOrIterable, '__name__', None)
+        if className is None:
+            className = ', '.join(klass.__name__ for klass in classOrIterable)
+        return className
+
+    def addCleanup(self, function, *arguments, **keywordArguments):
+        """Add a cleanup function to be called after tearDown.
+
+        Functions added with addCleanup will be called in reverse order of
+        adding after tearDown, or after setUp if setUp raises an exception.
+
+        If a function added with addCleanup raises an exception, the error
+        will be recorded as a test error, and the next cleanup will then be
+        run.
+
+        Cleanup functions are always called before a test finishes running,
+        even if setUp is aborted by an exception.
+        """
+        self._cleanups.append((function, arguments, keywordArguments))
+
+    def addOnException(self, handler):
+        """Add a handler to be called when an exception occurs in test code.
+
+        This handler cannot affect what result methods are called, and is
+        called before any outcome is called on the result object. An example
+        use for it is to add some diagnostic state to the test details dict
+        which is expensive to calculate and not interesting for reporting in
+        the success case.
+
+        Handlers are called before the outcome (such as addFailure) that
+        the exception has caused.
+
+        Handlers are called in first-added, first-called order, and if they
+        raise an exception, that will propagate out of the test running
+        machinery, halting test processing. As a result, do not call code that
+        may unreasonably fail.
+        """
+        self.__exception_handlers.append(handler)
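+
+    # Illustrative sketch (read_log is a hypothetical helper): attach an
+    # expensive diagnostic detail only when an exception actually occurs.
+    #
+    #   def setUp(self):
+    #       super(MyTest, self).setUp()
+    #       self.addOnException(
+    #           lambda exc_info: self.addDetail(
+    #               'server-log', content.text_content(self.read_log())))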
+
+    def _add_reason(self, reason):
+        self.addDetail('reason', content.text_content(reason))
+
+    def assertEqual(self, expected, observed, message=''):
+        """Assert that 'expected' is equal to 'observed'.
+
+        :param expected: The expected value.
+        :param observed: The observed value.
+        :param message: An optional message to include in the error.
+        """
+        matcher = Equals(expected)
+        self.assertThat(observed, matcher, message)
+
+    failUnlessEqual = assertEquals = assertEqual
+
+    def assertIn(self, needle, haystack, message=''):
+        """Assert that needle is in haystack."""
+        self.assertThat(haystack, Contains(needle), message)
+
+    def assertIsNone(self, observed, message=''):
+        """Assert that 'observed' is equal to None.
+
+        :param observed: The observed value.
+        :param message: An optional message describing the error.
+        """
+        matcher = Is(None)
+        self.assertThat(observed, matcher, message)
+
+    def assertIsNotNone(self, observed, message=''):
+        """Assert that 'observed' is not equal to None.
+
+        :param observed: The observed value.
+        :param message: An optional message describing the error.
+        """
+        matcher = Not(Is(None))
+        self.assertThat(observed, matcher, message)
+
+    def assertIs(self, expected, observed, message=''):
+        """Assert that 'expected' is 'observed'.
+
+        :param expected: The expected value.
+        :param observed: The observed value.
+        :param message: An optional message describing the error.
+        """
+        matcher = Is(expected)
+        self.assertThat(observed, matcher, message)
+
+    def assertIsNot(self, expected, observed, message=''):
+        """Assert that 'expected' is not 'observed'."""
+        matcher = Not(Is(expected))
+        self.assertThat(observed, matcher, message)
+
+    def assertNotIn(self, needle, haystack, message=''):
+        """Assert that needle is not in haystack."""
+        matcher = Not(Contains(needle))
+        self.assertThat(haystack, matcher, message)
+
+    def assertIsInstance(self, obj, klass, msg=None):
+        if isinstance(klass, tuple):
+            matcher = IsInstance(*klass)
+        else:
+            matcher = IsInstance(klass)
+        self.assertThat(obj, matcher, msg)
+
+    def assertRaises(self, excClass, callableObj, *args, **kwargs):
+        """Fail unless an exception of class excClass is thrown
+           by callableObj when invoked with arguments args and keyword
+           arguments kwargs. If a different type of exception is
+           thrown, it will not be caught, and the test case will be
+           deemed to have suffered an error, exactly as for an
+           unexpected exception.
+        """
+        class ReRaiseOtherTypes(object):
+            def match(self, matchee):
+                if not issubclass(matchee[0], excClass):
+                    reraise(*matchee)
+        class CaptureMatchee(object):
+            def match(self, matchee):
+                self.matchee = matchee[1]
+        capture = CaptureMatchee()
+        matcher = Raises(MatchesAll(ReRaiseOtherTypes(),
+                MatchesException(excClass), capture))
+        our_callable = Nullary(callableObj, *args, **kwargs)
+        self.assertThat(our_callable, matcher)
+        return capture.matchee
+
+    failUnlessRaises = assertRaises
+
+    def assertThat(self, matchee, matcher, message='', verbose=False):
+        """Assert that matchee is matched by matcher.
+
+        :param matchee: An object to match with matcher.
+        :param matcher: An object meeting the testtools.Matcher protocol.
+        :raises MismatchError: When matcher does not match matchee.
+        """
+        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
+        if mismatch_error is not None:
+            raise mismatch_error
+
+    def addDetailUniqueName(self, name, content_object):
+        """Add a detail to the test, but ensure it's name is unique.
+
+        This method checks whether ``name`` conflicts with a detail that has
+        already been added to the test. If it does, it will modify ``name`` to
+        avoid the conflict.
+
+        For more details see pydoc testtools.TestResult.
+
+        :param name: The name to give this detail.
+        :param content_object: The content object for this detail. See
+            testtools.content for more detail.
+        """
+        existing_details = self.getDetails()
+        full_name = name
+        suffix = 1
+        while full_name in existing_details:
+            full_name = "%s-%d" % (name, suffix)
+            suffix += 1
+        self.addDetail(full_name, content_object)
+
+    def expectThat(self, matchee, matcher, message='', verbose=False):
+        """Check that matchee is matched by matcher, but delay the assertion failure.
+
+        This method behaves similarly to ``assertThat``, except that a failed
+        match does not exit the test immediately. The rest of the test code will
+        continue to run, and the test will be marked as failing after the test
+        has finished.
+
+        :param matchee: An object to match with matcher.
+        :param matcher: An object meeting the testtools.Matcher protocol.
+        :param message: If specified, show this message with any failed match.
+        """
+        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
+
+        if mismatch_error is not None:
+            self.addDetailUniqueName(
+                "Failed expectation",
+                content.StacktraceContent(
+                    postfix_content="MismatchError: " + str(mismatch_error)
+                )
+            )
+            self.force_failure = True
+
+    def _matchHelper(self, matchee, matcher, message, verbose):
+        matcher = Annotate.if_message(message, matcher)
+        mismatch = matcher.match(matchee)
+        if not mismatch:
+            return
+        for (name, value) in mismatch.get_details().items():
+            self.addDetailUniqueName(name, value)
+        return MismatchError(matchee, matcher, mismatch, verbose)
+
+    def defaultTestResult(self):
+        return TestResult()
+
+    def expectFailure(self, reason, predicate, *args, **kwargs):
+        """Check that a test fails in a particular way.
+
+        If the test fails in the expected way, a KnownFailure is raised. If
+        it succeeds, an UnexpectedSuccess is raised.
+
+        The expected use of expectFailure is as a barrier at the point in a
+        test where the test would fail. For example:
+        >>> def test_foo(self):
+        >>>    self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
+        >>>    self.assertEqual(1, 0)
+
+        If in the future 1 were to equal 0, the expectFailure call can simply
+        be removed. This separation preserves the original intent of the test
+        while it is in the expectFailure mode.
+        """
+        # TODO: implement with matchers.
+        self._add_reason(reason)
+        try:
+            predicate(*args, **kwargs)
+        except self.failureException:
+            # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new
+            #                unittest _ExpectedFailure wants old traceback
+            exc_info = sys.exc_info()
+            try:
+                self._report_traceback(exc_info)
+                raise _ExpectedFailure(exc_info)
+            finally:
+                del exc_info
+        else:
+            raise _UnexpectedSuccess(reason)
+
+    def getUniqueInteger(self):
+        """Get an integer unique to this test.
+
+        Returns an integer that is guaranteed to be unique to this instance.
+        Use this when you need an arbitrary integer in your test, or as a
+        helper for custom anonymous factory methods.
+        """
+        return advance_iterator(self._unique_id_gen)
+
+    def getUniqueString(self, prefix=None):
+        """Get a string unique to this test.
+
+        Returns a string that is guaranteed to be unique to this instance. Use
+        this when you need an arbitrary string in your test, or as a helper
+        for custom anonymous factory methods.
+
+        :param prefix: The prefix of the string. If not provided, defaults
+            to the id of the test.
+        :return: A bytestring of '<prefix>-<unique_int>'.
+        """
+        if prefix is None:
+            prefix = self.id()
+        return '%s-%d' % (prefix, self.getUniqueInteger())
+
+    def onException(self, exc_info, tb_label='traceback'):
+        """Called when an exception propogates from test code.
+
+        :seealso: addOnException
+        """
+        if exc_info[0] not in [
+            TestSkipped, _UnexpectedSuccess, _ExpectedFailure]:
+            self._report_traceback(exc_info, tb_label=tb_label)
+        for handler in self.__exception_handlers:
+            handler(exc_info)
+
+    @staticmethod
+    def _report_error(self, result, err):
+        result.addError(self, details=self.getDetails())
+
+    @staticmethod
+    def _report_expected_failure(self, result, err):
+        result.addExpectedFailure(self, details=self.getDetails())
+
+    @staticmethod
+    def _report_failure(self, result, err):
+        result.addFailure(self, details=self.getDetails())
+
+    @staticmethod
+    def _report_skip(self, result, err):
+        if err.args:
+            reason = err.args[0]
+        else:
+            reason = "no reason given."
+        self._add_reason(reason)
+        result.addSkip(self, details=self.getDetails())
+
+    def _report_traceback(self, exc_info, tb_label='traceback'):
+        id_gen = self._traceback_id_gens.setdefault(
+            tb_label, itertools.count(0))
+        while True:
+            tb_id = advance_iterator(id_gen)
+            if tb_id:
+                tb_label = '%s-%d' % (tb_label, tb_id)
+            if tb_label not in self.getDetails():
+                break
+        self.addDetail(tb_label, content.TracebackContent(exc_info, self))
+
+    @staticmethod
+    def _report_unexpected_success(self, result, err):
+        result.addUnexpectedSuccess(self, details=self.getDetails())
+
+    def run(self, result=None):
+        try:
+            run_test = self.__RunTest(
+                self, self.exception_handlers, last_resort=self._report_error)
+        except TypeError:
+            # Backwards compat: if we can't call the constructor
+            # with last_resort, try without that.
+            run_test = self.__RunTest(self, self.exception_handlers)
+        return run_test.run(result)
+
+    def _run_setup(self, result):
+        """Run the setUp function for this test.
+
+        :param result: A testtools.TestResult to report activity to.
+        :raises ValueError: If the base class setUp was not called.
+        """
+        ret = self.setUp()
+        if not self.__setup_called:
+            raise ValueError(
+                "In File: %s\n"
+                "TestCase.setUp was not called. Have you upcalled all the "
+                "way up the hierarchy from your setUp? e.g. Call "
+                "super(%s, self).setUp() from your setUp()."
+                % (sys.modules[self.__class__.__module__].__file__,
+                   self.__class__.__name__))
+        return ret
+
+    def _run_teardown(self, result):
+        """Run the tearDown function for this test.
+
+        :param result: A testtools.TestResult to report activity to.
+        :raises ValueError: If the base class tearDown was not called.
+        """
+        ret = self.tearDown()
+        if not self.__teardown_called:
+            raise ValueError(
+                "In File: %s\n"
+                "TestCase.tearDown was not called. Have you upcalled all the "
+                "way up the hierarchy from your tearDown? e.g. Call "
+                "super(%s, self).tearDown() from your tearDown()."
+                % (sys.modules[self.__class__.__module__].__file__,
+                   self.__class__.__name__))
+        return ret
+
+    def _get_test_method(self):
+        method_name = getattr(self, '_testMethodName')
+        return getattr(self, method_name)
+
+    def _run_test_method(self, result):
+        """Run the test method for this test.
+
+        :param result: A testtools.TestResult to report activity to.
+        :return: None.
+        """
+        return self._get_test_method()()
+
+    def useFixture(self, fixture):
+        """Use fixture in a test case.
+
+        The fixture will be set up, and self.addCleanup(fixture.cleanUp) called.
+
+        :param fixture: The fixture to use.
+        :return: The fixture, after setting it up and scheduling a cleanup for
+           it.
+        """
+        try:
+            fixture.setUp()
+        except:
+            exc_info = sys.exc_info()
+            try:
+                gather_details(fixture.getDetails(), self.getDetails())
+            except:
+                # Report the setUp exception, then raise the error during
+                # gather_details.
+                self._report_traceback(exc_info)
+                raise
+            else:
+                # gather_details worked, so raise the exception that setUp
+                # encountered.
+                reraise(*exc_info)
+        else:
+            self.addCleanup(fixture.cleanUp)
+            self.addCleanup(
+                gather_details, fixture.getDetails(), self.getDetails())
+            return fixture
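+
+    # Illustrative sketch (assumes the third-party 'fixtures' package):
+    #
+    #   def test_writes_config(self):
+    #       tempdir = self.useFixture(fixtures.TempDir())
+    #       self.assertTrue(os.path.isdir(tempdir.path))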
+
+    def setUp(self):
+        super(TestCase, self).setUp()
+        if self.__setup_called:
+            raise ValueError(
+                "In File: %s\n"
+                "TestCase.setUp was already called. Do not explicitly call "
+                "setUp from your tests. In your own setUp, use super to call "
+                "the base setUp."
+                % (sys.modules[self.__class__.__module__].__file__,))
+        self.__setup_called = True
+
+    def tearDown(self):
+        super(TestCase, self).tearDown()
+        if self.__teardown_called:
+            raise ValueError(
+                "In File: %s\n"
+                "TestCase.tearDown was already called. Do not explicitly call "
+                "tearDown from your tests. In your own tearDown, use super to "
+                "call the base tearDown."
+                % (sys.modules[self.__class__.__module__].__file__,))
+        self.__teardown_called = True
+
+
+class PlaceHolder(object):
+    """A placeholder test.
+
+    `PlaceHolder` implements much of the same interface as TestCase and is
+    particularly suitable for being added to TestResults.
+    """
+
+    failureException = None
+
+    def __init__(self, test_id, short_description=None, details=None,
+        outcome='addSuccess', error=None, tags=None, timestamps=(None, None)):
+        """Construct a `PlaceHolder`.
+
+        :param test_id: The id of the placeholder test.
+        :param short_description: The short description of the placeholder
+            test. If not provided, the id will be used instead.
+        :param details: Outcome details as accepted by addSuccess etc.
+        :param outcome: The outcome to call. Defaults to 'addSuccess'.
+        :param error: If provided, an exc_info tuple to store in the details
+            dict under the 'traceback' key.
+        :param tags: Tags to report for the test.
+        :param timestamps: A two-tuple of timestamps for the test start and
+            finish. Each timestamp may be None to indicate it is not known.
+        """
+        self._test_id = test_id
+        self._short_description = short_description
+        self._details = details or {}
+        self._outcome = outcome
+        if error is not None:
+            self._details['traceback'] = content.TracebackContent(error, self)
+        tags = tags or frozenset()
+        self._tags = frozenset(tags)
+        self._timestamps = timestamps
+
+    def __call__(self, result=None):
+        return self.run(result=result)
+
+    def __repr__(self):
+        internal = [self._outcome, self._test_id, self._details]
+        if self._short_description is not None:
+            internal.append(self._short_description)
+        return "<%s.%s(%s)>" % (
+            self.__class__.__module__,
+            self.__class__.__name__,
+            ", ".join(map(repr, internal)))
+
+    def __str__(self):
+        return self.id()
+
+    def countTestCases(self):
+        return 1
+
+    def debug(self):
+        pass
+
+    def id(self):
+        return self._test_id
+
+    def _result(self, result):
+        if result is None:
+            return TestResult()
+        else:
+            return ExtendedToOriginalDecorator(result)
+
+    def run(self, result=None):
+        result = self._result(result)
+        if self._timestamps[0] is not None:
+            result.time(self._timestamps[0])
+        result.tags(self._tags, set())
+        result.startTest(self)
+        if self._timestamps[1] is not None:
+            result.time(self._timestamps[1])
+        outcome = getattr(result, self._outcome)
+        outcome(self, details=self._details)
+        result.stopTest(self)
+        result.tags(set(), self._tags)
+
+    def shortDescription(self):
+        if self._short_description is None:
+            return self.id()
+        else:
+            return self._short_description
+
+
+def ErrorHolder(test_id, error, short_description=None, details=None):
+    """Construct an `ErrorHolder`.
+
+    :param test_id: The id of the test.
+    :param error: The exc info tuple that will be used as the test's error.
+        This is inserted into the details as 'traceback' - any existing key
+        will be overridden.
+    :param short_description: An optional short description of the test.
+    :param details: Outcome details as accepted by addSuccess etc.
+    """
+    return PlaceHolder(test_id, short_description=short_description,
+        details=details, outcome='addError', error=error)
+
+
+def _clone_test_id_callback(test, callback):
+    """Copy a `TestCase`, and make it call callback for its id().
+
+    This is only expected to be used on tests that have been constructed but
+    not executed.
+
+    :param test: A TestCase instance.
+    :param callback: A callable that takes no parameters and returns a string.
+    :return: A copy.copy of the test with id=callback.
+    """
+    newTest = copy.copy(test)
+    newTest.id = callback
+    return newTest
+
+
+def clone_test_with_new_id(test, new_id):
+    """Copy a `TestCase`, and give the copied test a new id.
+
+    This is only expected to be used on tests that have been constructed but
+    not executed.
+    """
+    return _clone_test_id_callback(test, lambda: new_id)
+
+
+def attr(*args):
+    """Decorator for adding attributes to WithAttributes.
+
+    :param args: The names of the attributes to add.
+    :return: A callable that when applied to a WithAttributes will
+        alter its id to enumerate the added attributes.
+    """
+    def decorate(fn):
+        if not safe_hasattr(fn, '__testtools_attrs'):
+            fn.__testtools_attrs = set()
+        fn.__testtools_attrs.update(args)
+        return fn
+    return decorate
+
+
+class WithAttributes(object):
+    """A mix-in class for modifying test id by attributes.
+
+    e.g.
+    >>> class MyTest(WithAttributes, TestCase):
+    ...    @attr('foo')
+    ...    def test_bar(self):
+    ...        pass
+    >>> MyTest('test_bar').id()
+    'testtools.testcase.MyTest.test_bar[foo]'
+    """
+
+    def id(self):
+        orig = super(WithAttributes, self).id()
+        # Depends on testtools.TestCase._get_test_method, be nice to support
+        # plain unittest.
+        fn = self._get_test_method()
+        attributes = getattr(fn, '__testtools_attrs', None)
+        if not attributes:
+            return orig
+        return orig + '[' + ','.join(sorted(attributes)) + ']'
+
+
+def skip(reason):
+    """A decorator to skip unit tests.
+
+    This is just syntactic sugar so users don't have to change any of their
+    unit tests in order to migrate to python 2.7, which provides the
+    @unittest.skip decorator.
+    """
+    def decorator(test_item):
+        # This attribute signals to RunTest._run_core that the entire test
+        # must be skipped - including setUp and tearDown. This makes us
+        # compatible with unittest.skip* decorators, which set the same
+        # attributes.
+        test_item.__unittest_skip__ = True
+        test_item.__unittest_skip_why__ = reason
+        if wraps is not None:
+            @wraps(test_item)
+            def skip_wrapper(*args, **kwargs):
+                raise TestCase.skipException(reason)
+        else:
+            def skip_wrapper(test_item):
+                test_item.skip(reason)
+        return skip_wrapper
+    return decorator
+
+
+def skipIf(condition, reason):
+    """A decorator to skip a test if the condition is true."""
+    if condition:
+        return skip(reason)
+    def _id(obj):
+        return obj
+    return _id
+
+
+def skipUnless(condition, reason):
+    """A decorator to skip a test unless the condition is true."""
+    if not condition:
+        return skip(reason)
+    def _id(obj):
+        return obj
+    return _id
+
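+# Illustrative usage (the condition is assumed): these mirror the stdlib
+# unittest.skip* decorators and can be applied to individual test methods.
+#
+#   class PlatformTests(TestCase):
+#       @skipUnless(sys.platform.startswith('linux'), 'requires Linux')
+#       def test_epoll_reactor(self):
+#           ...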
+
+class ExpectedException:
+    """A context manager to handle expected exceptions.
+
+      def test_foo(self):
+          with ExpectedException(ValueError, 'fo.*'):
+              raise ValueError('foo')
+
+    will pass.  If the raised exception has a type other than the specified
+    type, it will be re-raised.  If it has a 'str()' that does not match the
+    given regular expression, an AssertionError will be raised.  If no
+    exception is raised, an AssertionError will be raised.
+    """
+
+    def __init__(self, exc_type, value_re=None, msg=None):
+        """Construct an `ExpectedException`.
+
+        :param exc_type: The type of exception to expect.
+        :param value_re: A regular expression to match against the
+            'str()' of the raised exception.
+        :param msg: An optional message explaining the failure.
+        """
+        self.exc_type = exc_type
+        self.value_re = value_re
+        self.msg = msg
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        if exc_type is None:
+            error_msg = '%s not raised.' % self.exc_type.__name__
+            if self.msg:
+                error_msg = error_msg + ' : ' + self.msg
+            raise AssertionError(error_msg)
+        if exc_type != self.exc_type:
+            return False
+        if self.value_re:
+            matcher = MatchesException(self.exc_type, self.value_re)
+            if self.msg:
+                matcher = Annotate(self.msg, matcher)
+            mismatch = matcher.match((exc_type, exc_value, traceback))
+            if mismatch:
+                raise AssertionError(mismatch.describe())
+        return True
+
+
+class Nullary(object):
+    """Turn a callable into a nullary callable.
+
+    The advantage of this over ``lambda: f(*args, **kwargs)`` is that it
+    preserves the ``repr()`` of ``f``.
+    """
+
+    def __init__(self, callable_object, *args, **kwargs):
+        self._callable_object = callable_object
+        self._args = args
+        self._kwargs = kwargs
+
+    def __call__(self):
+        return self._callable_object(*self._args, **self._kwargs)
+
+    def __repr__(self):
+        return repr(self._callable_object)
+
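+# Illustrative sketch: Nullary defers a call while preserving the repr of the
+# underlying callable, which assertRaises above uses for readable failures.
+#
+#   deferred = Nullary(int, '2a', 16)
+#   repr(deferred)   # repr of the int type, not of a lambda
+#   deferred()       # -> 42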
+
+class DecorateTestCaseResult(object):
+    """Decorate a TestCase and permit customisation of the result for runs."""
+
+    def __init__(self, case, callout, before_run=None, after_run=None):
+        """Construct a DecorateTestCaseResult.
+
+        :param case: The case to decorate.
+        :param callout: A callback to call when run/__call__/debug is called.
+            Must take a result parameter and return a result object to be used.
+            For instance: lambda result: result.
+        :param before_run: If set, call this with the decorated result before
+            calling into the decorated run/__call__ method.
+        :param after_run: If set, call this with the decorated result after
+            calling into the decorated run/__call__ method.
+        """
+        self.decorated = case
+        self.callout = callout
+        self.before_run = before_run
+        self.after_run = after_run
+
+    def _run(self, result, run_method):
+        result = self.callout(result)
+        if self.before_run:
+            self.before_run(result)
+        try:
+            return run_method(result)
+        finally:
+            if self.after_run:
+                self.after_run(result)
+
+    def run(self, result=None):
+        self._run(result, self.decorated.run)
+
+    def __call__(self, result=None):
+        self._run(result, self.decorated)
+
+    def __getattr__(self, name):
+        return getattr(self.decorated, name)
+
+    def __delattr__(self, name):
+        delattr(self.decorated, name)
+
+    def __setattr__(self, name, value):
+        if name in ('decorated', 'callout', 'before_run', 'after_run'):
+            self.__dict__[name] = value
+            return
+        setattr(self.decorated, name, value)
+
+
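+# Illustrative sketch (MyTest is a hypothetical case): route every run of a
+# case through a result decorator chosen at wrap time.
+#
+#   wrapped = DecorateTestCaseResult(
+#       MyTest('test_foo'), callout=ExtendedToOriginalDecorator)
+#   wrapped.run(TestResult())
+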
+# Signal that this is part of the testing framework, and that code from this
+# should not normally appear in tracebacks.
+__unittest = True
diff --git a/third_party/testtools/testtools/testresult/__init__.py b/third_party/testtools/testtools/testresult/__init__.py
new file mode 100644
index 0000000..5bf8f9c
--- /dev/null
+++ b/third_party/testtools/testtools/testresult/__init__.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+"""Test result objects."""
+
+__all__ = [
+    'CopyStreamResult',
+    'ExtendedToOriginalDecorator',
+    'ExtendedToStreamDecorator',
+    'MultiTestResult',
+    'StreamFailFast',
+    'StreamResult',
+    'StreamResultRouter',
+    'StreamSummary',
+    'StreamTagger',
+    'StreamToDict',
+    'StreamToExtendedDecorator',
+    'StreamToQueue',
+    'Tagger',
+    'TestByTestResult',
+    'TestControl',
+    'TestResult',
+    'TestResultDecorator',
+    'TextTestResult',
+    'ThreadsafeForwardingResult',
+    'TimestampingStreamResult',
+    ]
+
+from testtools.testresult.real import (
+    CopyStreamResult,
+    ExtendedToOriginalDecorator,
+    ExtendedToStreamDecorator,
+    MultiTestResult,
+    StreamFailFast,
+    StreamResult,
+    StreamResultRouter,
+    StreamSummary,
+    StreamTagger,
+    StreamToDict,
+    StreamToExtendedDecorator,
+    StreamToQueue,
+    Tagger,
+    TestByTestResult,
+    TestControl,
+    TestResult,
+    TestResultDecorator,
+    TextTestResult,
+    ThreadsafeForwardingResult,
+    TimestampingStreamResult,
+    )
diff --git a/third_party/testtools/testtools/testresult/doubles.py b/third_party/testtools/testtools/testresult/doubles.py
new file mode 100644
index 0000000..d86f7fa
--- /dev/null
+++ b/third_party/testtools/testtools/testresult/doubles.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2009-2010 testtools developers. See LICENSE for details.
+
+"""Doubles of test result objects, useful for testing unittest code."""
+
+__all__ = [
+    'Python26TestResult',
+    'Python27TestResult',
+    'ExtendedTestResult',
+    'StreamResult',
+    ]
+
+
+from testtools.tags import TagContext
+
+
+class LoggingBase(object):
+    """Basic support for logging of results."""
+
+    def __init__(self):
+        self._events = []
+        self.shouldStop = False
+        self._was_successful = True
+        self.testsRun = 0
+
+
+class Python26TestResult(LoggingBase):
+    """A precisely python 2.6 like test result, that logs."""
+
+    def addError(self, test, err):
+        self._was_successful = False
+        self._events.append(('addError', test, err))
+
+    def addFailure(self, test, err):
+        self._was_successful = False
+        self._events.append(('addFailure', test, err))
+
+    def addSuccess(self, test):
+        self._events.append(('addSuccess', test))
+
+    def startTest(self, test):
+        self._events.append(('startTest', test))
+        self.testsRun += 1
+
+    def stop(self):
+        self.shouldStop = True
+
+    def stopTest(self, test):
+        self._events.append(('stopTest', test))
+
+    def wasSuccessful(self):
+        return self._was_successful
+
+
+class Python27TestResult(Python26TestResult):
+    """A precisely python 2.7 like test result, that logs."""
+
+    def __init__(self):
+        super(Python27TestResult, self).__init__()
+        self.failfast = False
+
+    def addError(self, test, err):
+        super(Python27TestResult, self).addError(test, err)
+        if self.failfast:
+            self.stop()
+
+    def addFailure(self, test, err):
+        super(Python27TestResult, self).addFailure(test, err)
+        if self.failfast:
+            self.stop()
+
+    def addExpectedFailure(self, test, err):
+        self._events.append(('addExpectedFailure', test, err))
+
+    def addSkip(self, test, reason):
+        self._events.append(('addSkip', test, reason))
+
+    def addUnexpectedSuccess(self, test):
+        self._events.append(('addUnexpectedSuccess', test))
+        if self.failfast:
+            self.stop()
+
+    def startTestRun(self):
+        self._events.append(('startTestRun',))
+
+    def stopTestRun(self):
+        self._events.append(('stopTestRun',))
+
+
+class ExtendedTestResult(Python27TestResult):
+    """A test result like the proposed extended unittest result API."""
+
+    def __init__(self):
+        super(ExtendedTestResult, self).__init__()
+        self._tags = TagContext()
+
+    def addError(self, test, err=None, details=None):
+        self._was_successful = False
+        self._events.append(('addError', test, err or details))
+
+    def addFailure(self, test, err=None, details=None):
+        self._was_successful = False
+        self._events.append(('addFailure', test, err or details))
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._events.append(('addExpectedFailure', test, err or details))
+
+    def addSkip(self, test, reason=None, details=None):
+        self._events.append(('addSkip', test, reason or details))
+
+    def addSuccess(self, test, details=None):
+        if details:
+            self._events.append(('addSuccess', test, details))
+        else:
+            self._events.append(('addSuccess', test))
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self._was_successful = False
+        if details is not None:
+            self._events.append(('addUnexpectedSuccess', test, details))
+        else:
+            self._events.append(('addUnexpectedSuccess', test))
+
+    def progress(self, offset, whence):
+        self._events.append(('progress', offset, whence))
+
+    def startTestRun(self):
+        super(ExtendedTestResult, self).startTestRun()
+        self._was_successful = True
+        self._tags = TagContext()
+
+    def startTest(self, test):
+        super(ExtendedTestResult, self).startTest(test)
+        self._tags = TagContext(self._tags)
+
+    def stopTest(self, test):
+        self._tags = self._tags.parent
+        super(ExtendedTestResult, self).stopTest(test)
+
+    @property
+    def current_tags(self):
+        return self._tags.get_current_tags()
+
+    def tags(self, new_tags, gone_tags):
+        self._tags.change_tags(new_tags, gone_tags)
+        self._events.append(('tags', new_tags, gone_tags))
+
+    def time(self, time):
+        self._events.append(('time', time))
+
+    def wasSuccessful(self):
+        return self._was_successful
+
+
+class StreamResult(object):
+    """A StreamResult implementation for testing.
+
+    All events are logged to _events.
+    """
+
+    def __init__(self):
+        self._events = []
+
+    def startTestRun(self):
+        self._events.append(('startTestRun',))
+
+    def stopTestRun(self):
+        self._events.append(('stopTestRun',))
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        self._events.append(('status', test_id, test_status, test_tags,
+            runnable, file_name, file_bytes, eof, mime_type, route_code,
+            timestamp))
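+
+# Illustrative sketch (case is a hypothetical test instance): the doubles
+# record every call, so code that drives result objects can be tested by
+# asserting on the event log directly.
+#
+#   double = Python27TestResult()
+#   double.startTest(case)
+#   double.addSuccess(case)
+#   double._events   # -> [('startTest', case), ('addSuccess', case)]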
diff --git a/third_party/testtools/testtools/testresult/real.py b/third_party/testtools/testtools/testresult/real.py
new file mode 100644
index 0000000..1453041
--- /dev/null
+++ b/third_party/testtools/testtools/testresult/real.py
@@ -0,0 +1,1777 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test results and related things."""
+
+__metaclass__ = type
+__all__ = [
+    'ExtendedToOriginalDecorator',
+    'ExtendedToStreamDecorator',
+    'MultiTestResult',
+    'StreamFailFast',
+    'StreamResult',
+    'StreamSummary',
+    'StreamTagger',
+    'StreamToDict',
+    'StreamToExtendedDecorator',
+    'StreamToQueue',
+    'Tagger',
+    'TestControl',
+    'TestResult',
+    'TestResultDecorator',
+    'ThreadsafeForwardingResult',
+    'TimestampingStreamResult',
+    ]
+
+import datetime
+from operator import methodcaller
+import sys
+import unittest
+
+from extras import safe_hasattr, try_import, try_imports
+parse_mime_type = try_import('mimeparse.parse_mime_type')
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools.compat import str_is_unicode, _u, _b
+from testtools.content import (
+    Content,
+    text_content,
+    TracebackContent,
+    )
+from testtools.content_type import ContentType
+from testtools.tags import TagContext
+# circular import
+# from testtools.testcase import PlaceHolder
+PlaceHolder = None
+
+# From http://docs.python.org/library/datetime.html
+_ZERO = datetime.timedelta(0)
+
+# A UTC class.
+
+class UTC(datetime.tzinfo):
+    """UTC"""
+
+    def utcoffset(self, dt):
+        return _ZERO
+
+    def tzname(self, dt):
+        return "UTC"
+
+    def dst(self, dt):
+        return _ZERO
+
+utc = UTC()
+
+
+class TestResult(unittest.TestResult):
+    """Subclass of unittest.TestResult extending the protocol for flexability.
+
+    This test result supports an experimental protocol for providing additional
+    data to in test outcomes. All the outcome methods take an optional dict
+    'details'. If supplied any other detail parameters like 'err' or 'reason'
+    should not be provided. The details dict is a mapping from names to
+    MIME content objects (see testtools.content). This permits attaching
+    tracebacks, log files, or even large objects like databases that were
+    part of the test fixture. Until this API is accepted into upstream
+    Python it is considered experimental: it may be replaced at any point
+    by a newer version more in line with upstream Python. Compatibility would
+    be aimed for in this case, but may not be possible.
+
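+    A hedged sketch of the details protocol (``test`` stands in for a
+    running test case; the detail name is illustrative)::
+
+      >>> from testtools.content import text_content
+      >>> result = TestResult()
+      >>> result.addSkip(test, details={'reason': text_content('no pyGL')})
+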
+    :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
+    """
+
+    def __init__(self, failfast=False):
+        # startTestRun resets all attributes, and older clients don't know to
+        # call startTestRun, so it is called once here.
+        # Because subclasses may reasonably not expect this, we call the
+        # specific version we want to run.
+        self.failfast = failfast
+        TestResult.startTestRun(self)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        """Called when a test has failed in an expected manner.
+
+        Like with addSuccess and addError, stopTest should still be called.
+
+        :param test: The test that failed in an expected manner.
+        :param err: The exc_info of the error that was raised.
+        :return: None
+        """
+        # This is the python 2.7 implementation
+        self.expectedFailures.append(
+            (test, self._err_details_to_string(test, err, details)))
+
+    def addError(self, test, err=None, details=None):
+        """Called when an error has occurred. 'err' is a tuple of values as
+        returned by sys.exc_info().
+
+        :param details: Alternative way to supply details about the outcome.
+            See the class docstring for more information.
+        """
+        self.errors.append((test,
+            self._err_details_to_string(test, err, details)))
+        if self.failfast:
+            self.stop()
+
+    def addFailure(self, test, err=None, details=None):
+        """Called when an error has occurred. 'err' is a tuple of values as
+        returned by sys.exc_info().
+
+        :param details: Alternative way to supply details about the outcome.
+            See the class docstring for more information.
+        """
+        self.failures.append((test,
+            self._err_details_to_string(test, err, details)))
+        if self.failfast:
+            self.stop()
+
+    def addSkip(self, test, reason=None, details=None):
+        """Called when a test has been skipped rather than running.
+
+        Like with addSuccess and addError, stopTest should still be called.
+
+        This must be called by the TestCase. 'addError' and 'addFailure' will
+        not call addSkip, since they have no assumptions about the kind of
+        errors that a test can raise.
+
+        :param test: The test that has been skipped.
+        :param reason: The reason for the test being skipped. For instance,
+            u"pyGL is not available".
+        :param details: Alternative way to supply details about the outcome.
+            See the class docstring for more information.
+        :return: None
+        """
+        if reason is None:
+            reason = details.get('reason')
+            if reason is None:
+                reason = 'No reason given'
+            else:
+                reason = reason.as_text()
+        skip_list = self.skip_reasons.setdefault(reason, [])
+        skip_list.append(test)
+
+    def addSuccess(self, test, details=None):
+        """Called when a test succeeded."""
+
+    def addUnexpectedSuccess(self, test, details=None):
+        """Called when a test was expected to fail, but succeed."""
+        self.unexpectedSuccesses.append(test)
+        if self.failfast:
+            self.stop()
+
+    def wasSuccessful(self):
+        """Has this result been successful so far?
+
+        If there have been any errors, failures or unexpected successes,
+        return False.  Otherwise, return True.
+
+        Note: This differs from standard unittest in that we consider
+        unexpected successes to be equivalent to failures, rather than
+        successes.
+        """
+        return not (self.errors or self.failures or self.unexpectedSuccesses)
+
+    def _err_details_to_string(self, test, err=None, details=None):
+        """Convert an error in exc_info form or a contents dict to a string."""
+        if err is not None:
+            return TracebackContent(err, test).as_text()
+        return _details_to_str(details, special='traceback')
+
+    def _exc_info_to_unicode(self, err, test):
+        # Deprecated.  Only present because subunit upcalls to it.  See
+        # <https://bugs.launchpad.net/testtools/+bug/929063>.
+        return TracebackContent(err, test).as_text()
+
+    def _now(self):
+        """Return the current 'test time'.
+
+        If the time() method has not been called, this is equivalent to
+        datetime.now(); otherwise it's the last timestamp supplied to the
+        time() method.
+        """
+        if self.__now is None:
+            return datetime.datetime.now(utc)
+        else:
+            return self.__now
+
+    def startTestRun(self):
+        """Called before a test run starts.
+
+        New in Python 2.7. The testtools version resets the result to a
+        pristine condition ready for use in another test run.  Note that this
+        is different from Python 2.7's startTestRun, which does nothing.
+        """
+        # failfast is reset by the super __init__, so stash it.
+        failfast = self.failfast
+        super(TestResult, self).__init__()
+        self.skip_reasons = {}
+        self.__now = None
+        self._tags = TagContext()
+        # -- Start: As per python 2.7 --
+        self.expectedFailures = []
+        self.unexpectedSuccesses = []
+        self.failfast = failfast
+        # -- End:   As per python 2.7 --
+
+    def stopTestRun(self):
+        """Called after a test run completes
+
+        New in python 2.7
+        """
+
+    def startTest(self, test):
+        super(TestResult, self).startTest(test)
+        self._tags = TagContext(self._tags)
+
+    def stopTest(self, test):
+        self._tags = self._tags.parent
+        super(TestResult, self).stopTest(test)
+
+    @property
+    def current_tags(self):
+        """The currently set tags."""
+        return self._tags.get_current_tags()
+
+    def tags(self, new_tags, gone_tags):
+        """Add and remove tags from the test.
+
+        :param new_tags: A set of tags to be added to the stream.
+        :param gone_tags: A set of tags to be removed from the stream.
+        """
+        self._tags.change_tags(new_tags, gone_tags)
+
+    def time(self, a_datetime):
+        """Provide a timestamp to represent the current time.
+
+        This is useful when test activity is time-delayed or happening
+        concurrently, where reading the system clock between API calls would
+        not accurately represent the duration of tests (or the whole run).
+
+        Calling time() sets the datetime used by the TestResult object.
+        Time is permitted to go backwards when using this call.
+
+        :param a_datetime: A datetime.datetime object with TZ information or
+            None to reset the TestResult to gathering time from the system.
+        """
+        self.__now = a_datetime
+
+    def done(self):
+        """Called when the test runner is done.
+
+        deprecated in favour of stopTestRun.
+        """
+
+
+class StreamResult(object):
+    """A test result for reporting the activity of a test run.
+
+    Typical use
+
+      >>> result = StreamResult()
+      >>> result.startTestRun()
+      >>> try:
+      ...     case.run(result)
+      ... finally:
+      ...     result.stopTestRun()
+
+    The case object will be either a TestCase or a TestSuite, and
+    generally make a sequence of calls like::
+
+      >>> result.status(self.id(), 'inprogress')
+      >>> result.status(self.id(), 'success')
+
+    General concepts
+
+    StreamResult is built to process events that are emitted by tests during a
+    test run or test enumeration. The test run may be running concurrently, and
+    even be spread out across multiple machines.
+
+    All events are timestamped to prevent network buffering or scheduling
+    latency causing false timing reports. Timestamps are datetime objects in
+    the UTC timezone.
+
+    A route_code is a unicode string that identifies where a particular test
+    ran. This is optional in the API but very useful when multiplexing multiple
+    streams together as it allows identification of interactions between tests
+    that were run on the same hardware or in the same test process. Generally
+    actual tests never need to bother with this - it is added and processed
+    by StreamResults that do multiplexing / run analysis. route_codes are
+    also used to route stdin back to pdb instances.
+
+    The StreamResult base class does no accounting or processing, rather it
+    just provides an empty implementation of every method, suitable for use
+    as a base class regardless of intent.
+    """
+
+    def startTestRun(self):
+        """Start a test run.
+
+        This will prepare the test result to process results (which might imply
+        connecting to a database or remote machine).
+        """
+
+    def stopTestRun(self):
+        """Stop a test run.
+
+        This informs the result that no more test updates will be received. At
+        this point any test ids that have started and not completed can be
+        considered failed-or-hung.
+        """
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        """Inform the result about a test status.
+
+        :param test_id: The test whose status is being reported. None to
+            report status about the test run as a whole.
+        :param test_status: The status for the test. There are two sorts of
+            status - interim and final status events. As many interim events
+            can be generated as desired, but only one final event. After a
+            final status event any further file or status events from the
+            same test_id+route_code may be discarded or associated with a new
+            test by the StreamResult. (But no exception will be thrown).
+
+            Interim states:
+              * None - no particular status is being reported, or status being
+                reported is not associated with a test (e.g. when reporting on
+                stdout / stderr chatter).
+              * inprogress - the test is currently running. Emitted by tests when
+                they start running and at any intermediate point where they
+                choose to indicate their continued operation.
+
+            Final states:
+              * exists - the test exists. This is used when a test is not being
+                executed. Typically this is when querying what tests could be run
+                in a test run (which is useful for selecting tests to run).
+              * xfail - the test failed but that was expected. This is purely
+                informative - the test is not considered to be a failure.
+              * uxsuccess - the test passed but was expected to fail. The test
+                will be considered a failure.
+              * success - the test has finished without error.
+              * fail - the test failed (or errored). The test will be considered
+                a failure.
+              * skip - the test was selected to run but chose to be skipped. E.g.
+                a test dependency was missing. This is purely informative - the
+                test is not considered to be a failure.
+
+        :param test_tags: Optional set of tags to apply to the test. Tags
+            have no intrinsic meaning - that is up to the test author.
+        :param runnable: Allows status reports to mark that they are for
+            tests which are not able to be explicitly run. For instance,
+            subtests will report themselves as non-runnable.
+        :param file_name: The name for the file_bytes. Any unicode string may
+            be used. While there is no semantic value attached to the name
+            of any attachment, the names 'stdout', 'stderr' and 'traceback'
+            should be used only for output sent to stdout, stderr and for
+            tracebacks of exceptions. When file_name is supplied, file_bytes
+            must be a bytes instance.
+        :param file_bytes: A bytes object containing content for the named
+            file. This can be a single chunk of the file - later file
+            events may carry further chunks. Must be None unless a
+            file_name is supplied.
+        :param eof: True if this chunk is the last chunk of the file, any
+            additional chunks with the same name should be treated as an error
+            and discarded. Ignored unless file_name has been supplied.
+        :param mime_type: An optional MIME type for the file. stdout and
+            stderr will generally be "text/plain; charset=utf8". If None,
+            defaults to application/octet-stream. Ignored unless file_name
+            has been supplied.
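+
+        A sketch of streaming one file attachment in two chunks (the test
+        id and bytes are illustrative)::
+
+          >>> result.status(test_id='t1', file_name='stdout',
+          ...     file_bytes=b'partial ', mime_type='text/plain; charset=utf8')
+          >>> result.status(test_id='t1', file_name='stdout',
+          ...     file_bytes=b'output', eof=True,
+          ...     mime_type='text/plain; charset=utf8')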
+        """
+
+
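+# map() returns a lazy iterator on Python 3; list() forces the calls to
+# run for their side effects.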
+def domap(*args, **kwargs):
+    return list(map(*args, **kwargs))
+
+
+class CopyStreamResult(StreamResult):
+    """Copies all event it receives to multiple results.
+
+    This provides an easy facility for combining multiple StreamResults.
+
+    For TestResult the equivalent class was ``MultiTestResult``.
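+
+    A minimal sketch (the two sinks are illustrative)::
+
+      >>> result = CopyStreamResult([StreamResult(), StreamResult()])
+      >>> result.startTestRun()
+      >>> result.stopTestRun()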
+    """
+
+    def __init__(self, targets):
+        super(CopyStreamResult, self).__init__()
+        self.targets = targets
+
+    def startTestRun(self):
+        super(CopyStreamResult, self).startTestRun()
+        domap(methodcaller('startTestRun'), self.targets)
+
+    def stopTestRun(self):
+        super(CopyStreamResult, self).stopTestRun()
+        domap(methodcaller('stopTestRun'), self.targets)
+
+    def status(self, *args, **kwargs):
+        super(CopyStreamResult, self).status(*args, **kwargs)
+        domap(methodcaller('status', *args, **kwargs), self.targets)
+
+
+class StreamFailFast(StreamResult):
+    """Call the supplied callback if an error is seen in a stream.
+
+    A typical callback stops the test run, for example::
+
+       control = TestControl()
+       fail_fast = StreamFailFast(control.stop)
+    """
+
+    def __init__(self, on_error):
+        self.on_error = on_error
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        if test_status in ('uxsuccess', 'fail'):
+            self.on_error()
+
+
+class StreamResultRouter(StreamResult):
+    """A StreamResult that routes events.
+
+    StreamResultRouter forwards received events to another StreamResult object,
+    selected by a dynamic forwarding policy. Events where no destination is
+    found are forwarded to the fallback StreamResult, or an error is raised.
+
+    Typical use is to construct a router with a fallback and then either
+    create up front mapping rules, or create them as-needed from the fallback
+    handler::
+
+      >>> router = StreamResultRouter()
+      >>> sink = doubles.StreamResult()
+      >>> router.add_rule(sink, 'route_code_prefix', route_prefix='0',
+      ...     consume_route=True)
+      >>> router.status(test_id='foo', route_code='0/1', test_status='uxsuccess')
+
+    StreamResultRouter has no buffering.
+
+    When adding routes (and for the fallback), whether startTestRun and
+    stopTestRun are called on the sink is controlled by the
+    'do_start_stop_run' parameter. The default is to call them for the
+    fallback only.
+    If a route is added after startTestRun has been called, and
+    do_start_stop_run is True then startTestRun is called immediately on the
+    new route sink.
+
+    There is no a-priori defined lookup order for routes: if they are ambiguous
+    the behaviour is undefined. Only a single route is chosen for any event.
+    """
+
+    _policies = {}
+
+    def __init__(self, fallback=None, do_start_stop_run=True):
+        """Construct a StreamResultRouter with optional fallback.
+
+        :param fallback: A StreamResult to forward events to when no route
+            exists for them.
+        :param do_start_stop_run: If False do not pass startTestRun and
+            stopTestRun onto the fallback.
+        """
+        self.fallback = fallback
+        self._route_code_prefixes = {}
+        self._test_ids = {}
+        # Records sinks that should have do_start_stop_run called on them.
+        self._sinks = []
+        if do_start_stop_run and fallback:
+            self._sinks.append(fallback)
+        self._in_run = False
+
+    def startTestRun(self):
+        super(StreamResultRouter, self).startTestRun()
+        for sink in self._sinks:
+            sink.startTestRun()
+        self._in_run = True
+
+    def stopTestRun(self):
+        super(StreamResultRouter, self).stopTestRun()
+        for sink in self._sinks:
+            sink.stopTestRun()
+        self._in_run = False
+
+    def status(self, **kwargs):
+        route_code = kwargs.get('route_code', None)
+        test_id = kwargs.get('test_id', None)
+        if route_code is not None:
+            prefix = route_code.split('/')[0]
+        else:
+            prefix = route_code
+        if prefix in self._route_code_prefixes:
+            target, consume_route = self._route_code_prefixes[prefix]
+            if route_code is not None and consume_route:
+                route_code = route_code[len(prefix) + 1:]
+                if not route_code:
+                    route_code = None
+                kwargs['route_code'] = route_code
+        elif test_id in self._test_ids:
+            target = self._test_ids[test_id]
+        else:
+            target = self.fallback
+        target.status(**kwargs)
+
+    def add_rule(self, sink, policy, do_start_stop_run=False, **policy_args):
+        """Add a rule to route events to sink when they match a given policy.
+
+        :param sink: A StreamResult to receive events.
+        :param policy: A routing policy. Valid policies are
+            'route_code_prefix' and 'test_id'.
+        :param do_start_stop_run: If True then startTestRun and stopTestRun
+            events will be passed onto this sink.
+
+        :raises: ValueError if the policy is unknown
+        :raises: TypeError if the policy is given arguments it cannot handle.
+
+        ``route_code_prefix`` routes events based on a prefix of the route
+        code in the event. It takes a ``route_prefix`` argument to match on
+        (e.g. '0') and a ``consume_route`` argument, which, if True, removes
+        the prefix from the ``route_code`` when forwarding events.
+
+        ``test_id`` routes events based on the test id.  It takes a single
+        argument, ``test_id``.  Use ``None`` to select non-test events.
+        """
+        policy_method = StreamResultRouter._policies.get(policy, None)
+        if not policy_method:
+            raise ValueError("bad policy %r" % (policy,))
+        policy_method(self, sink, **policy_args)
+        if do_start_stop_run:
+            self._sinks.append(sink)
+        if self._in_run:
+            sink.startTestRun()
+
+    def _map_route_code_prefix(self, sink, route_prefix, consume_route=False):
+        if '/' in route_prefix:
+            raise TypeError(
+                "%r is more than one route step long" % (route_prefix,))
+        self._route_code_prefixes[route_prefix] = (sink, consume_route)
+    _policies['route_code_prefix'] = _map_route_code_prefix
+
+    def _map_test_id(self, sink, test_id):
+        self._test_ids[test_id] = sink
+    _policies['test_id'] = _map_test_id
+
+
+class StreamTagger(CopyStreamResult):
+    """Adds or discards tags from StreamResult events."""
+
+    def __init__(self, targets, add=None, discard=None):
+        """Create a StreamTagger.
+
+        :param targets: A list of targets to forward events onto.
+        :param add: Either None or an iterable of tags to add to each event.
+        :param discard: Either None or an iterable of tags to discard from each
+            event.
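+
+        A sketch: ``StreamTagger([sink], add=['worker-0'])`` adds the
+        ``worker-0`` tag to every event forwarded to ``sink`` (an
+        illustrative StreamResult).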
+        """
+        super(StreamTagger, self).__init__(targets)
+        self.add = frozenset(add or ())
+        self.discard = frozenset(discard or ())
+
+    def status(self, *args, **kwargs):
+        test_tags = kwargs.get('test_tags') or set()
+        test_tags.update(self.add)
+        test_tags.difference_update(self.discard)
+        kwargs['test_tags'] = test_tags or None
+        super(StreamTagger, self).status(*args, **kwargs)
+
+
+class StreamToDict(StreamResult):
+    """A specialised StreamResult that emits a callback as tests complete.
+
+    Top level file attachments are simply discarded. Hung tests are detected
+    by stopTestRun and reported at that point.
+
+    The callback is passed a dict with the following keys:
+
+      * id: the test id.
+      * tags: The tags for the test. A set of unicode strings.
+      * details: A dict of file attachments - ``testtools.content.Content``
+        objects.
+      * status: One of the StreamResult status codes (including inprogress) or
+        'unknown' (used if only file events were received for a test).
+      * timestamps: A pair of timestamps - the first one received with this
+        test id, and the one from the event that triggered the notification.
+        Hung tests have None as the second timestamp. Timestamps are not
+        compared - their ordering is purely the order received in the stream.
+
+    Only the most recent tags observed in the stream are reported.
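+
+    A sketch of collecting completed tests into a list::
+
+      >>> tests = []
+      >>> result = StreamToDict(tests.append)
+      >>> result.startTestRun()
+      >>> result.stopTestRun()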
+    """
+
+    def __init__(self, on_test):
+        """Create a StreamToDict calling on_test on test completions.
+
+        :param on_test: A callback that accepts one parameter - a dict
+            describing a test.
+        """
+        super(StreamToDict, self).__init__()
+        self.on_test = on_test
+        if parse_mime_type is None:
+            raise ImportError("mimeparse module missing.")
+
+    def startTestRun(self):
+        super(StreamToDict, self).startTestRun()
+        self._inprogress = {}
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        super(StreamToDict, self).status(test_id, test_status,
+            test_tags=test_tags, runnable=runnable, file_name=file_name,
+            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+            route_code=route_code, timestamp=timestamp)
+        key = self._ensure_key(test_id, route_code, timestamp)
+        if not key:
+            return
+        # update fields
+        if test_status is not None:
+            self._inprogress[key]['status'] = test_status
+        self._inprogress[key]['timestamps'][1] = timestamp
+        case = self._inprogress[key]
+        if file_name is not None:
+            if file_name not in case['details']:
+                if mime_type is None:
+                    mime_type = 'application/octet-stream'
+                primary, sub, parameters = parse_mime_type(mime_type)
+                if 'charset' in parameters:
+                    if ',' in parameters['charset']:
+                        # Older testtools emitted a bad encoding; work around
+                        # it here, though this does lose data. The workaround
+                        # can probably be dropped in a few releases.
+                        parameters['charset'] = parameters['charset'][
+                            :parameters['charset'].find(',')]
+                content_type = ContentType(primary, sub, parameters)
+                content_bytes = []
+                case['details'][file_name] = Content(
+                    content_type, lambda:content_bytes)
+            case['details'][file_name].iter_bytes().append(file_bytes)
+        if test_tags is not None:
+            self._inprogress[key]['tags'] = test_tags
+        # notify completed tests.
+        if test_status not in (None, 'inprogress'):
+            self.on_test(self._inprogress.pop(key))
+
+    def stopTestRun(self):
+        super(StreamToDict, self).stopTestRun()
+        while self._inprogress:
+            case = self._inprogress.popitem()[1]
+            case['timestamps'][1] = None
+            self.on_test(case)
+
+    def _ensure_key(self, test_id, route_code, timestamp):
+        if test_id is None:
+            return
+        key = (test_id, route_code)
+        if key not in self._inprogress:
+            self._inprogress[key] = {
+                'id': test_id,
+                'tags': set(),
+                'details': {},
+                'status': 'unknown',
+                'timestamps': [timestamp, None]}
+        return key
+
+
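+# 'inprogress' and 'unknown' indicate a test that never reached a final
+# state, so such tests are reported as failures.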
+_status_map = {
+    'inprogress': 'addFailure',
+    'unknown': 'addFailure',
+    'success': 'addSuccess',
+    'skip': 'addSkip',
+    'fail': 'addFailure',
+    'xfail': 'addExpectedFailure',
+    'uxsuccess': 'addUnexpectedSuccess',
+    }
+
+
+def test_dict_to_case(test_dict):
+    """Convert a test dict into a TestCase object.
+
+    :param test_dict: A test dict as generated by StreamToDict.
+    :return: A PlaceHolder test object.
+    """
+    # Circular import.
+    global PlaceHolder
+    if PlaceHolder is None:
+        from testtools.testcase import PlaceHolder
+    outcome = _status_map[test_dict['status']]
+    return PlaceHolder(test_dict['id'], outcome=outcome,
+        details=test_dict['details'], tags=test_dict['tags'],
+        timestamps=test_dict['timestamps'])
+
+
+class StreamSummary(StreamToDict):
+    """A specialised StreamResult that summarises a stream.
+
+    The summary uses the same representation as the original
+    unittest.TestResult contract, allowing it to be consumed by any test
+    runner.
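+
+    A sketch (with no events processed the run is vacuously successful)::
+
+      >>> summary = StreamSummary()
+      >>> summary.startTestRun()
+      >>> summary.stopTestRun()
+      >>> summary.wasSuccessful()
+      True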
+    """
+
+    def __init__(self):
+        super(StreamSummary, self).__init__(self._gather_test)
+        self._handle_status = {
+            'success': self._success,
+            'skip': self._skip,
+            'exists': self._exists,
+            'fail': self._fail,
+            'xfail': self._xfail,
+            'uxsuccess': self._uxsuccess,
+            'unknown': self._incomplete,
+            'inprogress': self._incomplete,
+            }
+
+    def startTestRun(self):
+        super(StreamSummary, self).startTestRun()
+        self.failures = []
+        self.errors = []
+        self.testsRun = 0
+        self.skipped = []
+        self.expectedFailures = []
+        self.unexpectedSuccesses = []
+
+    def wasSuccessful(self):
+        """Return False if any failure has occured.
+
+        Note that incomplete tests can only be detected when stopTestRun is
+        called, so that should be called before checking wasSuccessful.
+        """
+        return (not self.failures and not self.errors)
+
+    def _gather_test(self, test_dict):
+        if test_dict['status'] == 'exists':
+            return
+        self.testsRun += 1
+        case = test_dict_to_case(test_dict)
+        self._handle_status[test_dict['status']](case)
+
+    def _incomplete(self, case):
+        self.errors.append((case, "Test did not complete"))
+
+    def _success(self, case):
+        pass
+
+    def _skip(self, case):
+        if 'reason' not in case._details:
+            reason = "Unknown"
+        else:
+            reason = case._details['reason'].as_text()
+        self.skipped.append((case, reason))
+
+    def _exists(self, case):
+        pass
+
+    def _fail(self, case):
+        message = _details_to_str(case._details, special="traceback")
+        self.errors.append((case, message))
+
+    def _xfail(self, case):
+        message = _details_to_str(case._details, special="traceback")
+        self.expectedFailures.append((case, message))
+
+    def _uxsuccess(self, case):
+        case._outcome = 'addUnexpectedSuccess'
+        self.unexpectedSuccesses.append(case)
+
+
+class TestControl(object):
+    """Controls a running test run, allowing it to be interrupted.
+
+    :ivar shouldStop: If True, tests should not run and should instead
+        return immediately. Similarly, a TestSuite should check this between
+        tests and, if it is set, stop dispatching new tests and return.
+    """
+
+    def __init__(self):
+        super(TestControl, self).__init__()
+        self.shouldStop = False
+
+    def stop(self):
+        """Indicate that tests should stop running."""
+        self.shouldStop = True
+
+
+class MultiTestResult(TestResult):
+    """A test result that dispatches to many test results."""
+
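+    # A sketch of fanning out to two results (illustrative; TextTestResult
+    # is defined later in this module):
+    #   result = MultiTestResult(TextTestResult(sys.stdout), TestResult())
+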
+    def __init__(self, *results):
+        # Setup _results first, as the base class __init__ assigns to failfast.
+        self._results = list(map(ExtendedToOriginalDecorator, results))
+        super(MultiTestResult, self).__init__()
+
+    def __repr__(self):
+        return '<%s (%s)>' % (
+            self.__class__.__name__, ', '.join(map(repr, self._results)))
+
+    def _dispatch(self, message, *args, **kwargs):
+        return tuple(
+            getattr(result, message)(*args, **kwargs)
+            for result in self._results)
+
+    def _get_failfast(self):
+        return getattr(self._results[0], 'failfast', False)
+    def _set_failfast(self, value):
+        self._dispatch('__setattr__', 'failfast', value)
+    failfast = property(_get_failfast, _set_failfast)
+
+    def _get_shouldStop(self):
+        return any(self._dispatch('__getattr__', 'shouldStop'))
+    def _set_shouldStop(self, value):
+        # Called because we subclass TestResult. Probably should not do that.
+        pass
+    shouldStop = property(_get_shouldStop, _set_shouldStop)
+
+    def startTest(self, test):
+        super(MultiTestResult, self).startTest(test)
+        return self._dispatch('startTest', test)
+
+    def stop(self):
+        return self._dispatch('stop')
+
+    def stopTest(self, test):
+        super(MultiTestResult, self).stopTest(test)
+        return self._dispatch('stopTest', test)
+
+    def addError(self, test, error=None, details=None):
+        return self._dispatch('addError', test, error, details=details)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        return self._dispatch(
+            'addExpectedFailure', test, err, details=details)
+
+    def addFailure(self, test, err=None, details=None):
+        return self._dispatch('addFailure', test, err, details=details)
+
+    def addSkip(self, test, reason=None, details=None):
+        return self._dispatch('addSkip', test, reason, details=details)
+
+    def addSuccess(self, test, details=None):
+        return self._dispatch('addSuccess', test, details=details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        return self._dispatch('addUnexpectedSuccess', test, details=details)
+
+    def startTestRun(self):
+        super(MultiTestResult, self).startTestRun()
+        return self._dispatch('startTestRun')
+
+    def stopTestRun(self):
+        return self._dispatch('stopTestRun')
+
+    def tags(self, new_tags, gone_tags):
+        super(MultiTestResult, self).tags(new_tags, gone_tags)
+        return self._dispatch('tags', new_tags, gone_tags)
+
+    def time(self, a_datetime):
+        return self._dispatch('time', a_datetime)
+
+    def done(self):
+        return self._dispatch('done')
+
+    def wasSuccessful(self):
+        """Was this result successful?
+
+        Only returns True if every constituent result was successful.
+        """
+        return all(self._dispatch('wasSuccessful'))
+
+
+class TextTestResult(TestResult):
+    """A TestResult which outputs activity to a text stream."""
+
+    def __init__(self, stream, failfast=False):
+        """Construct a TextTestResult writing to stream."""
+        super(TextTestResult, self).__init__(failfast=failfast)
+        self.stream = stream
+        self.sep1 = '=' * 70 + '\n'
+        self.sep2 = '-' * 70 + '\n'
+
+    def _delta_to_float(self, a_timedelta):
+        return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
+            a_timedelta.microseconds / 1000000.0)
+
+    def _show_list(self, label, error_list):
+        for test, output in error_list:
+            self.stream.write(self.sep1)
+            self.stream.write("%s: %s\n" % (label, test.id()))
+            self.stream.write(self.sep2)
+            self.stream.write(output)
+
+    def startTestRun(self):
+        super(TextTestResult, self).startTestRun()
+        self.__start = self._now()
+        self.stream.write("Tests running...\n")
+
+    def stopTestRun(self):
+        if self.testsRun != 1:
+            plural = 's'
+        else:
+            plural = ''
+        stop = self._now()
+        self._show_list('ERROR', self.errors)
+        self._show_list('FAIL', self.failures)
+        for test in self.unexpectedSuccesses:
+            self.stream.write(
+                "%sUNEXPECTED SUCCESS: %s\n%s" % (
+                    self.sep1, test.id(), self.sep2))
+        self.stream.write("\nRan %d test%s in %.3fs\n" %
+            (self.testsRun, plural,
+             self._delta_to_float(stop - self.__start)))
+        if self.wasSuccessful():
+            self.stream.write("OK\n")
+        else:
+            self.stream.write("FAILED (")
+            details = []
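+            # Errors and unexpected successes are folded into the reported
+            # failure count.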
+            details.append("failures=%d" % (
+                sum(map(len, (
+                    self.failures, self.errors, self.unexpectedSuccesses)))))
+            self.stream.write(", ".join(details))
+            self.stream.write(")\n")
+        super(TextTestResult, self).stopTestRun()
+
+
+class ThreadsafeForwardingResult(TestResult):
+    """A TestResult which ensures the target does not receive mixed up calls.
+
+    Multiple ``ThreadsafeForwardingResults`` can forward to the same target
+    result, and that target result will only ever receive the complete set of
+    events for one test at a time.
+
+    This is enforced using a semaphore, which further guarantees that tests
+    will be sent atomically even if the ``ThreadsafeForwardingResults`` are in
+    different threads.
+
+    ``ThreadsafeForwardingResult`` is typically used by
+    ``ConcurrentTestSuite``, which creates one ``ThreadsafeForwardingResult``
+    per thread, each of which wraps the TestResult that
+    ``ConcurrentTestSuite.run()`` is called with.
+
+    target.startTestRun() and target.stopTestRun() are called once for each
+    ThreadsafeForwardingResult that forwards to the same target. If the target
+    takes special action on these events, it should take care to accommodate
+    this.
+
+    time() and tags() calls are batched to be adjacent to the test result and
+    in the case of tags() are coerced into test-local scope, avoiding the
+    opportunity for bugs around global state in the target.
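+
+    A sketch of construction (``target`` is any TestResult; one semaphore
+    is shared by all forwarders)::
+
+      >>> import threading
+      >>> sem = threading.Semaphore(1)
+      >>> forwarder = ThreadsafeForwardingResult(target, sem)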
+    """
+
+    def __init__(self, target, semaphore):
+        """Create a ThreadsafeForwardingResult forwarding to target.
+
+        :param target: A ``TestResult``.
+        :param semaphore: A ``threading.Semaphore`` with limit 1.
+        """
+        TestResult.__init__(self)
+        self.result = ExtendedToOriginalDecorator(target)
+        self.semaphore = semaphore
+        self._test_start = None
+        self._global_tags = set(), set()
+        self._test_tags = set(), set()
+
+    def __repr__(self):
+        return '<%s %r>' % (self.__class__.__name__, self.result)
+
+    def _any_tags(self, tags):
+        return bool(tags[0] or tags[1])
+
+    def _add_result_with_semaphore(self, method, test, *args, **kwargs):
+        now = self._now()
+        self.semaphore.acquire()
+        try:
+            self.result.time(self._test_start)
+            self.result.startTest(test)
+            self.result.time(now)
+            if self._any_tags(self._global_tags):
+                self.result.tags(*self._global_tags)
+            if self._any_tags(self._test_tags):
+                self.result.tags(*self._test_tags)
+            self._test_tags = set(), set()
+            try:
+                method(test, *args, **kwargs)
+            finally:
+                self.result.stopTest(test)
+        finally:
+            self.semaphore.release()
+        self._test_start = None
+
+    def addError(self, test, err=None, details=None):
+        self._add_result_with_semaphore(self.result.addError,
+            test, err, details=details)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._add_result_with_semaphore(self.result.addExpectedFailure,
+            test, err, details=details)
+
+    def addFailure(self, test, err=None, details=None):
+        self._add_result_with_semaphore(self.result.addFailure,
+            test, err, details=details)
+
+    def addSkip(self, test, reason=None, details=None):
+        self._add_result_with_semaphore(self.result.addSkip,
+            test, reason, details=details)
+
+    def addSuccess(self, test, details=None):
+        self._add_result_with_semaphore(self.result.addSuccess,
+            test, details=details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self._add_result_with_semaphore(self.result.addUnexpectedSuccess,
+            test, details=details)
+
+    def progress(self, offset, whence):
+        pass
+
+    def startTestRun(self):
+        super(ThreadsafeForwardingResult, self).startTestRun()
+        self.semaphore.acquire()
+        try:
+            self.result.startTestRun()
+        finally:
+            self.semaphore.release()
+
+    def _get_shouldStop(self):
+        self.semaphore.acquire()
+        try:
+            return self.result.shouldStop
+        finally:
+            self.semaphore.release()
+    def _set_shouldStop(self, value):
+        # Another case where we should not subclass TestResult
+        pass
+    shouldStop = property(_get_shouldStop, _set_shouldStop)
+
+    def stop(self):
+        self.semaphore.acquire()
+        try:
+            self.result.stop()
+        finally:
+            self.semaphore.release()
+
+    def stopTestRun(self):
+        self.semaphore.acquire()
+        try:
+            self.result.stopTestRun()
+        finally:
+            self.semaphore.release()
+
+    def done(self):
+        self.semaphore.acquire()
+        try:
+            self.result.done()
+        finally:
+            self.semaphore.release()
+
+    def startTest(self, test):
+        self._test_start = self._now()
+        super(ThreadsafeForwardingResult, self).startTest(test)
+
+    def wasSuccessful(self):
+        return self.result.wasSuccessful()
+
+    def tags(self, new_tags, gone_tags):
+        """See `TestResult`."""
+        super(ThreadsafeForwardingResult, self).tags(new_tags, gone_tags)
+        if self._test_start is not None:
+            self._test_tags = _merge_tags(
+                self._test_tags, (new_tags, gone_tags))
+        else:
+            self._global_tags = _merge_tags(
+                self._global_tags, (new_tags, gone_tags))
+
+
+def _merge_tags(existing, changed):
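+    # The most recent change wins: a newly added tag is removed from
+    # gone_tags, and a newly removed tag is dropped from new_tags.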
+    new_tags, gone_tags = changed
+    result_new = set(existing[0])
+    result_gone = set(existing[1])
+    result_new.update(new_tags)
+    result_new.difference_update(gone_tags)
+    result_gone.update(gone_tags)
+    result_gone.difference_update(new_tags)
+    return result_new, result_gone
+
+
+class ExtendedToOriginalDecorator(object):
+    """Permit new TestResult API code to degrade gracefully with old results.
+
+    This decorates an existing TestResult and converts missing outcomes
+    such as addSkip to older outcomes such as addSuccess. It also supports
+    the extended details protocol. In all cases the most recent protocol
+    is attempted first, and fallbacks only occur when the decorated result
+    does not support the newer style of calling.
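+
+    A sketch of wrapping a plain unittest result (``test`` is
+    illustrative)::
+
+      >>> import unittest
+      >>> result = ExtendedToOriginalDecorator(unittest.TestResult())
+      >>> result.addSkip(test, reason='not supported here')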
+    """
+
+    def __init__(self, decorated):
+        self.decorated = decorated
+        self._tags = TagContext()
+        # Only used for old TestResults that do not have failfast.
+        self._failfast = False
+
+    def __repr__(self):
+        return '<%s %r>' % (self.__class__.__name__, self.decorated)
+
+    def __getattr__(self, name):
+        return getattr(self.decorated, name)
+
+    def addError(self, test, err=None, details=None):
+        try:
+            self._check_args(err, details)
+            if details is not None:
+                try:
+                    return self.decorated.addError(test, details=details)
+                except TypeError:
+                    # have to convert
+                    err = self._details_to_exc_info(details)
+            return self.decorated.addError(test, err)
+        finally:
+            if self.failfast:
+                self.stop()
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._check_args(err, details)
+        addExpectedFailure = getattr(
+            self.decorated, 'addExpectedFailure', None)
+        if addExpectedFailure is None:
+            return self.addSuccess(test)
+        if details is not None:
+            try:
+                return addExpectedFailure(test, details=details)
+            except TypeError:
+                # have to convert
+                err = self._details_to_exc_info(details)
+        return addExpectedFailure(test, err)
+
+    def addFailure(self, test, err=None, details=None):
+        try:
+            self._check_args(err, details)
+            if details is not None:
+                try:
+                    return self.decorated.addFailure(test, details=details)
+                except TypeError:
+                    # have to convert
+                    err = self._details_to_exc_info(details)
+            return self.decorated.addFailure(test, err)
+        finally:
+            if self.failfast:
+                self.stop()
+
+    def addSkip(self, test, reason=None, details=None):
+        self._check_args(reason, details)
+        addSkip = getattr(self.decorated, 'addSkip', None)
+        if addSkip is None:
+            return self.decorated.addSuccess(test)
+        if details is not None:
+            try:
+                return addSkip(test, details=details)
+            except TypeError:
+                # extract the reason if it's available
+                try:
+                    reason = details['reason'].as_text()
+                except KeyError:
+                    reason = _details_to_str(details)
+        return addSkip(test, reason)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        try:
+            outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
+            if outcome is None:
+                try:
+                    test.fail("")
+                except test.failureException:
+                    return self.addFailure(test, sys.exc_info())
+            if details is not None:
+                try:
+                    return outcome(test, details=details)
+                except TypeError:
+                    pass
+            return outcome(test)
+        finally:
+            if self.failfast:
+                self.stop()
+
+    def addSuccess(self, test, details=None):
+        if details is not None:
+            try:
+                return self.decorated.addSuccess(test, details=details)
+            except TypeError:
+                pass
+        return self.decorated.addSuccess(test)
+
+    def _check_args(self, err, details):
+        param_count = 0
+        if err is not None:
+            param_count += 1
+        if details is not None:
+            param_count += 1
+        if param_count != 1:
+            raise ValueError("Must pass only one of err '%s' and details '%s"
+                % (err, details))
+
+    def _details_to_exc_info(self, details):
+        """Convert a details dict to an exc_info tuple."""
+        return (
+            _StringException,
+            _StringException(_details_to_str(details, special='traceback')),
+            None)
+
+    @property
+    def current_tags(self):
+        return getattr(
+            self.decorated, 'current_tags', self._tags.get_current_tags())
+
+    def done(self):
+        try:
+            return self.decorated.done()
+        except AttributeError:
+            return
+
+    def _get_failfast(self):
+        return getattr(self.decorated, 'failfast', self._failfast)
+    def _set_failfast(self, value):
+        if safe_hasattr(self.decorated, 'failfast'):
+            self.decorated.failfast = value
+        else:
+            self._failfast = value
+    failfast = property(_get_failfast, _set_failfast)
+
+    def progress(self, offset, whence):
+        method = getattr(self.decorated, 'progress', None)
+        if method is None:
+            return
+        return method(offset, whence)
+
+    @property
+    def shouldStop(self):
+        return self.decorated.shouldStop
+
+    def startTest(self, test):
+        self._tags = TagContext(self._tags)
+        return self.decorated.startTest(test)
+
+    def startTestRun(self):
+        self._tags = TagContext()
+        try:
+            return self.decorated.startTestRun()
+        except AttributeError:
+            return
+
+    def stop(self):
+        return self.decorated.stop()
+
+    def stopTest(self, test):
+        self._tags = self._tags.parent
+        return self.decorated.stopTest(test)
+
+    def stopTestRun(self):
+        try:
+            return self.decorated.stopTestRun()
+        except AttributeError:
+            return
+
+    def tags(self, new_tags, gone_tags):
+        method = getattr(self.decorated, 'tags', None)
+        if method is not None:
+            return method(new_tags, gone_tags)
+        else:
+            self._tags.change_tags(new_tags, gone_tags)
+
+    def time(self, a_datetime):
+        method = getattr(self.decorated, 'time', None)
+        if method is None:
+            return
+        return method(a_datetime)
+
+    def wasSuccessful(self):
+        return self.decorated.wasSuccessful()
+
+
+class ExtendedToStreamDecorator(CopyStreamResult, StreamSummary, TestControl):
+    """Permit using old TestResult API code with new StreamResult objects.
+
+    This decorates a StreamResult and converts old (Python 2.6 / 2.7 /
+    Extended) TestResult API calls into StreamResult calls.
+
+    It also supports regular StreamResult calls, making it safe to wrap around
+    any StreamResult.
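+
+    A sketch (``sink`` is any StreamResult, ``test`` a test case)::
+
+      >>> result = ExtendedToStreamDecorator(sink)
+      >>> result.startTestRun()
+      >>> result.addSuccess(test)
+      >>> result.stopTestRun()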
+    """
+
+    def __init__(self, decorated):
+        super(ExtendedToStreamDecorator, self).__init__([decorated])
+        # Deal with mismatched base class constructors.
+        TestControl.__init__(self)
+        self._started = False
+
+    def _get_failfast(self):
+        return len(self.targets) == 2
+    def _set_failfast(self, value):
+        if value:
+            if len(self.targets) == 2:
+                return
+            self.targets.append(StreamFailFast(self.stop))
+        else:
+            del self.targets[1:]
+    failfast = property(_get_failfast, _set_failfast)
+
+    def startTest(self, test):
+        if not self._started:
+            self.startTestRun()
+        self.status(test_id=test.id(), test_status='inprogress',
+            timestamp=self._now())
+        self._tags = TagContext(self._tags)
+
+    def stopTest(self, test):
+        self._tags = self._tags.parent
+
+    def addError(self, test, err=None, details=None):
+        self._check_args(err, details)
+        self._convert(test, err, details, 'fail')
+    addFailure = addError
+
+    def _convert(self, test, err, details, status, reason=None):
+        if not self._started:
+            self.startTestRun()
+        test_id = test.id()
+        now = self._now()
+        if err is not None:
+            if details is None:
+                details = {}
+            details['traceback'] = TracebackContent(err, test)
+        if details is not None:
+            for name, content in details.items():
+                mime_type = repr(content.content_type)
+                file_bytes = None
+                for next_bytes in content.iter_bytes():
+                    if file_bytes is not None:
+                        self.status(file_name=name, file_bytes=file_bytes,
+                            mime_type=mime_type, test_id=test_id, timestamp=now)
+                    file_bytes = next_bytes
+                self.status(file_name=name, file_bytes=file_bytes, eof=True,
+                    mime_type=mime_type, test_id=test_id, timestamp=now)
+        if reason is not None:
+            self.status(file_name='reason', file_bytes=reason.encode('utf8'),
+                eof=True, mime_type="text/plain; charset=utf8",
+                test_id=test_id, timestamp=now)
+        self.status(test_id=test_id, test_status=status,
+            test_tags=self.current_tags, timestamp=now)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._check_args(err, details)
+        self._convert(test, err, details, 'xfail')
+
+    def addSkip(self, test, reason=None, details=None):
+        self._convert(test, None, details, 'skip', reason)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self._convert(test, None, details, 'uxsuccess')
+
+    def addSuccess(self, test, details=None):
+        self._convert(test, None, details, 'success')
+
+    def _check_args(self, err, details):
+        param_count = 0
+        if err is not None:
+            param_count += 1
+        if details is not None:
+            param_count += 1
+        if param_count != 1:
+            raise ValueError("Must pass only one of err '%s' and details '%s"
+                % (err, details))
+
+    def startTestRun(self):
+        super(ExtendedToStreamDecorator, self).startTestRun()
+        self._tags = TagContext()
+        self.shouldStop = False
+        self.__now = None
+        self._started = True
+
+    @property
+    def current_tags(self):
+        """The currently set tags."""
+        return self._tags.get_current_tags()
+
+    def tags(self, new_tags, gone_tags):
+        """Add and remove tags from the test.
+
+        :param new_tags: A set of tags to be added to the stream.
+        :param gone_tags: A set of tags to be removed from the stream.
+        """
+        self._tags.change_tags(new_tags, gone_tags)
+
+    def _now(self):
+        """Return the current 'test time'.
+
+        If the time() method has not been called, this is equivalent to
+        datetime.now(); otherwise it's the last timestamp supplied to the
+        time() method.
+        """
+        if self.__now is None:
+            return datetime.datetime.now(utc)
+        else:
+            return self.__now
+
+    def time(self, a_datetime):
+        self.__now = a_datetime
+
+    def wasSuccessful(self):
+        if not self._started:
+            self.startTestRun()
+        return super(ExtendedToStreamDecorator, self).wasSuccessful()
+
+
+class StreamToExtendedDecorator(StreamResult):
+    """Convert StreamResult API calls into ExtendedTestResult calls.
+
+    This will buffer all calls for all concurrently active tests, and
+    then flush each test as they complete.
+
+    Incomplete tests will be flushed as errors when the test run stops.
+
+    Non-test file attachments are accumulated into a test called
+    'testtools.extradata' and flushed at the end of the run.
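+
+    A sketch (``target`` is an ExtendedTestResult-style result)::
+
+      >>> result = StreamToExtendedDecorator(target)
+      >>> result.startTestRun()
+      >>> result.stopTestRun()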
+    """
+
+    def __init__(self, decorated):
+        # ExtendedToOriginalDecorator takes care of thunking details back to
+        # exceptions/reasons etc.
+        self.decorated = ExtendedToOriginalDecorator(decorated)
+        # StreamToDict buffers and gives us individual tests.
+        self.hook = StreamToDict(self._handle_tests)
+
+    def status(self, test_id=None, test_status=None, *args, **kwargs):
+        if test_status == 'exists':
+            return
+        self.hook.status(
+            test_id=test_id, test_status=test_status, *args, **kwargs)
+
+    def startTestRun(self):
+        self.decorated.startTestRun()
+        self.hook.startTestRun()
+
+    def stopTestRun(self):
+        self.hook.stopTestRun()
+        self.decorated.stopTestRun()
+
+    def _handle_tests(self, test_dict):
+        case = test_dict_to_case(test_dict)
+        case.run(self.decorated)
+
+
+class StreamToQueue(StreamResult):
+    """A StreamResult which enqueues events as a dict to a queue.Queue.
+
+    Events have their route code updated to include the route code
+    StreamToQueue was constructed with before they are submitted. If the event
+    route code is None, it is replaced with the StreamToQueue route code,
+    otherwise it is prefixed with the supplied code + a '/'.
+
+    startTestRun and stopTestRun are forwarded to the queue. Implementors that
+    dequeue events back into StreamResult calls should take care not to call
+    startTestRun / stopTestRun on other StreamResult objects multiple times
+    (e.g. by filtering startTestRun and stopTestRun).
+
+    ``StreamToQueue`` is typically used by
+    ``ConcurrentStreamTestSuite``, which creates one ``StreamToQueue``
+    per thread, forwards status events to the StreamResult that
+    ``ConcurrentStreamTestSuite.run()`` was called with, and uses the
+    stopTestRun event to trigger calling join() on each thread.
+
+    Unlike ThreadsafeForwardingResult which this supersedes, no buffering takes
+    place - any event supplied to a StreamToQueue will be inserted into the
+    queue immediately.
+
+    Events are forwarded as a dict with a key ``event`` which is one of
+    ``startTestRun``, ``stopTestRun`` or ``status``. When ``event`` is
+    ``status`` the dict also has keys matching the keyword arguments
+    of ``StreamResult.status``, otherwise it has one other key ``result``,
+    which is the StreamToQueue object that emitted the event.
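+
+    A sketch using the Python 3 module name for the queue::
+
+      >>> import queue
+      >>> q = queue.Queue()
+      >>> result = StreamToQueue(q, 'worker-0')
+      >>> result.startTestRun()
+      >>> q.get()['event']
+      'startTestRun'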
+    """
+
+    def __init__(self, queue, routing_code):
+        """Create a StreamToQueue forwarding to target.
+
+        :param queue: A ``queue.Queue`` to receive events.
+        :param routing_code: The routing code to apply to messages.
+        """
+        super(StreamToQueue, self).__init__()
+        self.queue = queue
+        self.routing_code = routing_code
+
+    def startTestRun(self):
+        self.queue.put(dict(event='startTestRun', result=self))
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        self.queue.put(dict(event='status', test_id=test_id,
+            test_status=test_status, test_tags=test_tags, runnable=runnable,
+            file_name=file_name, file_bytes=file_bytes, eof=eof,
+            mime_type=mime_type, route_code=self.route_code(route_code),
+            timestamp=timestamp))
+
+    def stopTestRun(self):
+        self.queue.put(dict(event='stopTestRun', result=self))
+
+    def route_code(self, route_code):
+        """Adjust route_code on the way through."""
+        if route_code is None:
+            return self.routing_code
+        return self.routing_code + _u("/") + route_code
+
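+
+# Illustrative sketch, not part of the library: a producer feeding a queue
+# through StreamToQueue and a consumer draining it back into another
+# StreamResult. 'target' is a hypothetical StreamResult instance.
+def _example_stream_to_queue(target):  # pragma: no cover
+    try:
+        import queue  # Python 3
+    except ImportError:
+        import Queue as queue  # Python 2
+    q = queue.Queue()
+    source = StreamToQueue(q, _u("worker-0"))
+    source.startTestRun()
+    source.status(test_id='sample.test', test_status='success')
+    source.stopTestRun()
+    while not q.empty():
+        event = q.get()
+        if event['event'] == 'status':
+            # The route code arrives prefixed: 'worker-0' for this event.
+            kwargs = dict((k, v) for k, v in event.items() if k != 'event')
+            target.status(**kwargs)
+        # startTestRun/stopTestRun events carry the originating result in
+        # 'result'; they are typically filtered so the target sees each
+        # exactly once across all producers.
+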
+
+class TestResultDecorator(object):
+    """General pass-through decorator.
+
+    This provides a base that other TestResults can inherit from to
+    gain basic forwarding functionality.
+    """
+
+    def __init__(self, decorated):
+        """Create a TestResultDecorator forwarding to decorated."""
+        self.decorated = decorated
+
+    def startTest(self, test):
+        return self.decorated.startTest(test)
+
+    def startTestRun(self):
+        return self.decorated.startTestRun()
+
+    def stopTest(self, test):
+        return self.decorated.stopTest(test)
+
+    def stopTestRun(self):
+        return self.decorated.stopTestRun()
+
+    def addError(self, test, err=None, details=None):
+        return self.decorated.addError(test, err, details=details)
+
+    def addFailure(self, test, err=None, details=None):
+        return self.decorated.addFailure(test, err, details=details)
+
+    def addSuccess(self, test, details=None):
+        return self.decorated.addSuccess(test, details=details)
+
+    def addSkip(self, test, reason=None, details=None):
+        return self.decorated.addSkip(test, reason, details=details)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        return self.decorated.addExpectedFailure(test, err, details=details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        return self.decorated.addUnexpectedSuccess(test, details=details)
+
+    def progress(self, offset, whence):
+        return self.decorated.progress(offset, whence)
+
+    def wasSuccessful(self):
+        return self.decorated.wasSuccessful()
+
+    @property
+    def current_tags(self):
+        return self.decorated.current_tags
+
+    @property
+    def shouldStop(self):
+        return self.decorated.shouldStop
+
+    def stop(self):
+        return self.decorated.stop()
+
+    @property
+    def testsRun(self):
+        return self.decorated.testsRun
+
+    def tags(self, new_tags, gone_tags):
+        return self.decorated.tags(new_tags, gone_tags)
+
+    def time(self, a_datetime):
+        return self.decorated.time(a_datetime)
+
+
+class Tagger(TestResultDecorator):
+    """Tag each test individually."""
+
+    def __init__(self, decorated, new_tags, gone_tags):
+        """Wrap 'decorated' such that each test is tagged.
+
+        :param new_tags: Tags to be added for each test.
+        :param gone_tags: Tags to be removed for each test.
+        """
+        super(Tagger, self).__init__(decorated)
+        self._new_tags = set(new_tags)
+        self._gone_tags = set(gone_tags)
+
+    def startTest(self, test):
+        super(Tagger, self).startTest(test)
+        self.tags(self._new_tags, self._gone_tags)
+
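+
+# Illustrative sketch, not part of the library: every test reported through
+# the Tagger gains the 'slow' tag on the wrapped result. The tag name is an
+# arbitrary example.
+def _example_tagger():  # pragma: no cover
+    result = TestResult()
+    tagged = Tagger(result, new_tags=set(['slow']), gone_tags=set())
+    tagged.startTestRun()
+    # tagged.startTest(case) would now also apply the tags, so that
+    # result.current_tags includes 'slow' for the duration of the test.
+    tagged.stopTestRun()
+    return tagged
+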
+
+class TestByTestResult(TestResult):
+    """Call something every time a test completes."""
+
+    def __init__(self, on_test):
+        """Construct a ``TestByTestResult``.
+
+        :param on_test: A callable that takes a test case, a status (one of
+            "success", "failure", "error", "skip", or "xfail"), a start time
+            (a ``datetime`` with timezone), a stop time, an iterable of tags,
+            and a details dict. It is called at the end of each test (i.e. on
+            ``stopTest``) with the accumulated values for that test.
+        """
+        super(TestByTestResult, self).__init__()
+        self._on_test = on_test
+
+    def startTest(self, test):
+        super(TestByTestResult, self).startTest(test)
+        self._start_time = self._now()
+        # There's no supported (i.e. tested) behaviour that relies on these
+        # being set, but it makes me more comfortable all the same. -- jml
+        self._status = None
+        self._details = None
+        self._stop_time = None
+
+    def stopTest(self, test):
+        self._stop_time = self._now()
+        tags = set(self.current_tags)
+        super(TestByTestResult, self).stopTest(test)
+        self._on_test(
+            test=test,
+            status=self._status,
+            start_time=self._start_time,
+            stop_time=self._stop_time,
+            tags=tags,
+            details=self._details)
+
+    def _err_to_details(self, test, err, details):
+        if details:
+            return details
+        return {'traceback': TracebackContent(err, test)}
+
+    def addSuccess(self, test, details=None):
+        super(TestByTestResult, self).addSuccess(test)
+        self._status = 'success'
+        self._details = details
+
+    def addFailure(self, test, err=None, details=None):
+        super(TestByTestResult, self).addFailure(test, err, details)
+        self._status = 'failure'
+        self._details = self._err_to_details(test, err, details)
+
+    def addError(self, test, err=None, details=None):
+        super(TestByTestResult, self).addError(test, err, details)
+        self._status = 'error'
+        self._details = self._err_to_details(test, err, details)
+
+    def addSkip(self, test, reason=None, details=None):
+        super(TestByTestResult, self).addSkip(test, reason, details)
+        self._status = 'skip'
+        if details is None:
+            details = {'reason': text_content(reason)}
+        elif reason:
+            # XXX: What if details already has 'reason' key?
+            details['reason'] = text_content(reason)
+        self._details = details
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        super(TestByTestResult, self).addExpectedFailure(test, err, details)
+        self._status = 'xfail'
+        self._details = self._err_to_details(test, err, details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        super(TestByTestResult, self).addUnexpectedSuccess(test, details)
+        self._status = 'success'
+        self._details = details
+
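+
+# Illustrative sketch, not part of the library: collecting one summary entry
+# per test through the on_test callback described above.
+def _example_test_by_test():  # pragma: no cover
+    log = []
+    def on_test(test, status, start_time, stop_time, tags, details):
+        log.append((test.id(), status, stop_time - start_time, sorted(tags)))
+    result = TestByTestResult(on_test)
+    # Running any suite against 'result' appends one entry per test:
+    #     suite.run(result)
+    return result, log
+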
+
+class TimestampingStreamResult(CopyStreamResult):
+    """A StreamResult decorator that assigns a timestamp when none is present.
+
+    This is convenient for ensuring events are timestamped.
+    """
+
+    def __init__(self, target):
+        super(TimestampingStreamResult, self).__init__([target])
+
+    def status(self, *args, **kwargs):
+        timestamp = kwargs.pop('timestamp', None)
+        if timestamp is None:
+            timestamp = datetime.datetime.now(utc)
+        super(TimestampingStreamResult, self).status(
+            *args, timestamp=timestamp, **kwargs)
+
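+
+# Illustrative sketch, not part of the library: events without a timestamp
+# are stamped with the current UTC time; explicit timestamps pass through.
+def _example_timestamping(target):  # pragma: no cover
+    result = TimestampingStreamResult(target)
+    result.startTestRun()
+    result.status(test_id='sample.test', test_status='success')  # stamped
+    explicit = datetime.datetime(2014, 1, 1, tzinfo=utc)
+    result.status(test_id='sample.test', test_status='success',
+                  timestamp=explicit)  # forwarded unchanged
+    result.stopTestRun()
+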
+
+class _StringException(Exception):
+    """An exception made from an arbitrary string."""
+
+    if not str_is_unicode:
+        def __init__(self, string):
+            if type(string) is not unicode:
+                raise TypeError("_StringException expects unicode, got %r" %
+                    (string,))
+            Exception.__init__(self, string)
+
+        def __str__(self):
+            return self.args[0].encode("utf-8")
+
+        def __unicode__(self):
+            return self.args[0]
+    # For 3.0 and above the default __str__ is fine, so we don't define one.
+
+    def __hash__(self):
+        return id(self)
+
+    def __eq__(self, other):
+        try:
+            return self.args == other.args
+        except AttributeError:
+            return False
+
+
+def _format_text_attachment(name, text):
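+    # Multi-line attachments get the {{{ }}} markers on their own lines;
+    # single-line attachments are wrapped inline.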
+    if '\n' in text:
+        return "%s: {{{\n%s\n}}}\n" % (name, text)
+    return "%s: {{{%s}}}" % (name, text)
+
+
+def _details_to_str(details, special=None):
+    """Convert a details dict to a string.
+
+    :param details: A dictionary mapping short names to ``Content`` objects.
+    :param special: If specified, the name of the primary attachment, which
+        should have special attention drawn to it. Normally this is the
+        traceback that caused the test to fail.
+    :return: A formatted string that can be included in text test results.
+    """
+    empty_attachments = []
+    binary_attachments = []
+    text_attachments = []
+    special_content = None
+    # sorted() is used for deterministic output in tests; we may want to
+    # drop it and use a dict subclass with a defined item order instead.
+    for key, content in sorted(details.items()):
+        if content.content_type.type != 'text':
+            binary_attachments.append((key, content.content_type))
+            continue
+        text = content.as_text().strip()
+        if not text:
+            empty_attachments.append(key)
+            continue
+        # We want the 'special' attachment to be at the bottom.
+        if key == special:
+            special_content = '%s\n' % (text,)
+            continue
+        text_attachments.append(_format_text_attachment(key, text))
+    if text_attachments and not text_attachments[-1].endswith('\n'):
+        text_attachments.append('')
+    if special_content:
+        text_attachments.append(special_content)
+    lines = []
+    if binary_attachments:
+        lines.append('Binary content:\n')
+        for name, content_type in binary_attachments:
+            lines.append('  %s (%s)\n' % (name, content_type))
+    if empty_attachments:
+        lines.append('Empty attachments:\n')
+        for name in empty_attachments:
+            lines.append('  %s\n' % (name,))
+    if (binary_attachments or empty_attachments) and text_attachments:
+        lines.append('\n')
+    lines.append('\n'.join(text_attachments))
+    return _u('').join(lines)
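+
+
+# Illustrative sketch, not part of the library: formatting a details dict
+# with a designated 'special' traceback attachment (rendered last, without
+# the {{{ }}} markers). text_content is already imported in this module.
+def _example_details_to_str():  # pragma: no cover
+    details = {
+        'log': text_content('line one\nline two'),
+        'traceback': text_content('Traceback (most recent call last): ...'),
+        }
+    return _details_to_str(details, special='traceback')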
diff --git a/third_party/testtools/testtools/tests/__init__.py b/third_party/testtools/testtools/tests/__init__.py
new file mode 100644
index 0000000..d40fcb3
--- /dev/null
+++ b/third_party/testtools/testtools/tests/__init__.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2008-2013 testtools developers. See LICENSE for details.
+
+"""Tests for testtools itself."""
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+    from testtools.tests import (
+        matchers,
+        test_assert_that,
+        test_compat,
+        test_content,
+        test_content_type,
+        test_deferredruntest,
+        test_distutilscmd,
+        test_fixturesupport,
+        test_helpers,
+        test_monkey,
+        test_run,
+        test_runtest,
+        test_spinner,
+        test_tags,
+        test_testcase,
+        test_testresult,
+        test_testsuite,
+        )
+    modules = [
+        matchers,
+        test_assert_that,
+        test_compat,
+        test_content,
+        test_content_type,
+        test_deferredruntest,
+        test_distutilscmd,
+        test_fixturesupport,
+        test_helpers,
+        test_monkey,
+        test_run,
+        test_runtest,
+        test_spinner,
+        test_tags,
+        test_testcase,
+        test_testresult,
+        test_testsuite,
+        ]
+    suites = map(lambda x: x.test_suite(), modules)
+    return TestSuite(suites)
diff --git a/third_party/testtools/testtools/tests/helpers.py b/third_party/testtools/testtools/tests/helpers.py
new file mode 100644
index 0000000..f766da3
--- /dev/null
+++ b/third_party/testtools/testtools/tests/helpers.py
@@ -0,0 +1,108 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Helpers for tests."""
+
+__all__ = [
+    'LoggingResult',
+    ]
+
+import sys
+
+from extras import safe_hasattr
+
+from testtools import TestResult
+from testtools.content import StackLinesContent
+from testtools import runtest
+
+
+# Importing to preserve compatibility.
+safe_hasattr
+
+# GZ 2010-08-12: Don't do this, pointlessly creates an exc_info cycle
+try:
+    raise Exception
+except Exception:
+    an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard-to-predict tests (because Python upstream keeps changing
+# things).
+class LoggingResult(TestResult):
+    """TestResult that logs its event to a list."""
+
+    def __init__(self, log):
+        self._events = log
+        super(LoggingResult, self).__init__()
+
+    def startTest(self, test):
+        self._events.append(('startTest', test))
+        super(LoggingResult, self).startTest(test)
+
+    def stop(self):
+        self._events.append('stop')
+        super(LoggingResult, self).stop()
+
+    def stopTest(self, test):
+        self._events.append(('stopTest', test))
+        super(LoggingResult, self).stopTest(test)
+
+    def addFailure(self, test, error):
+        self._events.append(('addFailure', test, error))
+        super(LoggingResult, self).addFailure(test, error)
+
+    def addError(self, test, error):
+        self._events.append(('addError', test, error))
+        super(LoggingResult, self).addError(test, error)
+
+    def addSkip(self, test, reason):
+        self._events.append(('addSkip', test, reason))
+        super(LoggingResult, self).addSkip(test, reason)
+
+    def addSuccess(self, test):
+        self._events.append(('addSuccess', test))
+        super(LoggingResult, self).addSuccess(test)
+
+    def startTestRun(self):
+        self._events.append('startTestRun')
+        super(LoggingResult, self).startTestRun()
+
+    def stopTestRun(self):
+        self._events.append('stopTestRun')
+        super(LoggingResult, self).stopTestRun()
+
+    def done(self):
+        self._events.append('done')
+        super(LoggingResult, self).done()
+
+    def tags(self, new_tags, gone_tags):
+        self._events.append(('tags', new_tags, gone_tags))
+        super(LoggingResult, self).tags(new_tags, gone_tags)
+
+    def time(self, a_datetime):
+        self._events.append(('time', a_datetime))
+        super(LoggingResult, self).time(a_datetime)
+
+
+def is_stack_hidden():
+    return StackLinesContent.HIDE_INTERNAL_STACK
+
+
+def hide_testtools_stack(should_hide=True):
+    result = StackLinesContent.HIDE_INTERNAL_STACK
+    StackLinesContent.HIDE_INTERNAL_STACK = should_hide
+    return result
+
+
+def run_with_stack_hidden(should_hide, f, *args, **kwargs):
+    old_should_hide = hide_testtools_stack(should_hide)
+    try:
+        return f(*args, **kwargs)
+    finally:
+        hide_testtools_stack(old_should_hide)
+
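+
+# Illustrative sketch, not part of these helpers: temporarily revealing
+# testtools-internal stack frames around a single call.
+def _example_run_with_stack_hidden():  # pragma: no cover
+    def user_code():
+        return is_stack_hidden()  # False inside the call below
+    # The previous flag value is restored afterwards.
+    return run_with_stack_hidden(False, user_code)
+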
+
+class FullStackRunTest(runtest.RunTest):
+
+    def _run_user(self, fn, *args, **kwargs):
+        return run_with_stack_hidden(
+            False,
+            super(FullStackRunTest, self)._run_user, fn, *args, **kwargs)
diff --git a/third_party/testtools/testtools/tests/matchers/__init__.py b/third_party/testtools/testtools/tests/matchers/__init__.py
new file mode 100644
index 0000000..ebab308
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2009-2012 testtools developers. See LICENSE for details.
+
+
+from unittest import TestSuite
+
+
+def test_suite():
+    from testtools.tests.matchers import (
+        test_basic,
+        test_datastructures,
+        test_dict,
+        test_doctest,
+        test_exception,
+        test_filesystem,
+        test_higherorder,
+        test_impl,
+        )
+    modules = [
+        test_basic,
+        test_datastructures,
+        test_dict,
+        test_doctest,
+        test_exception,
+        test_filesystem,
+        test_higherorder,
+        test_impl,
+        ]
+    suites = map(lambda x: x.test_suite(), modules)
+    return TestSuite(suites)
diff --git a/third_party/testtools/testtools/tests/matchers/helpers.py b/third_party/testtools/testtools/tests/matchers/helpers.py
new file mode 100644
index 0000000..3ff8727
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/helpers.py
@@ -0,0 +1,42 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestMatchersInterface(object):
+
+    run_tests_with = FullStackRunTest
+
+    def test_matches_match(self):
+        matcher = self.matches_matcher
+        matches = self.matches_matches
+        mismatches = self.matches_mismatches
+        for candidate in matches:
+            self.assertEqual(None, matcher.match(candidate))
+        for candidate in mismatches:
+            mismatch = matcher.match(candidate)
+            self.assertNotEqual(None, mismatch)
+            self.assertNotEqual(None, getattr(mismatch, 'describe', None))
+
+    def test__str__(self):
+        # str_examples: [(expected str() output, matcher), ...]
+        from testtools.matchers._doctest import DocTestMatches
+        examples = self.str_examples
+        for expected, matcher in examples:
+            self.assertThat(matcher, DocTestMatches(expected))
+
+    def test_describe_difference(self):
+        # describe_examples: [(expected description, matchee, matcher), ...]
+        examples = self.describe_examples
+        for difference, matchee, matcher in examples:
+            mismatch = matcher.match(matchee)
+            self.assertEqual(difference, mismatch.describe())
+
+    def test_mismatch_details(self):
+        # The mismatch object must provide get_details, which must return a
+        # dictionary mapping names to Content objects.
+        examples = self.describe_examples
+        for difference, matchee, matcher in examples:
+            mismatch = matcher.match(matchee)
+            details = mismatch.get_details()
+            self.assertEqual(dict(details), details)
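+
+
+# Illustrative sketch, not part of these helpers: a concrete interface test
+# supplies the class attributes the mixin above reads. Kept as a comment so
+# it is not collected as a real test; names mirror existing tests such as
+# TestEqualsInterface.
+#
+#   from testtools import TestCase
+#   from testtools.matchers import Equals
+#
+#   class TestEqualsExample(TestCase, TestMatchersInterface):
+#       matches_matcher = Equals(1)          # matcher under test
+#       matches_matches = [1]                # values that must match
+#       matches_mismatches = [2]             # values that must not match
+#       str_examples = [("Equals(1)", Equals(1))]
+#       describe_examples = [("1 != 2", 2, Equals(1))]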
diff --git a/third_party/testtools/testtools/tests/matchers/test_basic.py b/third_party/testtools/testtools/tests/matchers/test_basic.py
new file mode 100644
index 0000000..c53bc9e
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_basic.py
@@ -0,0 +1,396 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import re
+
+from testtools import TestCase
+from testtools.compat import (
+    text_repr,
+    _b,
+    _u,
+    )
+from testtools.matchers._basic import (
+    _BinaryMismatch,
+    Contains,
+    DoesNotEndWith,
+    DoesNotStartWith,
+    EndsWith,
+    Equals,
+    Is,
+    IsInstance,
+    LessThan,
+    GreaterThan,
+    HasLength,
+    MatchesRegex,
+    NotEquals,
+    SameMembers,
+    StartsWith,
+    )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class Test_BinaryMismatch(TestCase):
+    """Mismatches from binary comparisons need useful describe output"""
+
+    _long_string = "This is a longish multiline non-ascii string\n\xa7"
+    _long_b = _b(_long_string)
+    _long_u = _u(_long_string)
+
+    class CustomRepr(object):
+        def __init__(self, repr_string):
+            self._repr_string = repr_string
+        def __repr__(self):
+            return _u('<object ') + _u(self._repr_string) + _u('>')
+
+    def test_short_objects(self):
+        o1, o2 = self.CustomRepr('a'), self.CustomRepr('b')
+        mismatch = _BinaryMismatch(o1, "!~", o2)
+        self.assertEqual(mismatch.describe(), "%r !~ %r" % (o1, o2))
+
+    def test_short_mixed_strings(self):
+        b, u = _b("\xa7"), _u("\xa7")
+        mismatch = _BinaryMismatch(b, "!~", u)
+        self.assertEqual(mismatch.describe(), "%r !~ %r" % (b, u))
+
+    def test_long_bytes(self):
+        one_line_b = self._long_b.replace(_b("\n"), _b(" "))
+        mismatch = _BinaryMismatch(one_line_b, "!~", self._long_b)
+        self.assertEqual(mismatch.describe(),
+            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
+                text_repr(one_line_b),
+                text_repr(self._long_b, multiline=True)))
+
+    def test_long_unicode(self):
+        one_line_u = self._long_u.replace("\n", " ")
+        mismatch = _BinaryMismatch(one_line_u, "!~", self._long_u)
+        self.assertEqual(mismatch.describe(),
+            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
+                text_repr(one_line_u),
+                text_repr(self._long_u, multiline=True)))
+
+    def test_long_mixed_strings(self):
+        mismatch = _BinaryMismatch(self._long_b, "!~", self._long_u)
+        self.assertEqual(mismatch.describe(),
+            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
+                text_repr(self._long_b, multiline=True),
+                text_repr(self._long_u, multiline=True)))
+
+    def test_long_bytes_and_object(self):
+        obj = object()
+        mismatch = _BinaryMismatch(self._long_b, "!~", obj)
+        self.assertEqual(mismatch.describe(),
+            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
+                text_repr(self._long_b, multiline=True),
+                repr(obj)))
+
+    def test_long_unicode_and_object(self):
+        obj = object()
+        mismatch = _BinaryMismatch(self._long_u, "!~", obj)
+        self.assertEqual(mismatch.describe(),
+            "%s:\nreference = %s\nactual    = %s\n" % ("!~",
+                text_repr(self._long_u, multiline=True),
+                repr(obj)))
+
+
+class TestEqualsInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = Equals(1)
+    matches_matches = [1]
+    matches_mismatches = [2]
+
+    str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
+
+    describe_examples = [("1 != 2", 2, Equals(1))]
+
+
+class TestNotEqualsInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = NotEquals(1)
+    matches_matches = [2]
+    matches_mismatches = [1]
+
+    str_examples = [
+        ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
+
+    describe_examples = [("1 == 1", 1, NotEquals(1))]
+
+
+class TestIsInterface(TestCase, TestMatchersInterface):
+
+    foo = object()
+    bar = object()
+
+    matches_matcher = Is(foo)
+    matches_matches = [foo]
+    matches_mismatches = [bar, 1]
+
+    str_examples = [("Is(2)", Is(2))]
+
+    describe_examples = [("1 is not 2", 2, Is(1))]
+
+
+class TestIsInstanceInterface(TestCase, TestMatchersInterface):
+
+    class Foo:
+        pass
+
+    matches_matcher = IsInstance(Foo)
+    matches_matches = [Foo()]
+    matches_mismatches = [object(), 1, Foo]
+
+    str_examples = [
+            ("IsInstance(str)", IsInstance(str)),
+            ("IsInstance(str, int)", IsInstance(str, int)),
+            ]
+
+    describe_examples = [
+            ("'foo' is not an instance of int", 'foo', IsInstance(int)),
+            ("'foo' is not an instance of any of (int, type)", 'foo',
+             IsInstance(int, type)),
+            ]
+
+
+class TestLessThanInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = LessThan(4)
+    matches_matches = [-5, 3]
+    matches_mismatches = [4, 5, 5000]
+
+    str_examples = [
+        ("LessThan(12)", LessThan(12)),
+        ]
+
+    describe_examples = [
+        ('4 is not > 5', 5, LessThan(4)),
+        ('4 is not > 4', 4, LessThan(4)),
+        ]
+
+
+class TestGreaterThanInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = GreaterThan(4)
+    matches_matches = [5, 8]
+    matches_mismatches = [-2, 0, 4]
+
+    str_examples = [
+        ("GreaterThan(12)", GreaterThan(12)),
+        ]
+
+    describe_examples = [
+        ('5 is not < 4', 4, GreaterThan(5)),
+        ('4 is not < 4', 4, GreaterThan(4)),
+        ]
+
+
+class TestContainsInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = Contains('foo')
+    matches_matches = ['foo', 'afoo', 'fooa']
+    matches_mismatches = ['f', 'fo', 'oo', 'faoo', 'foao']
+
+    str_examples = [
+        ("Contains(1)", Contains(1)),
+        ("Contains('foo')", Contains('foo')),
+        ]
+
+    describe_examples = [("1 not in 2", 2, Contains(1))]
+
+
+class DoesNotStartWithTests(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_describe(self):
+        mismatch = DoesNotStartWith("fo", "bo")
+        self.assertEqual("'fo' does not start with 'bo'.", mismatch.describe())
+
+    def test_describe_non_ascii_unicode(self):
+        string = _u("A\xA7")
+        suffix = _u("B\xA7")
+        mismatch = DoesNotStartWith(string, suffix)
+        self.assertEqual("%s does not start with %s." % (
+            text_repr(string), text_repr(suffix)),
+            mismatch.describe())
+
+    def test_describe_non_ascii_bytes(self):
+        string = _b("A\xA7")
+        suffix = _b("B\xA7")
+        mismatch = DoesNotStartWith(string, suffix)
+        self.assertEqual("%r does not start with %r." % (string, suffix),
+            mismatch.describe())
+
+
+class StartsWithTests(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_str(self):
+        matcher = StartsWith("bar")
+        self.assertEqual("StartsWith('bar')", str(matcher))
+
+    def test_str_with_bytes(self):
+        b = _b("\xA7")
+        matcher = StartsWith(b)
+        self.assertEqual("StartsWith(%r)" % (b,), str(matcher))
+
+    def test_str_with_unicode(self):
+        u = _u("\xA7")
+        matcher = StartsWith(u)
+        self.assertEqual("StartsWith(%r)" % (u,), str(matcher))
+
+    def test_match(self):
+        matcher = StartsWith("bar")
+        self.assertIs(None, matcher.match("barf"))
+
+    def test_mismatch_returns_does_not_start_with(self):
+        matcher = StartsWith("bar")
+        self.assertIsInstance(matcher.match("foo"), DoesNotStartWith)
+
+    def test_mismatch_sets_matchee(self):
+        matcher = StartsWith("bar")
+        mismatch = matcher.match("foo")
+        self.assertEqual("foo", mismatch.matchee)
+
+    def test_mismatch_sets_expected(self):
+        matcher = StartsWith("bar")
+        mismatch = matcher.match("foo")
+        self.assertEqual("bar", mismatch.expected)
+
+
+class DoesNotEndWithTests(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_describe(self):
+        mismatch = DoesNotEndWith("fo", "bo")
+        self.assertEqual("'fo' does not end with 'bo'.", mismatch.describe())
+
+    def test_describe_non_ascii_unicode(self):
+        string = _u("A\xA7")
+        suffix = _u("B\xA7")
+        mismatch = DoesNotEndWith(string, suffix)
+        self.assertEqual("%s does not end with %s." % (
+            text_repr(string), text_repr(suffix)),
+            mismatch.describe())
+
+    def test_describe_non_ascii_bytes(self):
+        string = _b("A\xA7")
+        suffix = _b("B\xA7")
+        mismatch = DoesNotEndWith(string, suffix)
+        self.assertEqual("%r does not end with %r." % (string, suffix),
+            mismatch.describe())
+
+
+class EndsWithTests(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_str(self):
+        matcher = EndsWith("bar")
+        self.assertEqual("EndsWith('bar')", str(matcher))
+
+    def test_str_with_bytes(self):
+        b = _b("\xA7")
+        matcher = EndsWith(b)
+        self.assertEqual("EndsWith(%r)" % (b,), str(matcher))
+
+    def test_str_with_unicode(self):
+        u = _u("\xA7")
+        matcher = EndsWith(u)
+        self.assertEqual("EndsWith(%r)" % (u,), str(matcher))
+
+    def test_match(self):
+        matcher = EndsWith("arf")
+        self.assertIs(None, matcher.match("barf"))
+
+    def test_mismatch_returns_does_not_end_with(self):
+        matcher = EndsWith("bar")
+        self.assertIsInstance(matcher.match("foo"), DoesNotEndWith)
+
+    def test_mismatch_sets_matchee(self):
+        matcher = EndsWith("bar")
+        mismatch = matcher.match("foo")
+        self.assertEqual("foo", mismatch.matchee)
+
+    def test_mismatch_sets_expected(self):
+        matcher = EndsWith("bar")
+        mismatch = matcher.match("foo")
+        self.assertEqual("bar", mismatch.expected)
+
+
+class TestSameMembers(TestCase, TestMatchersInterface):
+
+    matches_matcher = SameMembers([1, 1, 2, 3, {'foo': 'bar'}])
+    matches_matches = [
+        [1, 1, 2, 3, {'foo': 'bar'}],
+        [3, {'foo': 'bar'}, 1, 2, 1],
+        [3, 2, 1, {'foo': 'bar'}, 1],
+        (2, {'foo': 'bar'}, 3, 1, 1),
+        ]
+    matches_mismatches = [
+        set([1, 2, 3]),
+        [1, 1, 2, 3, 5],
+        [1, 2, 3, {'foo': 'bar'}],
+        'foo',
+        ]
+
+    describe_examples = [
+        (("elements differ:\n"
+          "reference = ['apple', 'orange', 'canteloupe', 'watermelon', 'lemon', 'banana']\n"
+          "actual    = ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe']\n"
+          ": \n"
+          "missing:    ['watermelon']\n"
+          "extra:      ['sparrow']"
+          ),
+         ['orange', 'apple', 'banana', 'sparrow', 'lemon', 'canteloupe',],
+         SameMembers(
+             ['apple', 'orange', 'canteloupe', 'watermelon',
+              'lemon', 'banana',])),
+        ]
+
+    str_examples = [
+        ('SameMembers([1, 2, 3])', SameMembers([1, 2, 3])),
+        ]
+
+
+class TestMatchesRegex(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesRegex('a|b')
+    matches_matches = ['a', 'b']
+    matches_mismatches = ['c']
+
+    str_examples = [
+        ("MatchesRegex('a|b')", MatchesRegex('a|b')),
+        ("MatchesRegex('a|b', re.M)", MatchesRegex('a|b', re.M)),
+        ("MatchesRegex('a|b', re.I|re.M)", MatchesRegex('a|b', re.I|re.M)),
+        ("MatchesRegex(%r)" % (_b("\xA7"),), MatchesRegex(_b("\xA7"))),
+        ("MatchesRegex(%r)" % (_u("\xA7"),), MatchesRegex(_u("\xA7"))),
+        ]
+
+    describe_examples = [
+        ("'c' does not match /a|b/", 'c', MatchesRegex('a|b')),
+        ("'c' does not match /a\d/", 'c', MatchesRegex(r'a\d')),
+        ("%r does not match /\\s+\\xa7/" % (_b('c'),),
+            _b('c'), MatchesRegex(_b("\\s+\xA7"))),
+        ("%r does not match /\\s+\\xa7/" % (_u('c'),),
+            _u('c'), MatchesRegex(_u("\\s+\xA7"))),
+        ]
+
+
+class TestHasLength(TestCase, TestMatchersInterface):
+
+    matches_matcher = HasLength(2)
+    matches_matches = [[1, 2]]
+    matches_mismatches = [[], [1], [3, 2, 1]]
+
+    str_examples = [
+        ("HasLength(2)", HasLength(2)),
+        ]
+
+    describe_examples = [
+        ("len([]) != 1", [], HasLength(1)),
+        ]
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_datastructures.py b/third_party/testtools/testtools/tests/matchers/test_datastructures.py
new file mode 100644
index 0000000..f6d9d86
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_datastructures.py
@@ -0,0 +1,209 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+import re
+import sys
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.matchers import (
+    Annotate,
+    Equals,
+    LessThan,
+    MatchesRegex,
+    NotEquals,
+    )
+from testtools.matchers._datastructures import (
+    ContainsAll,
+    MatchesListwise,
+    MatchesStructure,
+    MatchesSetwise,
+    )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def run_doctest(obj, name):
+    p = doctest.DocTestParser()
+    t = p.get_doctest(
+        obj.__doc__, sys.modules[obj.__module__].__dict__, name, '', 0)
+    r = doctest.DocTestRunner()
+    output = StringIO()
+    r.run(t, out=output.write)
+    return r.failures, output.getvalue()
+
+
+class TestMatchesListwise(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_docstring(self):
+        failure_count, output = run_doctest(
+            MatchesListwise, "MatchesListwise")
+        if failure_count:
+            self.fail("Doctest failed with %s" % output)
+
+
+class TestMatchesStructure(TestCase, TestMatchersInterface):
+
+    class SimpleClass:
+        def __init__(self, x, y):
+            self.x = x
+            self.y = y
+
+    matches_matcher = MatchesStructure(x=Equals(1), y=Equals(2))
+    matches_matches = [SimpleClass(1, 2)]
+    matches_mismatches = [
+        SimpleClass(2, 2),
+        SimpleClass(1, 1),
+        SimpleClass(3, 3),
+        ]
+
+    str_examples = [
+        ("MatchesStructure(x=Equals(1))", MatchesStructure(x=Equals(1))),
+        ("MatchesStructure(y=Equals(2))", MatchesStructure(y=Equals(2))),
+        ("MatchesStructure(x=Equals(1), y=Equals(2))",
+         MatchesStructure(x=Equals(1), y=Equals(2))),
+        ]
+
+    describe_examples = [
+        ("""\
+Differences: [
+3 != 1: x
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(3), y=Equals(2))),
+        ("""\
+Differences: [
+3 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(1), y=Equals(3))),
+        ("""\
+Differences: [
+0 != 1: x
+0 != 2: y
+]""", SimpleClass(1, 2), MatchesStructure(x=Equals(0), y=Equals(0))),
+        ]
+
+    def test_fromExample(self):
+        self.assertThat(
+            self.SimpleClass(1, 2),
+            MatchesStructure.fromExample(self.SimpleClass(1, 3), 'x'))
+
+    def test_byEquality(self):
+        self.assertThat(
+            self.SimpleClass(1, 2),
+            MatchesStructure.byEquality(x=1))
+
+    def test_withStructure(self):
+        self.assertThat(
+            self.SimpleClass(1, 2),
+            MatchesStructure.byMatcher(LessThan, x=2))
+
+    def test_update(self):
+        self.assertThat(
+            self.SimpleClass(1, 2),
+            MatchesStructure(x=NotEquals(1)).update(x=Equals(1)))
+
+    def test_update_none(self):
+        self.assertThat(
+            self.SimpleClass(1, 2),
+            MatchesStructure(x=Equals(1), z=NotEquals(42)).update(
+                z=None))
+
+
+class TestMatchesSetwise(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def assertMismatchWithDescriptionMatching(self, value, matcher,
+                                              description_matcher):
+        mismatch = matcher.match(value)
+        if mismatch is None:
+            self.fail("%s matched %s" % (matcher, value))
+        actual_description = mismatch.describe()
+        self.assertThat(
+            actual_description,
+            Annotate(
+                "%s matching %s" % (matcher, value),
+                description_matcher))
+
+    def test_matches(self):
+        self.assertIs(
+            None, MatchesSetwise(Equals(1), Equals(2)).match([2, 1]))
+
+    def test_mismatches(self):
+        self.assertMismatchWithDescriptionMatching(
+            [2, 3], MatchesSetwise(Equals(1), Equals(2)),
+            MatchesRegex('.*There was 1 mismatch$', re.S))
+
+    def test_too_many_matchers(self):
+        self.assertMismatchWithDescriptionMatching(
+            [2, 3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+            Equals('There was 1 matcher left over: Equals(1)'))
+
+    def test_too_many_values(self):
+        self.assertMismatchWithDescriptionMatching(
+            [1, 2, 3], MatchesSetwise(Equals(1), Equals(2)),
+            Equals('There was 1 value left over: [3]'))
+
+    def test_two_too_many_matchers(self):
+        self.assertMismatchWithDescriptionMatching(
+            [3], MatchesSetwise(Equals(1), Equals(2), Equals(3)),
+            MatchesRegex(
+                r'There were 2 matchers left over: Equals\([12]\), '
+                r'Equals\([12]\)'))
+
+    def test_two_too_many_values(self):
+        self.assertMismatchWithDescriptionMatching(
+            [1, 2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+            MatchesRegex(
+                r'There were 2 values left over: \[[34], [34]\]'))
+
+    def test_mismatch_and_too_many_matchers(self):
+        self.assertMismatchWithDescriptionMatching(
+            [2, 3], MatchesSetwise(Equals(0), Equals(1), Equals(2)),
+            MatchesRegex(
+                r'.*There was 1 mismatch and 1 extra matcher: Equals\([01]\)',
+                re.S))
+
+    def test_mismatch_and_too_many_values(self):
+        self.assertMismatchWithDescriptionMatching(
+            [2, 3, 4], MatchesSetwise(Equals(1), Equals(2)),
+            MatchesRegex(
+                r'.*There was 1 mismatch and 1 extra value: \[[34]\]',
+                re.S))
+
+    def test_mismatch_and_two_too_many_matchers(self):
+        self.assertMismatchWithDescriptionMatching(
+            [3, 4], MatchesSetwise(
+                Equals(0), Equals(1), Equals(2), Equals(3)),
+            MatchesRegex(
+                '.*There was 1 mismatch and 2 extra matchers: '
+                r'Equals\([012]\), Equals\([012]\)', re.S))
+
+    def test_mismatch_and_two_too_many_values(self):
+        self.assertMismatchWithDescriptionMatching(
+            [2, 3, 4, 5], MatchesSetwise(Equals(1), Equals(2)),
+            MatchesRegex(
+                r'.*There was 1 mismatch and 2 extra values: \[[145], [145]\]',
+                re.S))
+
+
+class TestContainsAllInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = ContainsAll(['foo', 'bar'])
+    matches_matches = [['foo', 'bar'], ['foo', 'z', 'bar'], ['bar', 'foo']]
+    matches_mismatches = [['f', 'g'], ['foo', 'baz'], []]
+
+    str_examples = [(
+        "MatchesAll(Contains('foo'), Contains('bar'))",
+        ContainsAll(['foo', 'bar'])),
+        ]
+
+    describe_examples = [("""Differences: [
+'baz' not in 'foo'
+]""",
+    'foo', ContainsAll(['foo', 'baz']))]
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_dict.py b/third_party/testtools/testtools/tests/matchers/test_dict.py
new file mode 100644
index 0000000..00368dd
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_dict.py
@@ -0,0 +1,227 @@
+from testtools import TestCase
+from testtools.matchers import (
+    Equals,
+    NotEquals,
+    Not,
+    )
+from testtools.matchers._dict import (
+    ContainedByDict,
+    ContainsDict,
+    KeysEqual,
+    MatchesAllDict,
+    MatchesDict,
+    _SubDictOf,
+    )
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestMatchesAllDictInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})
+    matches_matches = [3, 4]
+    matches_mismatches = [1, 2]
+
+    str_examples = [
+        ("MatchesAllDict({'a': NotEquals(1), 'b': NotEquals(2)})",
+         matches_matcher)]
+
+    describe_examples = [
+        ("""a: 1 == 1""", 1, matches_matcher),
+        ]
+
+
+class TestKeysEqualWithList(TestCase, TestMatchersInterface):
+
+    matches_matcher = KeysEqual('foo', 'bar')
+    matches_matches = [
+        {'foo': 0, 'bar': 1},
+        ]
+    matches_mismatches = [
+        {},
+        {'foo': 0},
+        {'bar': 1},
+        {'foo': 0, 'bar': 1, 'baz': 2},
+        {'a': None, 'b': None, 'c': None},
+        ]
+
+    str_examples = [
+        ("KeysEqual('foo', 'bar')", KeysEqual('foo', 'bar')),
+        ]
+
+    describe_examples = []
+
+    def test_description(self):
+        matchee = {'foo': 0, 'bar': 1, 'baz': 2}
+        mismatch = KeysEqual('foo', 'bar').match(matchee)
+        description = mismatch.describe()
+        self.assertThat(
+            description, Equals(
+                "['bar', 'foo'] does not match %r: Keys not equal"
+                % (matchee,)))
+
+
+class TestKeysEqualWithDict(TestKeysEqualWithList):
+
+    matches_matcher = KeysEqual({'foo': 3, 'bar': 4})
+
+
+class TestSubDictOf(TestCase, TestMatchersInterface):
+
+    matches_matcher = _SubDictOf({'foo': 'bar', 'baz': 'qux'})
+
+    matches_matches = [
+        {'foo': 'bar', 'baz': 'qux'},
+        {'foo': 'bar'},
+        ]
+
+    matches_mismatches = [
+        {'foo': 'bar', 'baz': 'qux', 'cat': 'dog'},
+        {'foo': 'bar', 'cat': 'dog'},
+        ]
+
+    str_examples = []
+    describe_examples = []
+
+
+class TestMatchesDict(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesDict(
+        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+    matches_matches = [
+        {'foo': 'bar', 'baz': None},
+        {'foo': 'bar', 'baz': 'quux'},
+        ]
+    matches_mismatches = [
+        {},
+        {'foo': 'bar', 'baz': 'qux'},
+        {'foo': 'bop', 'baz': 'qux'},
+        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+        {'foo': 'bar', 'cat': 'dog'},
+        ]
+
+    str_examples = [
+        ("MatchesDict({'baz': %s, 'foo': %s})" % (
+                Not(Equals('qux')), Equals('bar')),
+         matches_matcher),
+        ]
+
+    describe_examples = [
+        ("Missing: {\n"
+         "  'baz': Not(Equals('qux')),\n"
+         "  'foo': Equals('bar'),\n"
+         "}",
+         {}, matches_matcher),
+        ("Differences: {\n"
+         "  'baz': 'qux' matches Equals('qux'),\n"
+         "}",
+         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+        ("Differences: {\n"
+         "  'baz': 'qux' matches Equals('qux'),\n"
+         "  'foo': 'bar' != 'bop',\n"
+         "}",
+         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+        ("Extra: {\n"
+         "  'cat': 'dog',\n"
+         "}",
+         {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'}, matches_matcher),
+        ("Extra: {\n"
+         "  'cat': 'dog',\n"
+         "}\n"
+         "Missing: {\n"
+         "  'baz': Not(Equals('qux')),\n"
+         "}",
+         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+        ]
+
+
+class TestContainsDict(TestCase, TestMatchersInterface):
+
+    matches_matcher = ContainsDict(
+        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+    matches_matches = [
+        {'foo': 'bar', 'baz': None},
+        {'foo': 'bar', 'baz': 'quux'},
+        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+        ]
+    matches_mismatches = [
+        {},
+        {'foo': 'bar', 'baz': 'qux'},
+        {'foo': 'bop', 'baz': 'qux'},
+        {'foo': 'bar', 'cat': 'dog'},
+        {'foo': 'bar'},
+        ]
+
+    str_examples = [
+        ("ContainsDict({'baz': %s, 'foo': %s})" % (
+                Not(Equals('qux')), Equals('bar')),
+         matches_matcher),
+        ]
+
+    describe_examples = [
+        ("Missing: {\n"
+         "  'baz': Not(Equals('qux')),\n"
+         "  'foo': Equals('bar'),\n"
+         "}",
+         {}, matches_matcher),
+        ("Differences: {\n"
+         "  'baz': 'qux' matches Equals('qux'),\n"
+         "}",
+         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+        ("Differences: {\n"
+         "  'baz': 'qux' matches Equals('qux'),\n"
+         "  'foo': 'bar' != 'bop',\n"
+         "}",
+         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+        ("Missing: {\n"
+         "  'baz': Not(Equals('qux')),\n"
+         "}",
+         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+        ]
+
+
+class TestContainedByDict(TestCase, TestMatchersInterface):
+
+    matches_matcher = ContainedByDict(
+        {'foo': Equals('bar'), 'baz': Not(Equals('qux'))})
+
+    matches_matches = [
+        {},
+        {'foo': 'bar'},
+        {'foo': 'bar', 'baz': 'quux'},
+        {'baz': 'quux'},
+        ]
+    matches_mismatches = [
+        {'foo': 'bar', 'baz': 'quux', 'cat': 'dog'},
+        {'foo': 'bar', 'baz': 'qux'},
+        {'foo': 'bop', 'baz': 'qux'},
+        {'foo': 'bar', 'cat': 'dog'},
+        ]
+
+    str_examples = [
+        ("ContainedByDict({'baz': %s, 'foo': %s})" % (
+                Not(Equals('qux')), Equals('bar')),
+         matches_matcher),
+        ]
+
+    describe_examples = [
+        ("Differences: {\n"
+         "  'baz': 'qux' matches Equals('qux'),\n"
+         "}",
+         {'foo': 'bar', 'baz': 'qux'}, matches_matcher),
+        ("Differences: {\n"
+         "  'baz': 'qux' matches Equals('qux'),\n"
+         "  'foo': 'bar' != 'bop',\n"
+         "}",
+         {'foo': 'bop', 'baz': 'qux'}, matches_matcher),
+        ("Extra: {\n"
+         "  'cat': 'dog',\n"
+         "}",
+         {'foo': 'bar', 'cat': 'dog'}, matches_matcher),
+        ]
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_doctest.py b/third_party/testtools/testtools/tests/matchers/test_doctest.py
new file mode 100644
index 0000000..81b9579
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_doctest.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import doctest
+
+from testtools import TestCase
+from testtools.compat import (
+    str_is_unicode,
+    _b,
+    _u,
+    )
+from testtools.matchers._doctest import DocTestMatches
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
+    matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
+    matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
+
+    str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
+        DocTestMatches("Ran 1 test in ...s")),
+        ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
+        ]
+
+    describe_examples = [('Expected:\n    Ran 1 tests in ...s\nGot:\n'
+        '    Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
+        DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesInterfaceUnicode(TestCase, TestMatchersInterface):
+
+    matches_matcher = DocTestMatches(_u("\xa7..."), doctest.ELLIPSIS)
+    matches_matches = [_u("\xa7"), _u("\xa7 more\n")]
+    matches_mismatches = ["\\xa7", _u("more \xa7"), _u("\n\xa7")]
+
+    str_examples = [("DocTestMatches(%r)" % (_u("\xa7\n"),),
+        DocTestMatches(_u("\xa7"))),
+        ]
+
+    describe_examples = [(
+        _u("Expected:\n    \xa7\nGot:\n    a\n"),
+        "a",
+        DocTestMatches(_u("\xa7"), doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesSpecific(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test___init__simple(self):
+        matcher = DocTestMatches("foo")
+        self.assertEqual("foo\n", matcher.want)
+
+    def test___init__flags(self):
+        matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
+        self.assertEqual("bar\n", matcher.want)
+        self.assertEqual(doctest.ELLIPSIS, matcher.flags)
+
+    def test_describe_non_ascii_bytes(self):
+        """Even with bytestrings, the mismatch should be coercible to unicode
+
+        DocTestMatches is intended for text, but the Python 2 str type also
+        permits arbitrary binary inputs. This is a slightly bogus thing to do,
+        and under Python 3 using bytes objects will reasonably raise an error.
+        """
+        header = _b("\x89PNG\r\n\x1a\n...")
+        if str_is_unicode:
+            self.assertRaises(TypeError,
+                DocTestMatches, header, doctest.ELLIPSIS)
+            return
+        matcher = DocTestMatches(header, doctest.ELLIPSIS)
+        mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
+        # Must be treatable as unicode text, the exact output matters less
+        self.assertTrue(unicode(mismatch.describe()))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_exception.py b/third_party/testtools/testtools/tests/matchers/test_exception.py
new file mode 100644
index 0000000..a74043a
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_exception.py
@@ -0,0 +1,187 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import sys
+
+from testtools import TestCase
+from testtools.matchers import (
+    AfterPreprocessing,
+    Equals,
+    )
+from testtools.matchers._exception import (
+    MatchesException,
+    Raises,
+    raises,
+    )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+def make_error(type, *args, **kwargs):
+    try:
+        raise type(*args, **kwargs)
+    except type:
+        return sys.exc_info()
+
+
+class TestMatchesExceptionInstanceInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesException(ValueError("foo"))
+    error_foo = make_error(ValueError, 'foo')
+    error_bar = make_error(ValueError, 'bar')
+    error_base_foo = make_error(Exception, 'foo')
+    matches_matches = [error_foo]
+    matches_mismatches = [error_bar, error_base_foo]
+
+    str_examples = [
+        ("MatchesException(Exception('foo',))",
+         MatchesException(Exception('foo')))
+        ]
+    describe_examples = [
+        ("%r is not a %r" % (Exception, ValueError),
+         error_base_foo,
+         MatchesException(ValueError("foo"))),
+        ("ValueError('bar',) has different arguments to ValueError('foo',).",
+         error_bar,
+         MatchesException(ValueError("foo"))),
+        ]
+
+
+class TestMatchesExceptionTypeInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesException(ValueError)
+    error_foo = make_error(ValueError, 'foo')
+    error_sub = make_error(UnicodeError, 'bar')
+    error_base_foo = make_error(Exception, 'foo')
+    matches_matches = [error_foo, error_sub]
+    matches_mismatches = [error_base_foo]
+
+    str_examples = [
+        ("MatchesException(%r)" % Exception,
+         MatchesException(Exception))
+        ]
+    describe_examples = [
+        ("%r is not a %r" % (Exception, ValueError),
+         error_base_foo,
+         MatchesException(ValueError)),
+        ]
+
+
+class TestMatchesExceptionTypeReInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesException(ValueError, 'fo.')
+    error_foo = make_error(ValueError, 'foo')
+    error_sub = make_error(UnicodeError, 'foo')
+    error_bar = make_error(ValueError, 'bar')
+    matches_matches = [error_foo, error_sub]
+    matches_mismatches = [error_bar]
+
+    str_examples = [
+        ("MatchesException(%r)" % Exception,
+         MatchesException(Exception, 'fo.'))
+        ]
+    describe_examples = [
+        ("'bar' does not match /fo./",
+         error_bar, MatchesException(ValueError, "fo.")),
+        ]
+
+
+class TestMatchesExceptionTypeMatcherInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesException(
+        ValueError, AfterPreprocessing(str, Equals('foo')))
+    error_foo = make_error(ValueError, 'foo')
+    error_sub = make_error(UnicodeError, 'foo')
+    error_bar = make_error(ValueError, 'bar')
+    matches_matches = [error_foo, error_sub]
+    matches_mismatches = [error_bar]
+
+    str_examples = [
+        ("MatchesException(%r)" % Exception,
+         MatchesException(Exception, Equals('foo')))
+        ]
+    describe_examples = [
+        ("5 != %r" % (error_bar[1],),
+         error_bar, MatchesException(ValueError, Equals(5))),
+        ]
+
+
+class TestRaisesInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = Raises()
+    def boom():
+        raise Exception('foo')
+    matches_matches = [boom]
+    matches_mismatches = [lambda:None]
+
+    # Tricky to get function objects to render consistently, and the
+    # interfaces helper uses assertEqual rather than (for instance)
+    # DocTestMatches.
+    str_examples = []
+
+    describe_examples = []
+
+
+class TestRaisesExceptionMatcherInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = Raises(
+        exception_matcher=MatchesException(Exception('foo')))
+    def boom_bar():
+        raise Exception('bar')
+    def boom_foo():
+        raise Exception('foo')
+    matches_matches = [boom_foo]
+    matches_mismatches = [lambda:None, boom_bar]
+
+    # Tricky to get function objects to render consistently, and the
+    # interfaces helper uses assertEqual rather than (for instance)
+    # DocTestMatches.
+    str_examples = []
+
+    describe_examples = []
+
+
+class TestRaisesBaseTypes(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def raiser(self):
+        raise KeyboardInterrupt('foo')
+
+    def test_KeyboardInterrupt_matched(self):
+        # When KeyboardInterrupt is matched, it is swallowed.
+        matcher = Raises(MatchesException(KeyboardInterrupt))
+        self.assertThat(self.raiser, matcher)
+
+    def test_KeyboardInterrupt_propogates(self):
+        # The default 'it raised' propagates KeyboardInterrupt.
+        match_keyb = Raises(MatchesException(KeyboardInterrupt))
+        def raise_keyb_from_match():
+            matcher = Raises()
+            matcher.match(self.raiser)
+        self.assertThat(raise_keyb_from_match, match_keyb)
+
+    def test_KeyboardInterrupt_match_Exception_propogates(self):
+        # If the raised exception isn't matched, and it is not a subclass of
+        # Exception, it is propagated.
+        match_keyb = Raises(MatchesException(KeyboardInterrupt))
+        def raise_keyb_from_match():
+            matcher = Raises(MatchesException(Exception))
+            matcher.match(self.raiser)
+        self.assertThat(raise_keyb_from_match, match_keyb)
+
+
+class TestRaisesConvenience(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_exc_type(self):
+        self.assertThat(lambda: 1/0, raises(ZeroDivisionError))
+
+    def test_exc_value(self):
+        e = RuntimeError("You lose!")
+        def raiser():
+            raise e
+        self.assertThat(raiser, raises(e))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_filesystem.py b/third_party/testtools/testtools/tests/matchers/test_filesystem.py
new file mode 100644
index 0000000..917ff2e
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_filesystem.py
@@ -0,0 +1,243 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import os
+import shutil
+import tarfile
+import tempfile
+
+from testtools import TestCase
+from testtools.matchers import (
+    Contains,
+    DocTestMatches,
+    Equals,
+    )
+from testtools.matchers._filesystem import (
+    DirContains,
+    DirExists,
+    FileContains,
+    FileExists,
+    HasPermissions,
+    PathExists,
+    SamePath,
+    TarballContains,
+    )
+
+
+class PathHelpers(object):
+
+    def mkdtemp(self):
+        directory = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, directory)
+        return directory
+
+    def create_file(self, filename, contents=''):
+        fp = open(filename, 'w')
+        try:
+            fp.write(contents)
+        finally:
+            fp.close()
+
+    def touch(self, filename):
+        return self.create_file(filename)
+
+
+class TestPathExists(TestCase, PathHelpers):
+
+    def test_exists(self):
+        tempdir = self.mkdtemp()
+        self.assertThat(tempdir, PathExists())
+
+    def test_not_exists(self):
+        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+        mismatch = PathExists().match(doesntexist)
+        self.assertThat(
+            "%s does not exist." % doesntexist, Equals(mismatch.describe()))
+
+
+class TestDirExists(TestCase, PathHelpers):
+
+    def test_exists(self):
+        tempdir = self.mkdtemp()
+        self.assertThat(tempdir, DirExists())
+
+    def test_not_exists(self):
+        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+        mismatch = DirExists().match(doesntexist)
+        self.assertThat(
+            PathExists().match(doesntexist).describe(),
+            Equals(mismatch.describe()))
+
+    def test_not_a_directory(self):
+        filename = os.path.join(self.mkdtemp(), 'foo')
+        self.touch(filename)
+        mismatch = DirExists().match(filename)
+        self.assertThat(
+            "%s is not a directory." % filename, Equals(mismatch.describe()))
+
+
+class TestFileExists(TestCase, PathHelpers):
+
+    def test_exists(self):
+        tempdir = self.mkdtemp()
+        filename = os.path.join(tempdir, 'filename')
+        self.touch(filename)
+        self.assertThat(filename, FileExists())
+
+    def test_not_exists(self):
+        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+        mismatch = FileExists().match(doesntexist)
+        self.assertThat(
+            PathExists().match(doesntexist).describe(),
+            Equals(mismatch.describe()))
+
+    def test_not_a_file(self):
+        tempdir = self.mkdtemp()
+        mismatch = FileExists().match(tempdir)
+        self.assertThat(
+            "%s is not a file." % tempdir, Equals(mismatch.describe()))
+
+
+class TestDirContains(TestCase, PathHelpers):
+
+    def test_empty(self):
+        tempdir = self.mkdtemp()
+        self.assertThat(tempdir, DirContains([]))
+
+    def test_not_exists(self):
+        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+        mismatch = DirContains([]).match(doesntexist)
+        self.assertThat(
+            PathExists().match(doesntexist).describe(),
+            Equals(mismatch.describe()))
+
+    def test_contains_files(self):
+        tempdir = self.mkdtemp()
+        self.touch(os.path.join(tempdir, 'foo'))
+        self.touch(os.path.join(tempdir, 'bar'))
+        self.assertThat(tempdir, DirContains(['bar', 'foo']))
+
+    def test_matcher(self):
+        tempdir = self.mkdtemp()
+        self.touch(os.path.join(tempdir, 'foo'))
+        self.touch(os.path.join(tempdir, 'bar'))
+        self.assertThat(tempdir, DirContains(matcher=Contains('bar')))
+
+    def test_neither_specified(self):
+        self.assertRaises(AssertionError, DirContains)
+
+    def test_both_specified(self):
+        self.assertRaises(
+            AssertionError, DirContains, filenames=[], matcher=Contains('a'))
+
+    def test_does_not_contain_files(self):
+        tempdir = self.mkdtemp()
+        self.touch(os.path.join(tempdir, 'foo'))
+        mismatch = DirContains(['bar', 'foo']).match(tempdir)
+        self.assertThat(
+            Equals(['bar', 'foo']).match(['foo']).describe(),
+            Equals(mismatch.describe()))
+
+
+class TestFileContains(TestCase, PathHelpers):
+
+    def test_not_exists(self):
+        doesntexist = os.path.join(self.mkdtemp(), 'doesntexist')
+        mismatch = FileContains('').match(doesntexist)
+        self.assertThat(
+            PathExists().match(doesntexist).describe(),
+            Equals(mismatch.describe()))
+
+    def test_contains(self):
+        tempdir = self.mkdtemp()
+        filename = os.path.join(tempdir, 'foo')
+        self.create_file(filename, 'Hello World!')
+        self.assertThat(filename, FileContains('Hello World!'))
+
+    def test_matcher(self):
+        tempdir = self.mkdtemp()
+        filename = os.path.join(tempdir, 'foo')
+        self.create_file(filename, 'Hello World!')
+        self.assertThat(
+            filename, FileContains(matcher=DocTestMatches('Hello World!')))
+
+    def test_neither_specified(self):
+        self.assertRaises(AssertionError, FileContains)
+
+    def test_both_specified(self):
+        self.assertRaises(
+            AssertionError, FileContains, contents=[], matcher=Contains('a'))
+
+    def test_does_not_contain(self):
+        tempdir = self.mkdtemp()
+        filename = os.path.join(tempdir, 'foo')
+        self.create_file(filename, 'Goodbye Cruel World!')
+        mismatch = FileContains('Hello World!').match(filename)
+        self.assertThat(
+            Equals('Hello World!').match('Goodbye Cruel World!').describe(),
+            Equals(mismatch.describe()))
+
+
+class TestTarballContains(TestCase, PathHelpers):
+
+    def test_match(self):
+        tempdir = self.mkdtemp()
+        def in_temp_dir(x):
+            return os.path.join(tempdir, x)
+        self.touch(in_temp_dir('a'))
+        self.touch(in_temp_dir('b'))
+        tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+        tarball.add(in_temp_dir('a'), 'a')
+        tarball.add(in_temp_dir('b'), 'b')
+        tarball.close()
+        self.assertThat(
+            in_temp_dir('foo.tar.gz'), TarballContains(['b', 'a']))
+
+    def test_mismatch(self):
+        tempdir = self.mkdtemp()
+        def in_temp_dir(x):
+            return os.path.join(tempdir, x)
+        self.touch(in_temp_dir('a'))
+        self.touch(in_temp_dir('b'))
+        tarball = tarfile.open(in_temp_dir('foo.tar.gz'), 'w')
+        tarball.add(in_temp_dir('a'), 'a')
+        tarball.add(in_temp_dir('b'), 'b')
+        tarball.close()
+        mismatch = TarballContains(['d', 'c']).match(in_temp_dir('foo.tar.gz'))
+        self.assertEqual(
+            mismatch.describe(),
+            Equals(['c', 'd']).match(['a', 'b']).describe())
+
+
+class TestSamePath(TestCase, PathHelpers):
+
+    def test_same_string(self):
+        self.assertThat('foo', SamePath('foo'))
+
+    def test_relative_and_absolute(self):
+        path = 'foo'
+        abspath = os.path.abspath(path)
+        self.assertThat(path, SamePath(abspath))
+        self.assertThat(abspath, SamePath(path))
+
+    def test_real_path(self):
+        tempdir = self.mkdtemp()
+        source = os.path.join(tempdir, 'source')
+        self.touch(source)
+        target = os.path.join(tempdir, 'target')
+        try:
+            os.symlink(source, target)
+        except (AttributeError, NotImplementedError):
+            self.skip("No symlink support")
+        self.assertThat(source, SamePath(target))
+        self.assertThat(target, SamePath(source))
+
+
+class TestHasPermissions(TestCase, PathHelpers):
+
+    def test_match(self):
+        tempdir = self.mkdtemp()
+        filename = os.path.join(tempdir, 'filename')
+        self.touch(filename)
+        permissions = oct(os.stat(filename).st_mode)[-4:]
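+        # HasPermissions takes the last four octal digits of st_mode as a
+        # string, so a hypothetical literal usage (path made up for
+        # illustration) would be:
+        #     self.assertThat('/tmp/f', HasPermissions('0644'))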
+        self.assertThat(filename, HasPermissions(permissions))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_higherorder.py b/third_party/testtools/testtools/tests/matchers/test_higherorder.py
new file mode 100644
index 0000000..fb86b7f
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_higherorder.py
@@ -0,0 +1,254 @@
+# Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import (
+    DocTestMatches,
+    Equals,
+    LessThan,
+    MatchesStructure,
+    Mismatch,
+    NotEquals,
+    )
+from testtools.matchers._higherorder import (
+    AfterPreprocessing,
+    AllMatch,
+    Annotate,
+    AnnotatedMismatch,
+    AnyMatch,
+    MatchesAny,
+    MatchesAll,
+    MatchesPredicate,
+    MatchesPredicateWithParams,
+    Not,
+    )
+from testtools.tests.helpers import FullStackRunTest
+from testtools.tests.matchers.helpers import TestMatchersInterface
+
+
+class TestAllMatch(TestCase, TestMatchersInterface):
+
+    matches_matcher = AllMatch(LessThan(10))
+    matches_matches = [
+        [9, 9, 9],
+        (9, 9),
+        iter([9, 9, 9, 9, 9]),
+        ]
+    matches_mismatches = [
+        [11, 9, 9],
+        iter([9, 12, 9, 11]),
+        ]
+
+    str_examples = [
+        ("AllMatch(LessThan(12))", AllMatch(LessThan(12))),
+        ]
+
+    describe_examples = [
+        ('Differences: [\n'
+         '10 is not > 11\n'
+         '10 is not > 10\n'
+         ']',
+         [11, 9, 10],
+         AllMatch(LessThan(10))),
+        ]
+
+
+class TestAnyMatch(TestCase, TestMatchersInterface):
+
+    matches_matcher = AnyMatch(Equals('elephant'))
+    matches_matches = [
+        ['grass', 'cow', 'steak', 'milk', 'elephant'],
+        (13, 'elephant'),
+        ['elephant', 'elephant', 'elephant'],
+        set(['hippo', 'rhino', 'elephant']),
+        ]
+    matches_mismatches = [
+        [],
+        ['grass', 'cow', 'steak', 'milk'],
+        (13, 12, 10),
+        ['element', 'hephalump', 'pachyderm'],
+        set(['hippo', 'rhino', 'diplodocus']),
+        ]
+
+    str_examples = [
+        ("AnyMatch(Equals('elephant'))", AnyMatch(Equals('elephant'))),
+        ]
+
+    describe_examples = [
+        ('Differences: [\n'
+         '7 != 11\n'
+         '7 != 9\n'
+         '7 != 10\n'
+         ']',
+         [11, 9, 10],
+         AnyMatch(Equals(7))),
+        ]
+
+
+class TestAfterPreprocessing(TestCase, TestMatchersInterface):
+
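+    # Note: 'parity' is deliberately defined without 'self'; it is used as a
+    # plain function while the class body is evaluated, not as a bound
+    # method.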
+    def parity(x):
+        return x % 2
+
+    matches_matcher = AfterPreprocessing(parity, Equals(1))
+    matches_matches = [3, 5]
+    matches_mismatches = [2]
+
+    str_examples = [
+        ("AfterPreprocessing(<function parity>, Equals(1))",
+         AfterPreprocessing(parity, Equals(1))),
+        ]
+
+    describe_examples = [
+        ("1 != 0: after <function parity> on 2", 2,
+         AfterPreprocessing(parity, Equals(1))),
+        ("1 != 0", 2,
+         AfterPreprocessing(parity, Equals(1), annotate=False)),
+        ]
+
+
+class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
+    matches_matches = ["1", "2"]
+    matches_mismatches = ["3"]
+
+    str_examples = [(
+        "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
+        MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
+        ]
+
+    describe_examples = [("""Differences: [
+Expected:
+    1
+Got:
+    3
+
+Expected:
+    2
+Got:
+    3
+
+]""",
+        "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
+
+
+class TestMatchesAllInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
+    matches_matches = [3, 4]
+    matches_mismatches = [1, 2]
+
+    str_examples = [
+        ("MatchesAll(NotEquals(1), NotEquals(2))",
+         MatchesAll(NotEquals(1), NotEquals(2)))]
+
+    describe_examples = [
+        ("""Differences: [
+1 == 1
+]""",
+         1, MatchesAll(NotEquals(1), NotEquals(2))),
+        ("1 == 1", 1,
+         MatchesAll(NotEquals(2), NotEquals(1), Equals(3), first_only=True)),
+        ]
+
+
+class TestAnnotate(TestCase, TestMatchersInterface):
+
+    matches_matcher = Annotate("foo", Equals(1))
+    matches_matches = [1]
+    matches_mismatches = [2]
+
+    str_examples = [
+        ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
+
+    describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
+
+    def test_if_message_no_message(self):
+        # Annotate.if_message returns the given matcher if there is no
+        # message.
+        matcher = Equals(1)
+        not_annotated = Annotate.if_message('', matcher)
+        self.assertIs(matcher, not_annotated)
+
+    def test_if_message_given_message(self):
+        # Annotate.if_message returns an annotated version of the matcher if a
+        # message is provided.
+        matcher = Equals(1)
+        expected = Annotate('foo', matcher)
+        annotated = Annotate.if_message('foo', matcher)
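+        # MatchesStructure.fromExample builds a matcher that compares only
+        # the named attributes ('annotation' and 'matcher') against the
+        # example object.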
+        self.assertThat(
+            annotated,
+            MatchesStructure.fromExample(expected, 'annotation', 'matcher'))
+
+
+class TestAnnotatedMismatch(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_forwards_details(self):
+        x = Mismatch('description', {'foo': 'bar'})
+        annotated = AnnotatedMismatch("annotation", x)
+        self.assertEqual(x.get_details(), annotated.get_details())
+
+
+class TestNotInterface(TestCase, TestMatchersInterface):
+
+    matches_matcher = Not(Equals(1))
+    matches_matches = [2]
+    matches_mismatches = [1]
+
+    str_examples = [
+        ("Not(Equals(1))", Not(Equals(1))),
+        ("Not(Equals('1'))", Not(Equals('1')))]
+
+    describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
+
+
+def is_even(x):
+    return x % 2 == 0
+
+
+class TestMatchesPredicate(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesPredicate(is_even, "%s is not even")
+    matches_matches = [2, 4, 6, 8]
+    matches_mismatches = [3, 5, 7, 9]
+
+    str_examples = [
+        ("MatchesPredicate(%r, %r)" % (is_even, "%s is not even"),
+         MatchesPredicate(is_even, "%s is not even")),
+        ]
+
+    describe_examples = [
+        ('7 is not even', 7, MatchesPredicate(is_even, "%s is not even")),
+        ]
+
+
+def between(x, low, high):
+    return low < x < high
+
+
+class TestMatchesPredicateWithParams(TestCase, TestMatchersInterface):
+
+    matches_matcher = MatchesPredicateWithParams(
+        between, "{0} is not between {1} and {2}")(1, 9)
+    matches_matches = [2, 4, 6, 8]
+    matches_mismatches = [0, 1, 9, 10]
+
+    str_examples = [
+        ("MatchesPredicateWithParams(%r, %r)(%s)" % (
+            between, "{0} is not between {1} and {2}", "1, 2"),
+         MatchesPredicateWithParams(
+            between, "{0} is not between {1} and {2}")(1, 2)),
+        ("Between(1, 2)", MatchesPredicateWithParams(
+            between, "{0} is not between {1} and {2}", "Between")(1, 2)),
+        ]
+
+    describe_examples = [
+        ('1 is not between 2 and 3', 1, MatchesPredicateWithParams(
+            between, "{0} is not between {1} and {2}")(2, 3)),
+        ]
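+    # Illustrative sketch (not upstream code): the factory is curried, so a
+    # reusable matcher family can be built once and the parameters bound per
+    # assertion:
+    #     IsBetween = MatchesPredicateWithParams(
+    #         between, "{0} is not between {1} and {2}")
+    #     self.assertThat(5, IsBetween(1, 9))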
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/matchers/test_impl.py b/third_party/testtools/testtools/tests/matchers/test_impl.py
new file mode 100644
index 0000000..10967ea
--- /dev/null
+++ b/third_party/testtools/testtools/tests/matchers/test_impl.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for matchers."""
+
+from testtools import (
+    Matcher, # check that Matcher is exposed at the top level for docs.
+    TestCase,
+    )
+from testtools.compat import (
+    str_is_unicode,
+    text_repr,
+    _u,
+    )
+from testtools.matchers import (
+    Equals,
+    MatchesException,
+    Raises,
+    )
+from testtools.matchers._impl import (
+    Mismatch,
+    MismatchDecorator,
+    MismatchError,
+    )
+from testtools.tests.helpers import FullStackRunTest
+
+# Silence pyflakes.
+Matcher
+
+
+class TestMismatch(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_constructor_arguments(self):
+        mismatch = Mismatch("some description", {'detail': "things"})
+        self.assertEqual("some description", mismatch.describe())
+        self.assertEqual({'detail': "things"}, mismatch.get_details())
+
+    def test_constructor_no_arguments(self):
+        mismatch = Mismatch()
+        self.assertThat(mismatch.describe,
+            Raises(MatchesException(NotImplementedError)))
+        self.assertEqual({}, mismatch.get_details())
+
+
+class TestMismatchError(TestCase):
+
+    def test_is_assertion_error(self):
+        # MismatchError is an AssertionError, so that most of the time, it
+        # looks like a test failure, rather than an error.
+        def raise_mismatch_error():
+            raise MismatchError(2, Equals(3), Equals(3).match(2))
+        self.assertRaises(AssertionError, raise_mismatch_error)
+
+    def test_default_description_is_mismatch(self):
+        mismatch = Equals(3).match(2)
+        e = MismatchError(2, Equals(3), mismatch)
+        self.assertEqual(mismatch.describe(), str(e))
+
+    def test_default_description_unicode(self):
+        matchee = _u('\xa7')
+        matcher = Equals(_u('a'))
+        mismatch = matcher.match(matchee)
+        e = MismatchError(matchee, matcher, mismatch)
+        self.assertEqual(mismatch.describe(), str(e))
+
+    def test_verbose_description(self):
+        matchee = 2
+        matcher = Equals(3)
+        mismatch = matcher.match(2)
+        e = MismatchError(matchee, matcher, mismatch, True)
+        expected = (
+            'Match failed. Matchee: %r\n'
+            'Matcher: %s\n'
+            'Difference: %s\n' % (
+                matchee,
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        self.assertEqual(expected, str(e))
+
+    def test_verbose_unicode(self):
+        # When assertThat is given matchees or matchers that contain non-ASCII
+        # unicode strings, we can still provide a meaningful error.
+        matchee = _u('\xa7')
+        matcher = Equals(_u('a'))
+        mismatch = matcher.match(matchee)
+        expected = (
+            'Match failed. Matchee: %s\n'
+            'Matcher: %s\n'
+            'Difference: %s\n' % (
+                text_repr(matchee),
+                matcher,
+                mismatch.describe(),
+                ))
+        e = MismatchError(matchee, matcher, mismatch, True)
+        if str_is_unicode:
+            actual = str(e)
+        else:
+            actual = unicode(e)
+            # Using str() should still work, and return ascii only
+            self.assertEqual(
+                expected.replace(matchee, matchee.encode("unicode-escape")),
+                str(e).decode("ascii"))
+        self.assertEqual(expected, actual)
+
+
+class TestMismatchDecorator(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def test_forwards_description(self):
+        x = Mismatch("description", {'foo': 'bar'})
+        decorated = MismatchDecorator(x)
+        self.assertEqual(x.describe(), decorated.describe())
+
+    def test_forwards_details(self):
+        x = Mismatch("description", {'foo': 'bar'})
+        decorated = MismatchDecorator(x)
+        self.assertEqual(x.get_details(), decorated.get_details())
+
+    def test_repr(self):
+        x = Mismatch("description", {'foo': 'bar'})
+        decorated = MismatchDecorator(x)
+        self.assertEqual(
+            '<testtools.matchers.MismatchDecorator(%r)>' % (x,),
+            repr(decorated))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_assert_that.py b/third_party/testtools/testtools/tests/test_assert_that.py
new file mode 100644
index 0000000..66b4166
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_assert_that.py
@@ -0,0 +1,152 @@
+from doctest import ELLIPSIS
+
+from testtools import (
+    TestCase,
+    )
+from testtools.assertions import (
+    assert_that,
+    )
+from testtools.compat import (
+    _u,
+    )
+from testtools.content import (
+    TracebackContent,
+    )
+from testtools.matchers import (
+    Annotate,
+    DocTestMatches,
+    Equals,
+    )
+
+
+class AssertThatTests(object):
+    """A mixin containing shared tests for assertThat and assert_that."""
+
+    def assert_that_callable(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def assertFails(self, message, function, *args, **kwargs):
+        """Assert that function raises a failure with the given message."""
+        failure = self.assertRaises(
+            self.failureException, function, *args, **kwargs)
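+        # DocTestMatches with doctest's ELLIPSIS flag lets '...' act as a
+        # wildcard in the expected failure message.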
+        self.assert_that_callable(failure, DocTestMatches(message, ELLIPSIS))
+
+    def test_assertThat_matches_clean(self):
+        class Matcher(object):
+            def match(self, foo):
+                return None
+        self.assert_that_callable("foo", Matcher())
+
+    def test_assertThat_mismatch_raises_description(self):
+        calls = []
+        class Mismatch(object):
+            def __init__(self, thing):
+                self.thing = thing
+            def describe(self):
+                calls.append(('describe_diff', self.thing))
+                return "object is not a thing"
+            def get_details(self):
+                return {}
+        class Matcher(object):
+            def match(self, thing):
+                calls.append(('match', thing))
+                return Mismatch(thing)
+            def __str__(self):
+                calls.append(('__str__',))
+                return "a description"
+        class Test(type(self)):
+            def test(self):
+                self.assert_that_callable("foo", Matcher())
+        result = Test("test").run()
+        self.assertEqual([
+            ('match', "foo"),
+            ('describe_diff', "foo"),
+            ], calls)
+        self.assertFalse(result.wasSuccessful())
+
+    def test_assertThat_output(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = matcher.match(matchee).describe()
+        self.assertFails(expected, self.assert_that_callable, matchee, matcher)
+
+    def test_assertThat_message_is_annotated(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = Annotate('woo', matcher).match(matchee).describe()
+        self.assertFails(expected,
+                         self.assert_that_callable, matchee, matcher, 'woo')
+
+    def test_assertThat_verbose_output(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = (
+            'Match failed. Matchee: %r\n'
+            'Matcher: %s\n'
+            'Difference: %s\n' % (
+                matchee,
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        self.assertFails(
+            expected,
+            self.assert_that_callable, matchee, matcher, verbose=True)
+
+    def get_error_string(self, e):
+        """Get the string showing how 'e' would be formatted in test output.
+
+        This is a little bit hacky, since it's designed to give consistent
+        output regardless of Python version.
+
+        In testtools, TestResult._exc_info_to_unicode is the point of dispatch
+        between various different implementations of methods that format
+        exceptions, so that's what we have to call. However, that method cares
+        about stack traces and formats the exception class. We don't care
+        about either of these, so we take its output and parse it a little.
+        """
+        error = TracebackContent((e.__class__, e, None), self).as_text()
+        # We aren't at all interested in the traceback.
+        if error.startswith('Traceback (most recent call last):\n'):
+            lines = error.splitlines(True)[1:]
+            for i, line in enumerate(lines):
+                if not line.startswith(' '):
+                    break
+            error = ''.join(lines[i:])
+        # We aren't interested in how the exception type is formatted.
+        exc_class, error = error.split(': ', 1)
+        return error
+
+    def test_assertThat_verbose_unicode(self):
+        # When assertThat is given matchees or matchers that contain non-ASCII
+        # unicode strings, we can still provide a meaningful error.
+        matchee = _u('\xa7')
+        matcher = Equals(_u('a'))
+        expected = (
+            'Match failed. Matchee: %s\n'
+            'Matcher: %s\n'
+            'Difference: %s\n\n' % (
+                repr(matchee).replace("\\xa7", matchee),
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        e = self.assertRaises(
+            self.failureException, self.assert_that_callable, matchee, matcher,
+            verbose=True)
+        self.assertEqual(expected, self.get_error_string(e))
+
+
+class TestAssertThatFunction(AssertThatTests, TestCase):
+
+    def assert_that_callable(self, *args, **kwargs):
+        return assert_that(*args, **kwargs)
+
+
+class TestAssertThatMethod(AssertThatTests, TestCase):
+
+    def assert_that_callable(self, *args, **kwargs):
+        return self.assertThat(*args, **kwargs)
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_compat.py b/third_party/testtools/testtools/tests/test_compat.py
new file mode 100644
index 0000000..84e57be
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_compat.py
@@ -0,0 +1,603 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for miscellaneous compatibility functions"""
+
+import io
+import linecache
+import os
+import sys
+import tempfile
+import traceback
+
+import testtools
+
+from testtools.compat import (
+    _b,
+    _detect_encoding,
+    _format_exc_info,
+    _format_exception_only,
+    _format_stack_list,
+    _get_source_encoding,
+    _u,
+    reraise,
+    str_is_unicode,
+    text_repr,
+    unicode_output_stream,
+    )
+from testtools.matchers import (
+    Equals,
+    Is,
+    IsInstance,
+    MatchesException,
+    Not,
+    Raises,
+    )
+
+
+class TestDetectEncoding(testtools.TestCase):
+    """Test detection of Python source encodings"""
+
+    def _check_encoding(self, expected, lines, possibly_invalid=False):
+        """Check lines are valid Python and encoding is as expected"""
+        if not possibly_invalid:
+            compile(_b("".join(lines)), "<str>", "exec")
+        encoding = _detect_encoding(lines)
+        self.assertEqual(expected, encoding,
+            "Encoding %r expected but got %r from lines %r" %
+                (expected, encoding, lines))
+
+    def test_examples_from_pep(self):
+        """Check the examples given in PEP 263 all work as specified
+
+        See 'Examples' section of <http://www.python.org/dev/peps/pep-0263/>
+        """
+        # With interpreter binary and using Emacs style file encoding comment:
+        self._check_encoding("latin-1", (
+            "#!/usr/bin/python\n",
+            "# -*- coding: latin-1 -*-\n",
+            "import os, sys\n"))
+        self._check_encoding("iso-8859-15", (
+            "#!/usr/bin/python\n",
+            "# -*- coding: iso-8859-15 -*-\n",
+            "import os, sys\n"))
+        self._check_encoding("ascii", (
+            "#!/usr/bin/python\n",
+            "# -*- coding: ascii -*-\n",
+            "import os, sys\n"))
+        # Without interpreter line, using plain text:
+        self._check_encoding("utf-8", (
+            "# This Python file uses the following encoding: utf-8\n",
+            "import os, sys\n"))
+        # Text editors might have different ways of defining the file's
+        # encoding, e.g.
+        self._check_encoding("latin-1", (
+            "#!/usr/local/bin/python\n",
+            "# coding: latin-1\n",
+            "import os, sys\n"))
+        # Without encoding comment, Python's parser will assume ASCII text:
+        self._check_encoding("ascii", (
+            "#!/usr/local/bin/python\n",
+            "import os, sys\n"))
+        # Encoding comments which don't work:
+        #   Missing "coding:" prefix:
+        self._check_encoding("ascii", (
+            "#!/usr/local/bin/python\n",
+            "# latin-1\n",
+            "import os, sys\n"))
+        #   Encoding comment not on line 1 or 2:
+        self._check_encoding("ascii", (
+            "#!/usr/local/bin/python\n",
+            "#\n",
+            "# -*- coding: latin-1 -*-\n",
+            "import os, sys\n"))
+        #   Unsupported encoding:
+        self._check_encoding("ascii", (
+            "#!/usr/local/bin/python\n",
+            "# -*- coding: utf-42 -*-\n",
+            "import os, sys\n"),
+            possibly_invalid=True)
+
+    def test_bom(self):
+        """Test the UTF-8 BOM counts as an encoding declaration"""
+        self._check_encoding("utf-8", (
+            "\xef\xbb\xbfimport sys\n",
+            ))
+        self._check_encoding("utf-8", (
+            "\xef\xbb\xbf# File encoding: utf-8\n",
+            ))
+        self._check_encoding("utf-8", (
+            '\xef\xbb\xbf"""Module docstring\n',
+            '\xef\xbb\xbfThat should just be a ZWNB"""\n'))
+        self._check_encoding("latin-1", (
+            '"""Is this coding: latin-1 or coding: utf-8 instead?\n',
+            '\xef\xbb\xbfThose should be latin-1 bytes"""\n'))
+        self._check_encoding("utf-8", (
+            "\xef\xbb\xbf# Is the coding: utf-8 or coding: euc-jp instead?\n",
+            '"""Module docstring say \xe2\x98\x86"""\n'),
+            possibly_invalid=True)
+
+    def test_multiple_coding_comments(self):
+        """Test only the first of multiple coding declarations counts"""
+        self._check_encoding("iso-8859-1", (
+            "# Is the coding: iso-8859-1\n",
+            "# Or is it coding: iso-8859-2\n"),
+            possibly_invalid=True)
+        self._check_encoding("iso-8859-1", (
+            "#!/usr/bin/python\n",
+            "# Is the coding: iso-8859-1\n",
+            "# Or is it coding: iso-8859-2\n"))
+        self._check_encoding("iso-8859-1", (
+            "# Is the coding: iso-8859-1 or coding: iso-8859-2\n",
+            "# Or coding: iso-8859-3 or coding: iso-8859-4\n"),
+            possibly_invalid=True)
+        self._check_encoding("iso-8859-2", (
+            "# Is the coding iso-8859-1 or coding: iso-8859-2\n",
+            "# Spot the missing colon above\n"))
+
+
+class TestGetSourceEncoding(testtools.TestCase):
+    """Test reading and caching the encodings of source files"""
+
+    def setUp(self):
+        testtools.TestCase.setUp(self)
+        dir = tempfile.mkdtemp()
+        self.addCleanup(os.rmdir, dir)
+        self.filename = os.path.join(dir, self.id().rsplit(".", 1)[1] + ".py")
+        self._written = False
+
+    def put_source(self, text):
+        f = open(self.filename, "w")
+        try:
+            f.write(text)
+        finally:
+            f.close()
+            if not self._written:
+                self._written = True
+                self.addCleanup(os.remove, self.filename)
+                self.addCleanup(linecache.cache.pop, self.filename, None)
+
+    def test_nonexistent_file_as_ascii(self):
+        """When a file can't be found, the encoding should default to ascii"""
+        self.assertEquals("ascii", _get_source_encoding(self.filename))
+
+    def test_encoding_is_cached(self):
+        """The encoding should stay the same if the cache isn't invalidated"""
+        self.put_source(
+            "# coding: iso-8859-13\n"
+            "import os\n")
+        self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+        self.put_source(
+            "# coding: rot-13\n"
+            "vzcbeg bf\n")
+        self.assertEquals("iso-8859-13", _get_source_encoding(self.filename))
+
+    def test_traceback_rechecks_encoding(self):
+        """A traceback function checks the cache and resets the encoding"""
+        self.put_source(
+            "# coding: iso-8859-8\n"
+            "import os\n")
+        self.assertEquals("iso-8859-8", _get_source_encoding(self.filename))
+        self.put_source(
+            "# coding: utf-8\n"
+            "import os\n")
+        try:
+            exec (compile("raise RuntimeError\n", self.filename, "exec"))
+        except RuntimeError:
+            traceback.extract_tb(sys.exc_info()[2])
+        else:
+            self.fail("RuntimeError not raised")
+        self.assertEquals("utf-8", _get_source_encoding(self.filename))
+
+
+class _FakeOutputStream(object):
+    """A simple file-like object for testing"""
+
+    def __init__(self):
+        self.writelog = []
+
+    def write(self, obj):
+        self.writelog.append(obj)
+
+
+class TestUnicodeOutputStream(testtools.TestCase):
+    """Test wrapping output streams so they work with arbitrary unicode"""
+
+    uni = _u("pa\u026a\u03b8\u0259n")
+
+    def setUp(self):
+        super(TestUnicodeOutputStream, self).setUp()
+        if sys.platform == "cli":
+            self.skip("IronPython shouldn't wrap streams to do encoding")
+
+    def test_no_encoding_becomes_ascii(self):
+        """A stream with no encoding attribute gets ascii/replace strings"""
+        sout = _FakeOutputStream()
+        unicode_output_stream(sout).write(self.uni)
+        self.assertEqual([_b("pa???n")], sout.writelog)
+
+    def test_encoding_as_none_becomes_ascii(self):
+        """A stream with encoding value of None gets ascii/replace strings"""
+        sout = _FakeOutputStream()
+        sout.encoding = None
+        unicode_output_stream(sout).write(self.uni)
+        self.assertEqual([_b("pa???n")], sout.writelog)
+
+    def test_bogus_encoding_becomes_ascii(self):
+        """A stream with a bogus encoding gets ascii/replace strings"""
+        sout = _FakeOutputStream()
+        sout.encoding = "bogus"
+        unicode_output_stream(sout).write(self.uni)
+        self.assertEqual([_b("pa???n")], sout.writelog)
+
+    def test_partial_encoding_replace(self):
+        """A string which can be partly encoded correctly should be"""
+        sout = _FakeOutputStream()
+        sout.encoding = "iso-8859-7"
+        unicode_output_stream(sout).write(self.uni)
+        self.assertEqual([_b("pa?\xe8?n")], sout.writelog)
+
+    @testtools.skipIf(str_is_unicode, "Tests behaviour when str is not unicode")
+    def test_unicode_encodings_wrapped_when_str_is_not_unicode(self):
+        """A unicode encoding is wrapped but needs no error handler"""
+        sout = _FakeOutputStream()
+        sout.encoding = "utf-8"
+        uout = unicode_output_stream(sout)
+        self.assertEqual(uout.errors, "strict")
+        uout.write(self.uni)
+        self.assertEqual([_b("pa\xc9\xaa\xce\xb8\xc9\x99n")], sout.writelog)
+
+    @testtools.skipIf(not str_is_unicode, "Tests behaviour when str is unicode")
+    def test_unicode_encodings_not_wrapped_when_str_is_unicode(self):
+        # No wrapping needed if native str type is unicode
+        sout = _FakeOutputStream()
+        sout.encoding = "utf-8"
+        uout = unicode_output_stream(sout)
+        self.assertIs(uout, sout)
+
+    def test_stringio(self):
+        """A StringIO object should maybe get an ascii native str type"""
+        try:
+            from cStringIO import StringIO
+            newio = False
+        except ImportError:
+            from io import StringIO
+            newio = True
+        sout = StringIO()
+        soutwrapper = unicode_output_stream(sout)
+        soutwrapper.write(self.uni)
+        if newio:
+            self.assertEqual(self.uni, sout.getvalue())
+        else:
+            self.assertEqual("pa???n", sout.getvalue())
+
+    def test_io_stringio(self):
+        # io.StringIO only accepts unicode so should be returned as itself.
+        s = io.StringIO()
+        self.assertEqual(s, unicode_output_stream(s))
+
+    def test_io_bytesio(self):
+        # io.BytesIO only accepts bytes so should be wrapped.
+        bytes_io = io.BytesIO()
+        self.assertThat(bytes_io, Not(Is(unicode_output_stream(bytes_io))))
+        # Will error if s was not wrapped properly.
+        unicode_output_stream(bytes_io).write(_u('foo'))
+
+    def test_io_textwrapper(self):
+        # TextIOWrapper is a text (unicode) stream, so it should be returned
+        # as itself.
+        text_io = io.TextIOWrapper(io.BytesIO())
+        self.assertThat(unicode_output_stream(text_io), Is(text_io))
+        # To be sure...
+        unicode_output_stream(text_io).write(_u('foo'))
+
+
+class TestTextRepr(testtools.TestCase):
+    """Ensure in extending repr, basic behaviours are not being broken"""
+
+    ascii_examples = (
+        # Single character examples
+        #  C0 control codes should be escaped except multiline \n
+        ("\x00", "'\\x00'", "'''\\\n\\x00'''"),
+        ("\b", "'\\x08'", "'''\\\n\\x08'''"),
+        ("\t", "'\\t'", "'''\\\n\\t'''"),
+        ("\n", "'\\n'", "'''\\\n\n'''"),
+        ("\r", "'\\r'", "'''\\\n\\r'''"),
+        #  Quotes and backslash should match normal repr behaviour
+        ('"', "'\"'", "'''\\\n\"'''"),
+        ("'", "\"'\"", "'''\\\n\\''''"),
+        ("\\", "'\\\\'", "'''\\\n\\\\'''"),
+        #  DEL is also unprintable and should be escaped
+        ("\x7F", "'\\x7f'", "'''\\\n\\x7f'''"),
+
+        # Character combinations that need double checking
+        ("\r\n", "'\\r\\n'", "'''\\\n\\r\n'''"),
+        ("\"'", "'\"\\''", "'''\\\n\"\\''''"),
+        ("'\"", "'\\'\"'", "'''\\\n'\"'''"),
+        ("\\n", "'\\\\n'", "'''\\\n\\\\n'''"),
+        ("\\\n", "'\\\\\\n'", "'''\\\n\\\\\n'''"),
+        ("\\' ", "\"\\\\' \"", "'''\\\n\\\\' '''"),
+        ("\\'\n", "\"\\\\'\\n\"", "'''\\\n\\\\'\n'''"),
+        ("\\'\"", "'\\\\\\'\"'", "'''\\\n\\\\'\"'''"),
+        ("\\'''", "\"\\\\'''\"", "'''\\\n\\\\\\'\\'\\''''"),
+        )
+
+    # Bytes with the high bit set should always be escaped
+    bytes_examples = (
+        (_b("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
+        (_b("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
+        (_b("\xC0"), "'\\xc0'", "'''\\\n\\xc0'''"),
+        (_b("\xFF"), "'\\xff'", "'''\\\n\\xff'''"),
+        (_b("\xC2\xA7"), "'\\xc2\\xa7'", "'''\\\n\\xc2\\xa7'''"),
+        )
+
+    # Unicode doesn't escape printable characters as per the Python 3 model
+    unicode_examples = (
+        # C1 codes are unprintable
+        (_u("\x80"), "'\\x80'", "'''\\\n\\x80'''"),
+        (_u("\x9F"), "'\\x9f'", "'''\\\n\\x9f'''"),
+        # No-break space is unprintable
+        (_u("\xA0"), "'\\xa0'", "'''\\\n\\xa0'''"),
+        # Letters latin alphabets are printable
+        (_u("\xA1"), _u("'\xa1'"), _u("'''\\\n\xa1'''")),
+        (_u("\xFF"), _u("'\xff'"), _u("'''\\\n\xff'''")),
+        (_u("\u0100"), _u("'\u0100'"), _u("'''\\\n\u0100'''")),
+        # Line and paragraph separators are unprintable
+        (_u("\u2028"), "'\\u2028'", "'''\\\n\\u2028'''"),
+        (_u("\u2029"), "'\\u2029'", "'''\\\n\\u2029'''"),
+        # Unpaired surrogates are unprintable
+        (_u("\uD800"), "'\\ud800'", "'''\\\n\\ud800'''"),
+        (_u("\uDFFF"), "'\\udfff'", "'''\\\n\\udfff'''"),
+        # Unprintable general categories not fully tested: Cc, Cf, Co, Cn, Zs
+        )
+
+    b_prefix = repr(_b(""))[:-2]
+    u_prefix = repr(_u(""))[:-2]
+
+    def test_ascii_examples_oneline_bytes(self):
+        for s, expected, _ in self.ascii_examples:
+            b = _b(s)
+            actual = text_repr(b, multiline=False)
+            # Add self.assertIsInstance check?
+            self.assertEqual(actual, self.b_prefix + expected)
+            self.assertEqual(eval(actual), b)
+
+    def test_ascii_examples_oneline_unicode(self):
+        for s, expected, _ in self.ascii_examples:
+            u = _u(s)
+            actual = text_repr(u, multiline=False)
+            self.assertEqual(actual, self.u_prefix + expected)
+            self.assertEqual(eval(actual), u)
+
+    def test_ascii_examples_multiline_bytes(self):
+        for s, _, expected in self.ascii_examples:
+            b = _b(s)
+            actual = text_repr(b, multiline=True)
+            self.assertEqual(actual, self.b_prefix + expected)
+            self.assertEqual(eval(actual), b)
+
+    def test_ascii_examples_multiline_unicode(self):
+        for s, _, expected in self.ascii_examples:
+            u = _u(s)
+            actual = text_repr(u, multiline=True)
+            self.assertEqual(actual, self.u_prefix + expected)
+            self.assertEqual(eval(actual), u)
+
+    def test_ascii_examples_defaultline_bytes(self):
+        for s, one, multi in self.ascii_examples:
+            expected = "\n" in s and multi or one
+            self.assertEqual(text_repr(_b(s)), self.b_prefix + expected)
+
+    def test_ascii_examples_defaultline_unicode(self):
+        for s, one, multi in self.ascii_examples:
+            expected = "\n" in s and multi or one
+            self.assertEqual(text_repr(_u(s)), self.u_prefix + expected)
+
+    def test_bytes_examples_oneline(self):
+        for b, expected, _ in self.bytes_examples:
+            actual = text_repr(b, multiline=False)
+            self.assertEqual(actual, self.b_prefix + expected)
+            self.assertEqual(eval(actual), b)
+
+    def test_bytes_examples_multiline(self):
+        for b, _, expected in self.bytes_examples:
+            actual = text_repr(b, multiline=True)
+            self.assertEqual(actual, self.b_prefix + expected)
+            self.assertEqual(eval(actual), b)
+
+    def test_unicode_examples_oneline(self):
+        for u, expected, _ in self.unicode_examples:
+            actual = text_repr(u, multiline=False)
+            self.assertEqual(actual, self.u_prefix + expected)
+            self.assertEqual(eval(actual), u)
+
+    def test_unicode_examples_multiline(self):
+        for u, _, expected in self.unicode_examples:
+            actual = text_repr(u, multiline=True)
+            self.assertEqual(actual, self.u_prefix + expected)
+            self.assertEqual(eval(actual), u)
+
+
+class TestReraise(testtools.TestCase):
+    """Tests for trivial reraise wrapper needed for Python 2/3 changes"""
+
+    def test_exc_info(self):
+        """After reraise exc_info matches plus some extra traceback"""
+        try:
+            raise ValueError("Bad value")
+        except ValueError:
+            _exc_info = sys.exc_info()
+        try:
+            reraise(*_exc_info)
+        except ValueError:
+            _new_exc_info = sys.exc_info()
+        self.assertIs(_exc_info[0], _new_exc_info[0])
+        self.assertIs(_exc_info[1], _new_exc_info[1])
+        expected_tb = traceback.extract_tb(_exc_info[2])
+        self.assertEqual(expected_tb,
+            traceback.extract_tb(_new_exc_info[2])[-len(expected_tb):])
+
+    def test_custom_exception_no_args(self):
+        """Reraising does not require args attribute to contain params"""
+
+        class CustomException(Exception):
+            """Exception that expects and sets attrs but not args"""
+
+            def __init__(self, value):
+                Exception.__init__(self)
+                self.value = value
+
+        try:
+            raise CustomException("Some value")
+        except CustomException:
+            _exc_info = sys.exc_info()
+        self.assertRaises(CustomException, reraise, *_exc_info)
+
+
+class Python2CompatibilityTests(testtools.TestCase):
+
+    def setUp(self):
+        super(Python2CompatibilityTests, self).setUp()
+        if sys.version_info[0] >= 3:
+            self.skip("These tests are only applicable to Python 2.")
+
+
+class TestExceptionFormatting(Python2CompatibilityTests):
+    """Test the _format_exception_only function."""
+
+    def _assert_exception_format(self, eclass, evalue, expected):
+        actual = _format_exception_only(eclass, evalue)
+        self.assertThat(actual, Equals(expected))
+        self.assertThat(''.join(actual), IsInstance(unicode))
+
+    def test_supports_string_exception(self):
+        self._assert_exception_format(
+            "String_Exception",
+            None,
+            [_u("String_Exception\n")]
+        )
+
+    def test_supports_regular_exception(self):
+        self._assert_exception_format(
+            RuntimeError,
+            RuntimeError("Something went wrong"),
+            [_u("RuntimeError: Something went wrong\n")]
+        )
+
+    def test_supports_unprintable_exceptions(self):
+        """Verify support for exception classes that raise an exception when
+        __unicode__ or __str__ is called.
+        """
+        class UnprintableException(Exception):
+
+            def __str__(self):
+                raise Exception()
+
+            def __unicode__(self):
+                raise Exception()
+
+        self._assert_exception_format(
+            UnprintableException,
+            UnprintableException("Foo"),
+            [_u("UnprintableException: <unprintable UnprintableException object>\n")]
+        )
+
+    def test_supports_exceptions_with_no_string_value(self):
+        class NoStringException(Exception):
+
+            def __str__(self):
+                return ""
+
+            def __unicode__(self):
+                return _u("")
+
+        self._assert_exception_format(
+            NoStringException,
+            NoStringException("Foo"),
+            [_u("NoStringException\n")]
+        )
+
+    def test_supports_strange_syntax_error(self):
+        """Test support for syntax errors with unusual number of arguments"""
+        self._assert_exception_format(
+            SyntaxError,
+            SyntaxError("Message"),
+            [_u("SyntaxError: Message\n")]
+        )
+
+    def test_supports_syntax_error(self):
+        self._assert_exception_format(
+            SyntaxError,
+            SyntaxError(
+                "Some Syntax Message",
+                (
+                    "/path/to/file",
+                    12,
+                    2,
+                    "This is the line of code",
+                )
+            ),
+            [
+                _u('  File "/path/to/file", line 12\n'),
+                _u('    This is the line of code\n'),
+                _u('     ^\n'),
+                _u('SyntaxError: Some Syntax Message\n'),
+            ]
+        )
+
+
+class StackListFormattingTests(Python2CompatibilityTests):
+    """Test the _format_stack_list function."""
+
+    def _assert_stack_format(self, stack_lines, expected_output):
+        actual = _format_stack_list(stack_lines)
+        self.assertThat(actual, Equals([expected_output]))
+
+    def test_single_complete_stack_line(self):
+        stack_lines = [(
+            '/path/to/filename',
+            12,
+            'func_name',
+            'some_code()',
+        )]
+        expected = _u(
+            '  File "/path/to/filename", line 12, in func_name\n'
+            '    some_code()\n')
+
+        self._assert_stack_format(stack_lines, expected)
+
+    def test_single_stack_line_no_code(self):
+        stack_lines = [(
+            '/path/to/filename',
+            12,
+            'func_name',
+            None
+        )]
+        expected = _u('  File "/path/to/filename", line 12, in func_name\n')
+        self._assert_stack_format(stack_lines, expected)
+
+
+class FormatExceptionInfoTests(Python2CompatibilityTests):
+
+    def test_individual_functions_called(self):
+        self.patch(
+            testtools.compat,
+            '_format_stack_list',
+            lambda stack_list: [_u("format stack list called\n")]
+        )
+        self.patch(
+            testtools.compat,
+            '_format_exception_only',
+            lambda etype, evalue: [_u("format exception only called\n")]
+        )
+        result = _format_exc_info(None, None, None)
+        expected = [
+            _u("Traceback (most recent call last):\n"),
+            _u("format stack list called\n"),
+            _u("format exception only called\n"),
+        ]
+        self.assertThat(expected, Equals(result))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_content.py b/third_party/testtools/testtools/tests/test_content.py
new file mode 100644
index 0000000..09feebd
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_content.py
@@ -0,0 +1,366 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+import json
+import os
+import tempfile
+import unittest
+
+from testtools import TestCase, skipUnless
+from testtools.compat import (
+    _b,
+    _u,
+    BytesIO,
+    StringIO,
+    str_is_unicode,
+    )
+from testtools.content import (
+    attach_file,
+    Content,
+    content_from_file,
+    content_from_stream,
+    JSON,
+    json_content,
+    StackLinesContent,
+    StacktraceContent,
+    TracebackContent,
+    text_content,
+    )
+from testtools.content_type import (
+    ContentType,
+    UTF8_TEXT,
+    )
+from testtools.matchers import (
+    Equals,
+    MatchesException,
+    Raises,
+    raises,
+    )
+from testtools.tests.helpers import an_exc_info
+
+
+raises_value_error = Raises(MatchesException(ValueError))
+
+
+class TestContent(TestCase):
+
+    def test___init___None_errors(self):
+        self.assertThat(lambda: Content(None, None), raises_value_error)
+        self.assertThat(
+            lambda: Content(None, lambda: ["traceback"]), raises_value_error)
+        self.assertThat(
+            lambda: Content(ContentType("text", "traceback"), None),
+            raises_value_error)
+
+    def test___init___sets_ivars(self):
+        content_type = ContentType("foo", "bar")
+        content = Content(content_type, lambda: ["bytes"])
+        self.assertEqual(content_type, content.content_type)
+        self.assertEqual(["bytes"], list(content.iter_bytes()))
+
+    def test___eq__(self):
+        content_type = ContentType("foo", "bar")
+        one_chunk = lambda: [_b("bytes")]
+        two_chunk = lambda: [_b("by"), _b("tes")]
+        content1 = Content(content_type, one_chunk)
+        content2 = Content(content_type, one_chunk)
+        content3 = Content(content_type, two_chunk)
+        content4 = Content(content_type, lambda: [_b("by"), _b("te")])
+        content5 = Content(ContentType("f", "b"), two_chunk)
+        self.assertEqual(content1, content2)
+        self.assertEqual(content1, content3)
+        self.assertNotEqual(content1, content4)
+        self.assertNotEqual(content1, content5)
+
+    def test___repr__(self):
+        content = Content(ContentType("application", "octet-stream"),
+            lambda: [_b("\x00bin"), _b("ary\xff")])
+        self.assertIn("\\x00binary\\xff", repr(content))
+
+    def test_iter_text_not_text_errors(self):
+        content_type = ContentType("foo", "bar")
+        content = Content(content_type, lambda: ["bytes"])
+        self.assertThat(content.iter_text, raises_value_error)
+
+    def test_iter_text_decodes(self):
+        content_type = ContentType("text", "strange", {"charset": "utf8"})
+        content = Content(
+            content_type, lambda: [_u("bytes\xea").encode("utf8")])
+        self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
+
+    def test_iter_text_default_charset_iso_8859_1(self):
+        content_type = ContentType("text", "strange")
+        text = _u("bytes\xea")
+        iso_version = text.encode("ISO-8859-1")
+        content = Content(content_type, lambda: [iso_version])
+        self.assertEqual([text], list(content.iter_text()))
+
+    def test_as_text(self):
+        content_type = ContentType("text", "strange", {"charset": "utf8"})
+        content = Content(
+            content_type, lambda: [_u("bytes\xea").encode("utf8")])
+        self.assertEqual(_u("bytes\xea"), content.as_text())
+
+    def test_from_file(self):
+        fd, path = tempfile.mkstemp()
+        self.addCleanup(os.remove, path)
+        os.write(fd, _b('some data'))
+        os.close(fd)
+        content = content_from_file(path, UTF8_TEXT, chunk_size=2)
+        self.assertThat(
+            list(content.iter_bytes()),
+            Equals([_b('so'), _b('me'), _b(' d'), _b('at'), _b('a')]))
+
+    def test_from_nonexistent_file(self):
+        directory = tempfile.mkdtemp()
+        nonexistent = os.path.join(directory, 'nonexistent-file')
+        content = content_from_file(nonexistent)
+        self.assertThat(content.iter_bytes, raises(IOError))
+
+    def test_from_file_default_type(self):
+        content = content_from_file('/nonexistent/path')
+        self.assertThat(content.content_type, Equals(UTF8_TEXT))
+
+    def test_from_file_eager_loading(self):
+        fd, path = tempfile.mkstemp()
+        os.write(fd, _b('some data'))
+        os.close(fd)
+        content = content_from_file(path, UTF8_TEXT, buffer_now=True)
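+        # buffer_now=True reads the file eagerly, so removing it before the
+        # content is consumed must not matter.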
+        os.remove(path)
+        self.assertThat(
+            ''.join(content.iter_text()), Equals('some data'))
+
+    def test_from_file_with_simple_seek(self):
+        f = tempfile.NamedTemporaryFile()
+        f.write(_b('some data'))
+        f.flush()
+        self.addCleanup(f.close)
+        content = content_from_file(
+            f.name, UTF8_TEXT, chunk_size=50, seek_offset=5)
+        self.assertThat(
+            list(content.iter_bytes()), Equals([_b('data')]))
+
+    def test_from_file_with_whence_seek(self):
+        f = tempfile.NamedTemporaryFile()
+        f.write(_b('some data'))
+        f.flush()
+        self.addCleanup(f.close)
+        content = content_from_file(
+            f.name, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+        self.assertThat(
+            list(content.iter_bytes()), Equals([_b('data')]))
+
+    def test_from_stream(self):
+        data = StringIO('some data')
+        content = content_from_stream(data, UTF8_TEXT, chunk_size=2)
+        self.assertThat(
+            list(content.iter_bytes()), Equals(['so', 'me', ' d', 'at', 'a']))
+
+    def test_from_stream_default_type(self):
+        data = StringIO('some data')
+        content = content_from_stream(data)
+        self.assertThat(content.content_type, Equals(UTF8_TEXT))
+
+    def test_from_stream_eager_loading(self):
+        fd, path = tempfile.mkstemp()
+        self.addCleanup(os.remove, path)
+        self.addCleanup(os.close, fd)
+        os.write(fd, _b('some data'))
+        stream = open(path, 'rb')
+        self.addCleanup(stream.close)
+        content = content_from_stream(stream, UTF8_TEXT, buffer_now=True)
+        os.write(fd, _b('more data'))
+        self.assertThat(
+            ''.join(content.iter_text()), Equals('some data'))
+
+    def test_from_stream_with_simple_seek(self):
+        data = BytesIO(_b('some data'))
+        content = content_from_stream(
+            data, UTF8_TEXT, chunk_size=50, seek_offset=5)
+        self.assertThat(
+            list(content.iter_bytes()), Equals([_b('data')]))
+
+    def test_from_stream_with_whence_seek(self):
+        data = BytesIO(_b('some data'))
+        content = content_from_stream(
+            data, UTF8_TEXT, chunk_size=50, seek_offset=-4, seek_whence=2)
+        self.assertThat(
+            list(content.iter_bytes()), Equals([_b('data')]))
+
+    def test_from_text(self):
+        data = _u("some data")
+        expected = Content(UTF8_TEXT, lambda: [data.encode('utf8')])
+        self.assertEqual(expected, text_content(data))
+
+    @skipUnless(str_is_unicode, "Test only applies in Python 3.")
+    def test_text_content_raises_TypeError_when_passed_bytes(self):
+        data = _b("Some Bytes")
+        self.assertRaises(TypeError, text_content, data)
+
+    def test_text_content_raises_TypeError_when_passed_non_text(self):
+        bad_values = (None, list(), dict(), 42, 1.23)
+        for value in bad_values:
+            self.assertThat(
+                lambda: text_content(value),
+                raises(
+                    TypeError("text_content must be given text, not '%s'." %
+                        type(value).__name__)
+                ),
+            )
+
+    def test_json_content(self):
+        data = {'foo': 'bar'}
+        expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
+        self.assertEqual(expected, json_content(data))
+
+
+class TestStackLinesContent(TestCase):
+
+    def _get_stack_line_and_expected_output(self):
+        stack_lines = [
+            ('/path/to/file', 42, 'some_function', 'print("Hello World")'),
+        ]
+        expected = '  File "/path/to/file", line 42, in some_function\n' \
+                   '    print("Hello World")\n'
+        return stack_lines, expected
+
+    def test_single_stack_line(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        actual = StackLinesContent(stack_lines).as_text()
+
+        self.assertEqual(expected, actual)
+
+    def test_prefix_content(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        prefix = self.getUniqueString() + '\n'
+        content = StackLinesContent(stack_lines, prefix_content=prefix)
+        actual = content.as_text()
+        expected = prefix + expected
+
+        self.assertEqual(expected, actual)
+
+    def test_postfix_content(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        postfix = '\n' + self.getUniqueString()
+        content = StackLinesContent(stack_lines, postfix_content=postfix)
+        actual = content.as_text()
+        expected = expected + postfix
+
+        self.assertEqual(expected, actual)
+
+    def test___init___sets_content_type(self):
+        stack_lines, expected = self._get_stack_line_and_expected_output()
+        content = StackLinesContent(stack_lines)
+        expected_content_type = ContentType("text", "x-traceback",
+            {"language": "python", "charset": "utf8"})
+
+        self.assertEqual(expected_content_type, content.content_type)
+
+
+class TestTracebackContent(TestCase):
+
+    def test___init___None_errors(self):
+        self.assertThat(
+            lambda: TracebackContent(None, None), raises_value_error)
+
+    def test___init___sets_ivars(self):
+        content = TracebackContent(an_exc_info, self)
+        content_type = ContentType("text", "x-traceback",
+            {"language": "python", "charset": "utf8"})
+        self.assertEqual(content_type, content.content_type)
+        result = unittest.TestResult()
+        expected = result._exc_info_to_string(an_exc_info, self)
+        self.assertEqual(expected, ''.join(list(content.iter_text())))
+
+
+class TestStacktraceContent(TestCase):
+
+    def test___init___sets_ivars(self):
+        content = StacktraceContent()
+        content_type = ContentType("text", "x-traceback",
+            {"language": "python", "charset": "utf8"})
+
+        self.assertEqual(content_type, content.content_type)
+
+    def test_prefix_is_used(self):
+        prefix = self.getUniqueString()
+        actual = StacktraceContent(prefix_content=prefix).as_text()
+
+        self.assertTrue(actual.startswith(prefix))
+
+    def test_postfix_is_used(self):
+        postfix = self.getUniqueString()
+        actual = StacktraceContent(postfix_content=postfix).as_text()
+
+        self.assertTrue(actual.endswith(postfix))
+
+    def test_top_frame_is_skipped_when_no_stack_is_specified(self):
+        actual = StacktraceContent().as_text()
+
+        self.assertTrue('testtools/content.py' not in actual)
+
+
+class TestAttachFile(TestCase):
+
+    def make_file(self, data):
+        # GZ 2011-04-21: This helper could be useful for methods above trying
+        #                to use mkstemp, but should handle write failures and
+        #                always close the fd. There must be a better way.
+        fd, path = tempfile.mkstemp()
+        self.addCleanup(os.remove, path)
+        os.write(fd, _b(data))
+        os.close(fd)
+        return path
+
+    def test_simple(self):
+        class SomeTest(TestCase):
+            def test_foo(self):
+                pass
+        test = SomeTest('test_foo')
+        data = 'some data'
+        path = self.make_file(data)
+        my_content = text_content(data)
+        attach_file(test, path, name='foo')
+        self.assertEqual({'foo': my_content}, test.getDetails())
+
+    def test_optional_name(self):
+        # If no name is provided, attach_file just uses the base name of the
+        # file.
+        class SomeTest(TestCase):
+            def test_foo(self):
+                pass
+        test = SomeTest('test_foo')
+        path = self.make_file('some data')
+        base_path = os.path.basename(path)
+        attach_file(test, path)
+        self.assertEqual([base_path], list(test.getDetails()))
+
+    def test_lazy_read(self):
+        class SomeTest(TestCase):
+            def test_foo(self):
+                pass
+        test = SomeTest('test_foo')
+        path = self.make_file('some data')
+        attach_file(test, path, name='foo', buffer_now=False)
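+        # buffer_now=False defers reading the file, so the rewrite below is
+        # what iter_text() eventually sees.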
+        content = test.getDetails()['foo']
+        content_file = open(path, 'w')
+        content_file.write('new data')
+        content_file.close()
+        self.assertEqual(''.join(content.iter_text()), 'new data')
+
+    def test_eager_read_by_default(self):
+        class SomeTest(TestCase):
+            def test_foo(self):
+                pass
+        test = SomeTest('test_foo')
+        path = self.make_file('some data')
+        attach_file(test, path, name='foo')
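+        # buffer_now defaults to True, so the file is read immediately and
+        # the rewrite below is not visible.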
+        content = test.getDetails()['foo']
+        content_file = open(path, 'w')
+        content_file.write('new data')
+        content_file.close()
+        self.assertEqual(''.join(content.iter_text()), 'some data')
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_content_type.py b/third_party/testtools/testtools/tests/test_content_type.py
new file mode 100644
index 0000000..2d34f95
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_content_type.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2008, 2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.matchers import Equals, MatchesException, Raises
+from testtools.content_type import (
+    ContentType,
+    JSON,
+    UTF8_TEXT,
+    )
+
+
+class TestContentType(TestCase):
+
+    def test___init___None_errors(self):
+        raises_value_error = Raises(MatchesException(ValueError))
+        self.assertThat(lambda: ContentType(None, None), raises_value_error)
+        self.assertThat(lambda: ContentType(None, "traceback"),
+            raises_value_error)
+        self.assertThat(lambda: ContentType("text", None), raises_value_error)
+
+    def test___init___sets_ivars(self):
+        content_type = ContentType("foo", "bar")
+        self.assertEqual("foo", content_type.type)
+        self.assertEqual("bar", content_type.subtype)
+        self.assertEqual({}, content_type.parameters)
+
+    def test___init___with_parameters(self):
+        content_type = ContentType("foo", "bar", {"quux": "thing"})
+        self.assertEqual({"quux": "thing"}, content_type.parameters)
+
+    def test___eq__(self):
+        content_type1 = ContentType("foo", "bar", {"quux": "thing"})
+        content_type2 = ContentType("foo", "bar", {"quux": "thing"})
+        content_type3 = ContentType("foo", "bar", {"quux": "thing2"})
+        self.assertTrue(content_type1.__eq__(content_type2))
+        self.assertFalse(content_type1.__eq__(content_type3))
+
+    def test_basic_repr(self):
+        content_type = ContentType('text', 'plain')
+        self.assertThat(repr(content_type), Equals('text/plain'))
+
+    def test_extended_repr(self):
+        content_type = ContentType(
+            'text', 'plain', {'foo': 'bar', 'baz': 'qux'})
+        self.assertThat(
+            repr(content_type), Equals('text/plain; baz="qux"; foo="bar"'))
+
+
+class TestBuiltinContentTypes(TestCase):
+
+    def test_plain_text(self):
+        # The UTF8_TEXT content type represents UTF-8 encoded text/plain.
+        self.assertThat(UTF8_TEXT.type, Equals('text'))
+        self.assertThat(UTF8_TEXT.subtype, Equals('plain'))
+        self.assertThat(UTF8_TEXT.parameters, Equals({'charset': 'utf8'}))
+
+    def test_json_content(self):
+        # The JSON content type represents application/json, implicitly UTF-8.
+        self.assertThat(JSON.type, Equals('application'))
+        self.assertThat(JSON.subtype, Equals('json'))
+        self.assertThat(JSON.parameters, Equals({}))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_deferredruntest.py b/third_party/testtools/testtools/tests/test_deferredruntest.py
new file mode 100644
index 0000000..3310926
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_deferredruntest.py
@@ -0,0 +1,777 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+"""Tests for the DeferredRunTest single test execution logic."""
+
+import os
+import signal
+
+from extras import try_import
+
+from testtools import (
+    skipIf,
+    TestCase,
+    TestResult,
+    )
+from testtools.content import (
+    text_content,
+    )
+from testtools.matchers import (
+    Equals,
+    KeysEqual,
+    MatchesException,
+    Raises,
+    )
+from testtools.runtest import RunTest
+from testtools.testresult.doubles import ExtendedTestResult
+from testtools.tests.test_spinner import NeedsTwistedTestCase
+
+assert_fails_with = try_import('testtools.deferredruntest.assert_fails_with')
+AsynchronousDeferredRunTest = try_import(
+    'testtools.deferredruntest.AsynchronousDeferredRunTest')
+flush_logged_errors = try_import(
+    'testtools.deferredruntest.flush_logged_errors')
+SynchronousDeferredRunTest = try_import(
+    'testtools.deferredruntest.SynchronousDeferredRunTest')
+
+defer = try_import('twisted.internet.defer')
+failure = try_import('twisted.python.failure')
+log = try_import('twisted.python.log')
+DelayedCall = try_import('twisted.internet.base.DelayedCall')
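+# try_import returns None for anything that cannot be imported; the
+# NeedsTwistedTestCase base class skips its tests when Twisted is absent, so
+# these names are only dereferenced when Twisted is available.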
+
+
+class X(object):
+    """Tests that we run as part of our tests, nested to avoid discovery."""
+
+    class Base(TestCase):
+        def setUp(self):
+            super(X.Base, self).setUp()
+            self.calls = ['setUp']
+            self.addCleanup(self.calls.append, 'clean-up')
+        def test_something(self):
+            self.calls.append('test')
+        def tearDown(self):
+            self.calls.append('tearDown')
+            super(X.Base, self).tearDown()
+
+    class BaseExceptionRaised(Base):
+        expected_calls = ['setUp', 'tearDown', 'clean-up']
+        expected_results = [('addError', SystemExit)]
+        def test_something(self):
+            raise SystemExit(0)
+
+    class ErrorInSetup(Base):
+        expected_calls = ['setUp', 'clean-up']
+        expected_results = [('addError', RuntimeError)]
+        def setUp(self):
+            super(X.ErrorInSetup, self).setUp()
+            raise RuntimeError("Error in setUp")
+
+    class ErrorInTest(Base):
+        expected_calls = ['setUp', 'tearDown', 'clean-up']
+        expected_results = [('addError', RuntimeError)]
+        def test_something(self):
+            raise RuntimeError("Error in test")
+
+    class FailureInTest(Base):
+        expected_calls = ['setUp', 'tearDown', 'clean-up']
+        expected_results = [('addFailure', AssertionError)]
+        def test_something(self):
+            self.fail("test failed")
+
+    class ErrorInTearDown(Base):
+        expected_calls = ['setUp', 'test', 'clean-up']
+        expected_results = [('addError', RuntimeError)]
+        def tearDown(self):
+            raise RuntimeError("Error in tearDown")
+
+    class ErrorInCleanup(Base):
+        expected_calls = ['setUp', 'test', 'tearDown', 'clean-up']
+        expected_results = [('addError', ZeroDivisionError)]
+        def test_something(self):
+            self.calls.append('test')
+            self.addCleanup(lambda: 1/0)
+
+    class TestIntegration(NeedsTwistedTestCase):
+
+        def assertResultsMatch(self, test, result):
+            events = list(result._events)
+            self.assertEqual(('startTest', test), events.pop(0))
+            for expected_result in test.expected_results:
+                result = events.pop(0)
+                if len(expected_result) == 1:
+                    self.assertEqual((expected_result[0], test), result)
+                else:
+                    self.assertEqual((expected_result[0], test), result[:2])
+                    error_type = expected_result[1]
+                    self.assertIn(error_type.__name__, str(result[2]))
+            self.assertEqual([('stopTest', test)], events)
+
+        def test_runner(self):
+            result = ExtendedTestResult()
+            test = self.test_factory('test_something', runTest=self.runner)
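+            # SystemExit is expected to propagate out of run() as well as
+            # being recorded, so it needs assertRaises here.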
+            if self.test_factory is X.BaseExceptionRaised:
+                self.assertRaises(SystemExit, test.run, result)
+            else:
+                test.run(result)
+            self.assertEqual(test.calls, self.test_factory.expected_calls)
+            self.assertResultsMatch(test, result)
+
+
+def make_integration_tests():
+    from unittest import TestSuite
+    from testtools import clone_test_with_new_id
+    runners = [
+        ('RunTest', RunTest),
+        ('SynchronousDeferredRunTest', SynchronousDeferredRunTest),
+        ('AsynchronousDeferredRunTest', AsynchronousDeferredRunTest),
+        ]
+
+    tests = [
+        X.BaseExceptionRaised,
+        X.ErrorInSetup,
+        X.ErrorInTest,
+        X.ErrorInTearDown,
+        X.FailureInTest,
+        X.ErrorInCleanup,
+        ]
+    base_test = X.TestIntegration('test_runner')
+    integration_tests = []
+    for runner_name, runner in runners:
+        for test in tests:
+            new_test = clone_test_with_new_id(
+                base_test, '%s(%s, %s)' % (
+                    base_test.id(),
+                    runner_name,
+                    test.__name__))
+            new_test.test_factory = test
+            new_test.runner = runner
+            integration_tests.append(new_test)
+    return TestSuite(integration_tests)
+
+
+class TestSynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+    def make_result(self):
+        return ExtendedTestResult()
+
+    def make_runner(self, test):
+        return SynchronousDeferredRunTest(test, test.exception_handlers)
+
+    def test_success(self):
+        class SomeCase(TestCase):
+            def test_success(self):
+                return defer.succeed(None)
+        test = SomeCase('test_success')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            result._events, Equals([
+                ('startTest', test),
+                ('addSuccess', test),
+                ('stopTest', test)]))
+
+    def test_failure(self):
+        class SomeCase(TestCase):
+            def test_failure(self):
+                return defer.maybeDeferred(self.fail, "Egads!")
+        test = SomeCase('test_failure')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events], Equals([
+                ('startTest', test),
+                ('addFailure', test),
+                ('stopTest', test)]))
+
+    def test_setUp_followed_by_test(self):
+        class SomeCase(TestCase):
+            def setUp(self):
+                super(SomeCase, self).setUp()
+                return defer.succeed(None)
+            def test_failure(self):
+                return defer.maybeDeferred(self.fail, "Egads!")
+        test = SomeCase('test_failure')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events], Equals([
+                ('startTest', test),
+                ('addFailure', test),
+                ('stopTest', test)]))
+
+
+class TestAsynchronousDeferredRunTest(NeedsTwistedTestCase):
+
+    def make_reactor(self):
+        from twisted.internet import reactor
+        return reactor
+
+    def make_result(self):
+        return ExtendedTestResult()
+
+    def make_runner(self, test, timeout=None):
+        if timeout is None:
+            timeout = self.make_timeout()
+        return AsynchronousDeferredRunTest(
+            test, test.exception_handlers, timeout=timeout)
+
+    def make_timeout(self):
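+        # Deliberately tiny: these tests spin a real reactor, so a short
+        # timeout keeps the suite fast while leaving room for callbacks
+        # scheduled at fractions of the timeout.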
+        return 0.005
+
+    def test_setUp_returns_deferred_that_fires_later(self):
+        # setUp can return a Deferred that might fire at any time.
+        # AsynchronousDeferredRunTest will not go on to running the test until
+        # the Deferred returned by setUp actually fires.
+        call_log = []
+        marker = object()
+        d = defer.Deferred().addCallback(call_log.append)
+        class SomeCase(TestCase):
+            def setUp(self):
+                super(SomeCase, self).setUp()
+                call_log.append('setUp')
+                return d
+            def test_something(self):
+                call_log.append('test')
+        def fire_deferred():
+            self.assertThat(call_log, Equals(['setUp']))
+            d.callback(marker)
+        test = SomeCase('test_something')
+        timeout = self.make_timeout()
+        runner = self.make_runner(test, timeout=timeout)
+        result = self.make_result()
+        reactor = self.make_reactor()
+        reactor.callLater(timeout, fire_deferred)
+        runner.run(result)
+        self.assertThat(call_log, Equals(['setUp', marker, 'test']))
+
+    def test_calls_setUp_test_tearDown_in_sequence(self):
+        # setUp, the test method and tearDown can all return
+        # Deferreds. AsynchronousDeferredRunTest will make sure that each of
+        # these are run in turn, only going on to the next stage once the
+        # Deferred from the previous stage has fired.
+        call_log = []
+        a = defer.Deferred()
+        a.addCallback(lambda x: call_log.append('a'))
+        b = defer.Deferred()
+        b.addCallback(lambda x: call_log.append('b'))
+        c = defer.Deferred()
+        c.addCallback(lambda x: call_log.append('c'))
+        class SomeCase(TestCase):
+            def setUp(self):
+                super(SomeCase, self).setUp()
+                call_log.append('setUp')
+                return a
+            def test_success(self):
+                call_log.append('test')
+                return b
+            def tearDown(self):
+                super(SomeCase, self).tearDown()
+                call_log.append('tearDown')
+                return c
+        test = SomeCase('test_success')
+        timeout = self.make_timeout()
+        runner = self.make_runner(test, timeout)
+        result = self.make_result()
+        reactor = self.make_reactor()
+        def fire_a():
+            self.assertThat(call_log, Equals(['setUp']))
+            a.callback(None)
+        def fire_b():
+            self.assertThat(call_log, Equals(['setUp', 'a', 'test']))
+            b.callback(None)
+        def fire_c():
+            self.assertThat(
+                call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown']))
+            c.callback(None)
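+        # Stagger the firings within the runner's timeout so that each stage
+        # is observed waiting on its Deferred before the next one fires.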
+        reactor.callLater(timeout * 0.25, fire_a)
+        reactor.callLater(timeout * 0.5, fire_b)
+        reactor.callLater(timeout * 0.75, fire_c)
+        runner.run(result)
+        self.assertThat(
+            call_log, Equals(['setUp', 'a', 'test', 'b', 'tearDown', 'c']))
+
+    def test_async_cleanups(self):
+        # Cleanups added with addCleanup can return
+        # Deferreds. AsynchronousDeferredRunTest will run each of them in
+        # turn.
+        class SomeCase(TestCase):
+            def test_whatever(self):
+                pass
+        test = SomeCase('test_whatever')
+        call_log = []
+        a = defer.Deferred().addCallback(lambda x: call_log.append('a'))
+        b = defer.Deferred().addCallback(lambda x: call_log.append('b'))
+        c = defer.Deferred().addCallback(lambda x: call_log.append('c'))
+        test.addCleanup(lambda: a)
+        test.addCleanup(lambda: b)
+        test.addCleanup(lambda: c)
+        def fire_a():
+            self.assertThat(call_log, Equals([]))
+            a.callback(None)
+        def fire_b():
+            self.assertThat(call_log, Equals(['a']))
+            b.callback(None)
+        def fire_c():
+            self.assertThat(call_log, Equals(['a', 'b']))
+            c.callback(None)
+        timeout = self.make_timeout()
+        reactor = self.make_reactor()
+        reactor.callLater(timeout * 0.25, fire_a)
+        reactor.callLater(timeout * 0.5, fire_b)
+        reactor.callLater(timeout * 0.75, fire_c)
+        runner = self.make_runner(test, timeout)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(call_log, Equals(['a', 'b', 'c']))
+
+    def test_clean_reactor(self):
+        # If there's cruft left over in the reactor, the test fails.
+        reactor = self.make_reactor()
+        timeout = self.make_timeout()
+        class SomeCase(TestCase):
+            def test_cruft(self):
+                reactor.callLater(timeout * 10.0, lambda: None)
+        test = SomeCase('test_cruft')
+        runner = self.make_runner(test, timeout)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events],
+            Equals(
+                [('startTest', test),
+                 ('addError', test),
+                 ('stopTest', test)]))
+        error = result._events[1][2]
+        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+    def test_exports_reactor(self):
+        # The reactor is set as an attribute on the test case.
+        reactor = self.make_reactor()
+        timeout = self.make_timeout()
+        class SomeCase(TestCase):
+            def test_cruft(self):
+                self.assertIs(reactor, self.reactor)
+        test = SomeCase('test_cruft')
+        runner = self.make_runner(test, timeout)
+        result = TestResult()
+        runner.run(result)
+        self.assertEqual([], result.errors)
+        self.assertEqual([], result.failures)
+
+    def test_unhandled_error_from_deferred(self):
+        # If there's a Deferred with an unhandled error, the test fails.  Each
+        # unhandled error is reported with a separate traceback.
+        class SomeCase(TestCase):
+            def test_cruft(self):
+                # Note we aren't returning the Deferred so that the error will
+                # be unhandled.
+                defer.maybeDeferred(lambda: 1/0)
+                defer.maybeDeferred(lambda: 2/0)
+        test = SomeCase('test_cruft')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        error = result._events[1][2]
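+        # The details dict defies direct comparison, so capture it and
+        # replace it with a placeholder before comparing the event list.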
+        result._events[1] = ('addError', test, None)
+        self.assertThat(result._events, Equals(
+            [('startTest', test),
+             ('addError', test, None),
+             ('stopTest', test)]))
+        self.assertThat(
+            error, KeysEqual(
+                'twisted-log',
+                'unhandled-error-in-deferred',
+                'unhandled-error-in-deferred-1',
+                ))
+
+    def test_unhandled_error_from_deferred_combined_with_error(self):
+        # If there's a Deferred with an unhandled error, the test fails.  Each
+        # unhandled error is reported with a separate traceback, and the error
+        # is still reported.
+        class SomeCase(TestCase):
+            def test_cruft(self):
+                # Note we aren't returning the Deferred so that the error will
+                # be unhandled.
+                defer.maybeDeferred(lambda: 1/0)
+                2 / 0
+        test = SomeCase('test_cruft')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        error = result._events[1][2]
+        result._events[1] = ('addError', test, None)
+        self.assertThat(result._events, Equals(
+            [('startTest', test),
+             ('addError', test, None),
+             ('stopTest', test)]))
+        self.assertThat(
+            error, KeysEqual(
+                'traceback',
+                'twisted-log',
+                'unhandled-error-in-deferred',
+                ))
+
+    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+    def test_keyboard_interrupt_stops_test_run(self):
+        # If we get a SIGINT during a test run, the test stops and no more
+        # tests run.
+        SIGINT = getattr(signal, 'SIGINT', None)
+        if not SIGINT:
+            raise self.skipTest("SIGINT unavailable")
+        class SomeCase(TestCase):
+            def test_pause(self):
+                return defer.Deferred()
+        test = SomeCase('test_pause')
+        reactor = self.make_reactor()
+        timeout = self.make_timeout()
+        runner = self.make_runner(test, timeout * 5)
+        result = self.make_result()
+        reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+        self.assertThat(lambda: runner.run(result),
+            Raises(MatchesException(KeyboardInterrupt)))
+
+    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+    def test_fast_keyboard_interrupt_stops_test_run(self):
+        # If we get a SIGINT during a test run, the test stops and no more
+        # tests run.
+        SIGINT = getattr(signal, 'SIGINT', None)
+        if not SIGINT:
+            raise self.skipTest("SIGINT unavailable")
+        class SomeCase(TestCase):
+            def test_pause(self):
+                return defer.Deferred()
+        test = SomeCase('test_pause')
+        reactor = self.make_reactor()
+        timeout = self.make_timeout()
+        runner = self.make_runner(test, timeout * 5)
+        result = self.make_result()
+        reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+        self.assertThat(lambda: runner.run(result),
+            Raises(MatchesException(KeyboardInterrupt)))
+
+    def test_timeout_causes_test_error(self):
+        # If a test times out, it is reported as an error with a
+        # TimeoutError in the traceback.
+        class SomeCase(TestCase):
+            def test_pause(self):
+                return defer.Deferred()
+        test = SomeCase('test_pause')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        error = result._events[1][2]
+        self.assertThat(
+            [event[:2] for event in result._events], Equals(
+            [('startTest', test),
+             ('addError', test),
+             ('stopTest', test)]))
+        self.assertIn('TimeoutError', str(error['traceback']))
+
+    def test_convenient_construction(self):
+        # As a convenience method, AsynchronousDeferredRunTest has a
+        # classmethod that returns an AsynchronousDeferredRunTest
+        # factory. This factory has the same API as the RunTest constructor.
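+        # Plain sentinel objects suffice here: the runner is only inspected,
+        # never run.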
+        reactor = object()
+        timeout = object()
+        handler = object()
+        factory = AsynchronousDeferredRunTest.make_factory(reactor, timeout)
+        runner = factory(self, [handler])
+        self.assertIs(reactor, runner._reactor)
+        self.assertIs(timeout, runner._timeout)
+        self.assertIs(self, runner.case)
+        self.assertEqual([handler], runner.handlers)
+
+    def test_use_convenient_factory(self):
+        # Make sure that the factory can actually be used.
+        factory = AsynchronousDeferredRunTest.make_factory()
+        class SomeCase(TestCase):
+            run_tests_with = factory
+            def test_something(self):
+                pass
+        case = SomeCase('test_something')
+        case.run()
+
+    def test_convenient_construction_default_reactor(self):
+        # As a convenience method, AsynchronousDeferredRunTest has a
+        # classmethod that returns an AsynchronousDeferredRunTest
+        # factory. This factory has the same API as the RunTest constructor.
+        reactor = object()
+        handler = object()
+        factory = AsynchronousDeferredRunTest.make_factory(reactor=reactor)
+        runner = factory(self, [handler])
+        self.assertIs(reactor, runner._reactor)
+        self.assertIs(self, runner.case)
+        self.assertEqual([handler], runner.handlers)
+
+    def test_convenient_construction_default_timeout(self):
+        # As a convenience method, AsynchronousDeferredRunTest has a
+        # classmethod that returns an AsynchronousDeferredRunTest
+        # factory. This factory has the same API as the RunTest constructor.
+        timeout = object()
+        handler = object()
+        factory = AsynchronousDeferredRunTest.make_factory(timeout=timeout)
+        runner = factory(self, [handler])
+        self.assertIs(timeout, runner._timeout)
+        self.assertIs(self, runner.case)
+        self.assertEqual([handler], runner.handlers)
+
+    def test_convenient_construction_default_debugging(self):
+        # As a convenience method, AsynchronousDeferredRunTest has a
+        # classmethod that returns an AsynchronousDeferredRunTest
+        # factory. This factory has the same API as the RunTest constructor.
+        handler = object()
+        factory = AsynchronousDeferredRunTest.make_factory(debug=True)
+        runner = factory(self, [handler])
+        self.assertIs(self, runner.case)
+        self.assertEqual([handler], runner.handlers)
+        self.assertEqual(True, runner._debug)
+
+    def test_deferred_error(self):
+        class SomeTest(TestCase):
+            def test_something(self):
+                return defer.maybeDeferred(lambda: 1/0)
+        test = SomeTest('test_something')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events],
+            Equals([
+                ('startTest', test),
+                ('addError', test),
+                ('stopTest', test)]))
+        error = result._events[1][2]
+        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+    def test_only_addError_once(self):
+        # Even if the reactor is unclean and the test raises an error and the
+        # cleanups raise errors, addError is only called once per test.
+        reactor = self.make_reactor()
+        class WhenItRains(TestCase):
+            def it_pours(self):
+                # Add a dirty cleanup.
+                self.addCleanup(lambda: 3 / 0)
+                # Dirty the reactor.
+                from twisted.internet.protocol import ServerFactory
+                reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
+                # Unhandled error.
+                defer.maybeDeferred(lambda: 2 / 0)
+                # Actual error.
+                raise RuntimeError("Excess precipitation")
+        test = WhenItRains('it_pours')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events],
+            Equals([
+                ('startTest', test),
+                ('addError', test),
+                ('stopTest', test)]))
+        error = result._events[1][2]
+        self.assertThat(
+            error, KeysEqual(
+                'traceback',
+                'traceback-1',
+                'traceback-2',
+                'twisted-log',
+                'unhandled-error-in-deferred',
+                ))
+
+    def test_log_err_is_error(self):
+        # An error logged during the test run is recorded as an error in the
+        # tests.
+        class LogAnError(TestCase):
+            def test_something(self):
+                try:
+                    1/0
+                except ZeroDivisionError:
+                    f = failure.Failure()
+                log.err(f)
+        test = LogAnError('test_something')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events],
+            Equals([
+                ('startTest', test),
+                ('addError', test),
+                ('stopTest', test)]))
+        error = result._events[1][2]
+        self.assertThat(error, KeysEqual('logged-error', 'twisted-log'))
+
+    def test_log_err_flushed_is_success(self):
+        # An error logged during the test run and then flushed with
+        # flush_logged_errors does not count as an error in the test.
+        class LogAnError(TestCase):
+            def test_something(self):
+                try:
+                    1/0
+                except ZeroDivisionError:
+                    f = failure.Failure()
+                log.err(f)
+                flush_logged_errors(ZeroDivisionError)
+        test = LogAnError('test_something')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            result._events,
+            Equals([
+                ('startTest', test),
+                ('addSuccess', test, {'twisted-log': text_content('')}),
+                ('stopTest', test)]))
+
+    def test_log_in_details(self):
+        class LogAnError(TestCase):
+            def test_something(self):
+                log.msg("foo")
+                1/0
+        test = LogAnError('test_something')
+        runner = self.make_runner(test)
+        result = self.make_result()
+        runner.run(result)
+        self.assertThat(
+            [event[:2] for event in result._events],
+            Equals([
+                ('startTest', test),
+                ('addError', test),
+                ('stopTest', test)]))
+        error = result._events[1][2]
+        self.assertThat(error, KeysEqual('traceback', 'twisted-log'))
+
+    def test_debugging_unchanged_during_test_by_default(self):
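+        # Record the pre-run debug flags; the test appends the in-run flags
+        # so the two can be compared afterwards.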
+        debugging = [(defer.Deferred.debug, DelayedCall.debug)]
+        class SomeCase(TestCase):
+            def test_debugging_enabled(self):
+                debugging.append((defer.Deferred.debug, DelayedCall.debug))
+        test = SomeCase('test_debugging_enabled')
+        runner = AsynchronousDeferredRunTest(
+            test, handlers=test.exception_handlers,
+            reactor=self.make_reactor(), timeout=self.make_timeout())
+        runner.run(self.make_result())
+        self.assertEqual(debugging[0], debugging[1])
+
+    def test_debugging_enabled_during_test_with_debug_flag(self):
+        self.patch(defer.Deferred, 'debug', False)
+        self.patch(DelayedCall, 'debug', False)
+        debugging = []
+        class SomeCase(TestCase):
+            def test_debugging_enabled(self):
+                debugging.append((defer.Deferred.debug, DelayedCall.debug))
+        test = SomeCase('test_debugging_enabled')
+        runner = AsynchronousDeferredRunTest(
+            test, handlers=test.exception_handlers,
+            reactor=self.make_reactor(), timeout=self.make_timeout(),
+            debug=True)
+        runner.run(self.make_result())
+        self.assertEqual([(True, True)], debugging)
+        self.assertEqual(False, defer.Deferred.debug)
+        self.assertEqual(False, DelayedCall.debug)
+
+
+class TestAssertFailsWith(NeedsTwistedTestCase):
+    """Tests for `assert_fails_with`."""
+
+    if SynchronousDeferredRunTest is not None:
+        run_tests_with = SynchronousDeferredRunTest
+
+    def test_assert_fails_with_success(self):
+        # assert_fails_with fails the test if it's given a Deferred that
+        # succeeds.
+        marker = object()
+        d = assert_fails_with(defer.succeed(marker), RuntimeError)
+        def check_result(failure):
+            failure.trap(self.failureException)
+            self.assertThat(
+                str(failure.value),
+                Equals("RuntimeError not raised (%r returned)" % (marker,)))
+        d.addCallbacks(
+            lambda x: self.fail("Should not have succeeded"), check_result)
+        return d
+
+    def test_assert_fails_with_success_multiple_types(self):
+        # assert_fails_with fails the test if it's given a Deferred that
+        # succeeds.
+        marker = object()
+        d = assert_fails_with(
+            defer.succeed(marker), RuntimeError, ZeroDivisionError)
+        def check_result(failure):
+            failure.trap(self.failureException)
+            self.assertThat(
+                str(failure.value),
+                Equals("RuntimeError, ZeroDivisionError not raised "
+                       "(%r returned)" % (marker,)))
+        d.addCallbacks(
+            lambda x: self.fail("Should not have succeeded"), check_result)
+        return d
+
+    def test_assert_fails_with_wrong_exception(self):
+        # assert_fails_with fails the test if the Deferred fails with an
+        # exception that is not one of the expected types.
+        d = assert_fails_with(
+            defer.maybeDeferred(lambda: 1/0), RuntimeError, KeyboardInterrupt)
+        def check_result(failure):
+            failure.trap(self.failureException)
+            lines = str(failure.value).splitlines()
+            self.assertThat(
+                lines[:2],
+                Equals([
+                    ("ZeroDivisionError raised instead of RuntimeError, "
+                     "KeyboardInterrupt:"),
+                    " Traceback (most recent call last):",
+                    ]))
+        d.addCallbacks(
+            lambda x: self.fail("Should not have succeeded"), check_result)
+        return d
+
+    def test_assert_fails_with_expected_exception(self):
+        # assert_fails_with calls back with the value of the failure if it's
+        # one of the expected types of failures.
+        try:
+            1/0
+        except ZeroDivisionError:
+            f = failure.Failure()
+        d = assert_fails_with(defer.fail(f), ZeroDivisionError)
+        return d.addCallback(self.assertThat, Equals(f.value))
+
+    def test_custom_failure_exception(self):
+        # If assert_fails_with is passed a 'failureException' keyword
+        # argument, then it will raise that instead of `AssertionError`.
+        class CustomException(Exception):
+            pass
+        marker = object()
+        d = assert_fails_with(
+            defer.succeed(marker), RuntimeError,
+            failureException=CustomException)
+        def check_result(failure):
+            failure.trap(CustomException)
+            self.assertThat(
+                str(failure.value),
+                Equals("RuntimeError not raised (%r returned)" % (marker,)))
+        return d.addCallbacks(
+            lambda x: self.fail("Should not have succeeded"), check_result)
+
+
+class TestRunWithLogObservers(NeedsTwistedTestCase):
+
+    def test_restores_observers(self):
+        from testtools.deferredruntest import run_with_log_observers
+        from twisted.python import log
+        # Make sure there's at least one observer.  This reproduces bug
+        # #926189.
+        log.addObserver(lambda *args: None)
+        observers = list(log.theLogPublisher.observers)
+        run_with_log_observers([], lambda: None)
+        self.assertEqual(observers, log.theLogPublisher.observers)
+
+
+def test_suite():
+    from unittest import TestLoader, TestSuite
+    return TestSuite(
+        [TestLoader().loadTestsFromName(__name__),
+         make_integration_tests()])
diff --git a/third_party/testtools/testtools/tests/test_distutilscmd.py b/third_party/testtools/testtools/tests/test_distutilscmd.py
new file mode 100644
index 0000000..fd0dd90
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_distutilscmd.py
@@ -0,0 +1,100 @@
+# Copyright (c) 2010-2011 Testtools authors. See LICENSE for details.
+
+"""Tests for the distutils test command logic."""
+
+from distutils.dist import Distribution
+
+from extras import try_import
+
+from testtools.compat import (
+    _b,
+    _u,
+    BytesIO,
+    )
+fixtures = try_import('fixtures')
+
+import testtools
+from testtools import TestCase
+from testtools.distutilscmd import TestCommand
+from testtools.matchers import MatchesRegex
+
+
+if fixtures:
+    class SampleTestFixture(fixtures.Fixture):
+        """Creates testtools.runexample temporarily."""
+
+        def __init__(self):
+            self.package = fixtures.PythonPackage(
+            'runexample', [('__init__.py', _b("""
+from testtools import TestCase
+
+class TestFoo(TestCase):
+    def test_bar(self):
+        pass
+    def test_quux(self):
+        pass
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
+"""))])
+
+        def setUp(self):
+            super(SampleTestFixture, self).setUp()
+            self.useFixture(self.package)
+            testtools.__path__.append(self.package.base)
+            self.addCleanup(testtools.__path__.remove, self.package.base)
+
+
+class TestCommandTest(TestCase):
+
+    def setUp(self):
+        super(TestCommandTest, self).setUp()
+        if fixtures is None:
+            self.skipTest("Need fixtures")
+
+    def test_test_module(self):
+        self.useFixture(SampleTestFixture())
+        stdout = self.useFixture(fixtures.StringStream('stdout'))
+        dist = Distribution()
+        dist.script_name = 'setup.py'
+        dist.script_args = ['test']
+        dist.cmdclass = {'test': TestCommand}
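+        # distutils stores each option as a (source, value) pair; 'command
+        # line' records where the option notionally came from.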
+        dist.command_options = {
+            'test': {'test_module': ('command line', 'testtools.runexample')}}
+        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            cmd = dist.reinitialize_command('test')
+            dist.run_command('test')
+        self.assertThat(
+            stdout.getDetails()['stdout'].as_text(),
+            MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+    def test_test_suite(self):
+        self.useFixture(SampleTestFixture())
+        stdout = self.useFixture(fixtures.StringStream('stdout'))
+        dist = Distribution()
+        dist.script_name = 'setup.py'
+        dist.script_args = ['test']
+        dist.cmdclass = {'test': TestCommand}
+        dist.command_options = {
+            'test': {
+                'test_suite': (
+                    'command line', 'testtools.runexample.test_suite')}}
+        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            cmd = dist.reinitialize_command('test')
+            dist.run_command('test')
+        self.assertThat(
+            stdout.getDetails()['stdout'].as_text(),
+            MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_fixturesupport.py b/third_party/testtools/testtools/tests/test_fixturesupport.py
new file mode 100644
index 0000000..e309045
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_fixturesupport.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2010-2011 testtools developers. See LICENSE for details.
+
+import unittest
+
+from extras import try_import
+
+from testtools import (
+    TestCase,
+    content,
+    content_type,
+    )
+from testtools.compat import _b, _u
+from testtools.matchers import Contains
+from testtools.testresult.doubles import (
+    ExtendedTestResult,
+    )
+
+fixtures = try_import('fixtures')
+LoggingFixture = try_import('fixtures.tests.helpers.LoggingFixture')
+
+
+class TestFixtureSupport(TestCase):
+
+    def setUp(self):
+        super(TestFixtureSupport, self).setUp()
+        if fixtures is None or LoggingFixture is None:
+            self.skipTest("Need fixtures")
+
+    def test_useFixture(self):
+        fixture = LoggingFixture()
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+        result = unittest.TestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertTrue(result.wasSuccessful())
+        self.assertEqual(['setUp', 'cleanUp'], fixture.calls)
+
+    def test_useFixture_cleanups_raise_caught(self):
+        calls = []
+        def raiser(ignored):
+            calls.append('called')
+            raise Exception('foo')
+        fixture = fixtures.FunctionFixture(lambda: None, raiser)
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+        result = unittest.TestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertFalse(result.wasSuccessful())
+        self.assertEqual(['called'], calls)
+
+    def test_useFixture_details_captured(self):
+        class DetailsFixture(fixtures.Fixture):
+            def setUp(self):
+                fixtures.Fixture.setUp(self)
+                self.addCleanup(delattr, self, 'content')
+                self.content = [_b('content available until cleanUp')]
+                self.addDetail('content',
+                    content.Content(content_type.UTF8_TEXT, self.get_content))
+            def get_content(self):
+                return self.content
+        fixture = DetailsFixture()
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+                # Add a colliding detail (both should show up)
+                self.addDetail('content',
+                    content.Content(content_type.UTF8_TEXT, lambda: [_b('foo')]))
+        result = ExtendedTestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertEqual('addSuccess', result._events[-2][0])
+        details = result._events[-2][2]
+        self.assertEqual(['content', 'content-1'], sorted(details.keys()))
+        self.assertEqual('foo', details['content'].as_text())
+        self.assertEqual('content available until cleanUp',
+            details['content-1'].as_text())
+
+    def test_useFixture_multiple_details_captured(self):
+        class DetailsFixture(fixtures.Fixture):
+            def setUp(self):
+                fixtures.Fixture.setUp(self)
+                self.addDetail('aaa', content.text_content("foo"))
+                self.addDetail('bbb', content.text_content("bar"))
+        fixture = DetailsFixture()
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+        result = ExtendedTestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertEqual('addSuccess', result._events[-2][0])
+        details = result._events[-2][2]
+        self.assertEqual(['aaa', 'bbb'], sorted(details))
+        self.assertEqual(_u('foo'), details['aaa'].as_text())
+        self.assertEqual(_u('bar'), details['bbb'].as_text())
+
+    def test_useFixture_details_captured_from_setUp(self):
+        # Details added during fixture set-up are gathered even if setUp()
+        # fails with an exception.
+        class BrokenFixture(fixtures.Fixture):
+            def setUp(self):
+                fixtures.Fixture.setUp(self)
+                self.addDetail('content', content.text_content("foobar"))
+                raise Exception()
+        fixture = BrokenFixture()
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+        result = ExtendedTestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertEqual('addError', result._events[-2][0])
+        details = result._events[-2][2]
+        self.assertEqual(['content', 'traceback'], sorted(details))
+        self.assertEqual('foobar', ''.join(details['content'].iter_text()))
+
+    def test_useFixture_original_exception_raised_if_gather_details_fails(self):
+        # In bug #1368440 it was reported that when a fixture fails in setUp
+        # and gather_details itself errors on it, the original exception that
+        # caused the failure is not reported.
+        class BrokenFixture(fixtures.Fixture):
+            def getDetails(self):
+                raise AttributeError("getDetails broke")
+            def setUp(self):
+                fixtures.Fixture.setUp(self)
+                raise Exception("setUp broke")
+        fixture = BrokenFixture()
+        class SimpleTest(TestCase):
+            def test_foo(self):
+                self.useFixture(fixture)
+        result = ExtendedTestResult()
+        SimpleTest('test_foo').run(result)
+        self.assertEqual('addError', result._events[-2][0])
+        details = result._events[-2][2]
+        self.assertEqual(['traceback', 'traceback-1'], sorted(details))
+        self.assertThat(
+            ''.join(details['traceback'].iter_text()),
+            Contains('setUp broke'))
+        self.assertThat(
+            ''.join(details['traceback-1'].iter_text()),
+            Contains('getDetails broke'))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_helpers.py b/third_party/testtools/testtools/tests/test_helpers.py
new file mode 100644
index 0000000..848c2f0
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_helpers.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2010-2012 testtools developers. See LICENSE for details.
+
+from testtools import TestCase
+from testtools.tests.helpers import (
+    FullStackRunTest,
+    hide_testtools_stack,
+    is_stack_hidden,
+    )
+
+
+class TestStackHiding(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def setUp(self):
+        super(TestStackHiding, self).setUp()
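+        # is_stack_hidden() is evaluated now, so the cleanup restores
+        # whatever state was current before the test ran.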
+        self.addCleanup(hide_testtools_stack, is_stack_hidden())
+
+    def test_is_stack_hidden_consistent_true(self):
+        hide_testtools_stack(True)
+        self.assertEqual(True, is_stack_hidden())
+
+    def test_is_stack_hidden_consistent_false(self):
+        hide_testtools_stack(False)
+        self.assertEqual(False, is_stack_hidden())
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_monkey.py b/third_party/testtools/testtools/tests/test_monkey.py
new file mode 100644
index 0000000..540a2ee
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_monkey.py
@@ -0,0 +1,167 @@
+# Copyright (c) 2010 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""Tests for testtools.monkey."""
+
+from testtools import TestCase
+from testtools.matchers import MatchesException, Raises
+from testtools.monkey import MonkeyPatcher, patch
+
+
+class TestObj:
+
+    def __init__(self):
+        self.foo = 'foo value'
+        self.bar = 'bar value'
+        self.baz = 'baz value'
+
+
+class MonkeyPatcherTest(TestCase):
+    """
+    Tests for 'MonkeyPatcher' monkey-patching class.
+    """
+
+    def setUp(self):
+        super(MonkeyPatcherTest, self).setUp()
+        self.test_object = TestObj()
+        self.original_object = TestObj()
+        self.monkey_patcher = MonkeyPatcher()
+
+    def test_empty(self):
+        # A monkey patcher without patches doesn't change a thing.
+        self.monkey_patcher.patch()
+
+        # We can't assert that all state is unchanged, but at least we can
+        # check our test object.
+        self.assertEqual(self.original_object.foo, self.test_object.foo)
+        self.assertEqual(self.original_object.bar, self.test_object.bar)
+        self.assertEqual(self.original_object.baz, self.test_object.baz)
+
+    def test_construct_with_patches(self):
+        # Constructing a 'MonkeyPatcher' with patches adds all of the given
+        # patches to the patch list.
+        patcher = MonkeyPatcher((self.test_object, 'foo', 'haha'),
+                                (self.test_object, 'bar', 'hehe'))
+        patcher.patch()
+        self.assertEqual('haha', self.test_object.foo)
+        self.assertEqual('hehe', self.test_object.bar)
+        self.assertEqual(self.original_object.baz, self.test_object.baz)
+
+    def test_patch_existing(self):
+        # Patching an attribute that exists sets it to the value defined in the
+        # patch.
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+        self.monkey_patcher.patch()
+        self.assertEqual(self.test_object.foo, 'haha')
+
+    def test_patch_non_existing(self):
+        # Patching a non-existing attribute sets it to the value defined in
+        # the patch.
+        self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+        self.monkey_patcher.patch()
+        self.assertEqual(self.test_object.doesntexist, 'value')
+
+    def test_restore_non_existing(self):
+        # Restoring a value that didn't exist before the patch deletes the
+        # value.
+        self.monkey_patcher.add_patch(self.test_object, 'doesntexist', 'value')
+        self.monkey_patcher.patch()
+        self.monkey_patcher.restore()
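+        # Use a unique sentinel: restore() should have deleted the attribute,
+        # not merely set it to None.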
+        marker = object()
+        self.assertIs(marker, getattr(self.test_object, 'doesntexist', marker))
+
+    def test_patch_already_patched(self):
+        # Adding a patch for an object and attribute that already have a patch
+        # overrides the existing patch.
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'BLAH')
+        self.monkey_patcher.patch()
+        self.assertEqual(self.test_object.foo, 'BLAH')
+        self.monkey_patcher.restore()
+        self.assertEqual(self.test_object.foo, self.original_object.foo)
+
+    def test_restore_twice_is_a_no_op(self):
+        # Restoring an already-restored monkey patch is a no-op.
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'blah')
+        self.monkey_patcher.patch()
+        self.monkey_patcher.restore()
+        self.assertEqual(self.test_object.foo, self.original_object.foo)
+        self.monkey_patcher.restore()
+        self.assertEqual(self.test_object.foo, self.original_object.foo)
+
+    def test_run_with_patches_decoration(self):
+        # run_with_patches runs the given callable, passing in all arguments
+        # and keyword arguments, and returns the return value of the callable.
+        log = []
+
+        def f(a, b, c=None):
+            log.append((a, b, c))
+            return 'foo'
+
+        result = self.monkey_patcher.run_with_patches(f, 1, 2, c=10)
+        self.assertEqual('foo', result)
+        self.assertEqual([(1, 2, 10)], log)
+
+    def test_repeated_run_with_patches(self):
+        # We can call the same function with run_with_patches more than
+        # once. All patches apply for each call.
+        def f():
+            return (self.test_object.foo, self.test_object.bar,
+                    self.test_object.baz)
+
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+        result = self.monkey_patcher.run_with_patches(f)
+        self.assertEqual(
+            ('haha', self.original_object.bar, self.original_object.baz),
+            result)
+        result = self.monkey_patcher.run_with_patches(f)
+        self.assertEqual(
+            ('haha', self.original_object.bar, self.original_object.baz),
+            result)
+
+    def test_run_with_patches_restores(self):
+        # run_with_patches restores the original values after the function has
+        # executed.
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+        self.assertEqual(self.original_object.foo, self.test_object.foo)
+        self.monkey_patcher.run_with_patches(lambda: None)
+        self.assertEqual(self.original_object.foo, self.test_object.foo)
+
+    def test_run_with_patches_restores_on_exception(self):
+        # run_with_patches restores the original values even when the function
+        # raises an exception.
+        def _():
+            self.assertEqual(self.test_object.foo, 'haha')
+            self.assertEqual(self.test_object.bar, 'blahblah')
+            raise RuntimeError("Something went wrong!")
+
+        self.monkey_patcher.add_patch(self.test_object, 'foo', 'haha')
+        self.monkey_patcher.add_patch(self.test_object, 'bar', 'blahblah')
+
+        self.assertThat(lambda: self.monkey_patcher.run_with_patches(_),
+            Raises(MatchesException(RuntimeError("Something went wrong!"))))
+        self.assertEqual(self.test_object.foo, self.original_object.foo)
+        self.assertEqual(self.test_object.bar, self.original_object.bar)
+
+
+class TestPatchHelper(TestCase):
+
+    def test_patch_patches(self):
+        # patch(obj, name, value) sets obj.name to value.
+        test_object = TestObj()
+        patch(test_object, 'foo', 42)
+        self.assertEqual(42, test_object.foo)
+
+    def test_patch_returns_cleanup(self):
+        # patch(obj, name, value) returns a nullary callable that restores obj
+        # to its original state when run.
+        test_object = TestObj()
+        original = test_object.foo
+        cleanup = patch(test_object, 'foo', 42)
+        cleanup()
+        self.assertEqual(original, test_object.foo)
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_run.py b/third_party/testtools/testtools/tests/test_run.py
new file mode 100644
index 0000000..ac4b9dd
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_run.py
@@ -0,0 +1,309 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the test runner logic."""
+
+from unittest import TestSuite
+import sys
+from textwrap import dedent
+
+from extras import try_import
+fixtures = try_import('fixtures')
+testresources = try_import('testresources')
+
+import testtools
+from testtools import TestCase, run, skipUnless
+from testtools.compat import (
+    _b,
+    _u,
+    StringIO,
+    )
+from testtools.matchers import (
+    Contains,
+    MatchesRegex,
+    )
+
+
+if fixtures:
+    class SampleTestFixture(fixtures.Fixture):
+        """Creates testtools.runexample temporarily."""
+
+        def __init__(self, broken=False):
+            """Create a SampleTestFixture.
+
+            :param broken: If True, the sample file will not be importable.
+            """
+            if not broken:
+                init_contents = _b("""\
+from testtools import TestCase
+
+class TestFoo(TestCase):
+    def test_bar(self):
+        pass
+    def test_quux(self):
+        pass
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
+""")
+            else:
+                init_contents = b"class not in\n"
+            self.package = fixtures.PythonPackage(
+            'runexample', [('__init__.py', init_contents)])
+
+        def setUp(self):
+            super(SampleTestFixture, self).setUp()
+            self.useFixture(self.package)
+            testtools.__path__.append(self.package.base)
+            self.addCleanup(testtools.__path__.remove, self.package.base)
+            self.addCleanup(sys.modules.pop, 'testtools.runexample', None)
+
+
+if fixtures and testresources:
+    class SampleResourcedFixture(fixtures.Fixture):
+        """Creates a test suite that uses testresources."""
+
+        def __init__(self):
+            super(SampleResourcedFixture, self).__init__()
+            self.package = fixtures.PythonPackage(
+            'resourceexample', [('__init__.py', _b("""
+from fixtures import Fixture
+from testresources import (
+    FixtureResource,
+    OptimisingTestSuite,
+    ResourcedTestCase,
+    )
+from testtools import TestCase
+
+class Printer(Fixture):
+
+    def setUp(self):
+        super(Printer, self).setUp()
+        print('Setting up Printer')
+
+    def reset(self):
+        pass
+
+class TestFoo(TestCase, ResourcedTestCase):
+    # When run, this prints just one 'Setting up Printer'. If the
+    # OptimisingTestSuite is not honoured, one is printed per test case.
+    resources=[('res', FixtureResource(Printer()))]
+    def test_bar(self):
+        pass
+    def test_foo(self):
+        pass
+    def test_quux(self):
+        pass
+def test_suite():
+    from unittest import TestLoader
+    return OptimisingTestSuite(TestLoader().loadTestsFromName(__name__))
+"""))])
+
+        def setUp(self):
+            super(SampleResourcedFixture, self).setUp()
+            self.useFixture(self.package)
+            self.addCleanup(testtools.__path__.remove, self.package.base)
+            testtools.__path__.append(self.package.base)
+
+
+if fixtures and run.have_discover:
+    class SampleLoadTestsPackage(fixtures.Fixture):
+        """Creates a test suite package using load_tests."""
+
+        def __init__(self):
+            super(SampleLoadTestsPackage, self).__init__()
+            self.package = fixtures.PythonPackage(
+                'discoverexample', [('__init__.py', _b("""
+from testtools import TestCase, clone_test_with_new_id
+
+class TestExample(TestCase):
+    def test_foo(self):
+        pass
+
+def load_tests(loader, tests, pattern):
+    tests.addTest(clone_test_with_new_id(tests._tests[1]._tests[0], "fred"))
+    return tests
+"""))])
+
+        def setUp(self):
+            super(SampleLoadTestsPackage, self).setUp()
+            self.useFixture(self.package)
+            self.addCleanup(sys.path.remove, self.package.base)
+
+
+class TestRun(TestCase):
+
+    def setUp(self):
+        super(TestRun, self).setUp()
+        if fixtures is None:
+            self.skipTest("Need fixtures")
+
+    def test_run_custom_list(self):
+        self.useFixture(SampleTestFixture())
+        tests = []
+        class CaptureList(run.TestToolsTestRunner):
+            def list(self, test):
+                tests.append(set([case.id() for case
+                    in testtools.testsuite.iterate_tests(test)]))
+        out = StringIO()
+        try:
+            program = run.TestProgram(
+                argv=['prog', '-l', 'testtools.runexample.test_suite'],
+                stdout=out, testRunner=CaptureList)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
+        self.assertEqual([set(['testtools.runexample.TestFoo.test_bar',
+            'testtools.runexample.TestFoo.test_quux'])], tests)
+
+    def test_run_list(self):
+        self.useFixture(SampleTestFixture())
+        out = StringIO()
+        try:
+            run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
+        self.assertEqual("""testtools.runexample.TestFoo.test_bar
+testtools.runexample.TestFoo.test_quux
+""", out.getvalue())
+
+    def test_run_list_failed_import(self):
+        if not run.have_discover:
+            self.skipTest("Need discover")
+        broken = self.useFixture(SampleTestFixture(broken=True))
+        out = StringIO()
+        exc = self.assertRaises(
+            SystemExit,
+            run.main, ['prog', 'discover', '-l', broken.package.base, '*.py'], out)
+        self.assertEqual(2, exc.args[0])
+        self.assertEqual("""Failed to import
+runexample
+""", out.getvalue())
+
+    def test_run_orders_tests(self):
+        self.useFixture(SampleTestFixture())
+        out = StringIO()
+        # The load list names two tests - one that exists and one that
+        # doesn't. We should get only the existing named test: not the
+        # missing one, and not the existing test the list leaves unmentioned.
+        tempdir = self.useFixture(fixtures.TempDir())
+        tempname = tempdir.path + '/tests.list'
+        f = open(tempname, 'wb')
+        try:
+            f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+        finally:
+            f.close()
+        try:
+            run.main(['prog', '-l', '--load-list', tempname,
+                'testtools.runexample.test_suite'], out)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
+        self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+    def test_run_load_list(self):
+        self.useFixture(SampleTestFixture())
+        out = StringIO()
+        # The load list names two tests - one that exists and one that
+        # doesn't. We should get only the existing named test: not the
+        # missing one, and not the existing test the list leaves unmentioned.
+        tempdir = self.useFixture(fixtures.TempDir())
+        tempname = tempdir.path + '/tests.list'
+        f = open(tempname, 'wb')
+        try:
+            f.write(_b("""
+testtools.runexample.TestFoo.test_bar
+testtools.runexample.missingtest
+"""))
+        finally:
+            f.close()
+        try:
+            run.main(['prog', '-l', '--load-list', tempname,
+                'testtools.runexample.test_suite'], out)
+        except SystemExit:
+            exc_info = sys.exc_info()
+            raise AssertionError("-l tried to exit. %r" % exc_info[1])
+        self.assertEqual("""testtools.runexample.TestFoo.test_bar
+""", out.getvalue())
+
+    def test_load_list_preserves_custom_suites(self):
+        if testresources is None:
+            self.skipTest("Need testresources")
+        self.useFixture(SampleResourcedFixture())
+        # We load two of the three tests, leaving one out. Both loaded
+        # tests share a resource, so we should see just one resource set-up.
+        tempdir = self.useFixture(fixtures.TempDir())
+        tempname = tempdir.path + '/tests.list'
+        f = open(tempname, 'wb')
+        try:
+            f.write(_b("""
+testtools.resourceexample.TestFoo.test_bar
+testtools.resourceexample.TestFoo.test_foo
+"""))
+        finally:
+            f.close()
+        stdout = self.useFixture(fixtures.StringStream('stdout'))
+        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            try:
+                run.main(['prog', '--load-list', tempname,
+                    'testtools.resourceexample.test_suite'], stdout.stream)
+            except SystemExit:
+                # Evil resides in TestProgram.
+                pass
+        out = stdout.getDetails()['stdout'].as_text()
+        self.assertEqual(1, out.count('Setting up Printer'), "%r" % out)
+
+    def test_run_failfast(self):
+        stdout = self.useFixture(fixtures.StringStream('stdout'))
+
+        class Failing(TestCase):
+            def test_a(self):
+                self.fail('a')
+            def test_b(self):
+                self.fail('b')
+        with fixtures.MonkeyPatch('sys.stdout', stdout.stream):
+            runner = run.TestToolsTestRunner(failfast=True)
+            runner.run(TestSuite([Failing('test_a'), Failing('test_b')]))
+        self.assertThat(
+            stdout.getDetails()['stdout'].as_text(), Contains('Ran 1 test'))
+
+    def test_stdout_honoured(self):
+        self.useFixture(SampleTestFixture())
+        out = StringIO()
+        exc = self.assertRaises(SystemExit, run.main,
+            argv=['prog', 'testtools.runexample.test_suite'],
+            stdout=out)
+        self.assertEqual((0,), exc.args)
+        self.assertThat(
+            out.getvalue(),
+            MatchesRegex(_u("""Tests running...
+
+Ran 2 tests in \\d.\\d\\d\\ds
+OK
+""")))
+
+    @skipUnless(run.have_discover, "discovery not present")
+    @skipUnless(fixtures, "fixtures not present")
+    def test_issue_16662(self):
+        # unittest's discover implementation didn't handle load_tests on
+        # packages. A fix is pending upstream, but we want to offer it to
+        # all testtools users regardless of Python version.
+        # See http://bugs.python.org/issue16662
+        pkg = self.useFixture(SampleLoadTestsPackage())
+        out = StringIO()
+        self.assertEqual(None, run.main(
+            ['prog', 'discover', '-l', pkg.package.base], out))
+        self.assertEqual(dedent("""\
+            discoverexample.TestExample.test_foo
+            fred
+            """), out.getvalue())
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
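+
+
+if __name__ == '__main__':
+    # Editorial sketch, not upstream code: drive the same entry point the
+    # tests above exercise. '-l' lists test ids instead of running them
+    # and (as asserted above) returns rather than raising SystemExit. The
+    # suite name assumes this module is importable from a full testtools
+    # tree.
+    run.main(['prog', '-l', 'testtools.tests.test_suite'], sys.stdout)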
diff --git a/third_party/testtools/testtools/tests/test_runtest.py b/third_party/testtools/testtools/tests/test_runtest.py
new file mode 100644
index 0000000..3ae8b13
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_runtest.py
@@ -0,0 +1,335 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Tests for the RunTest single test execution logic."""
+
+from testtools import (
+    ExtendedToOriginalDecorator,
+    run_test_with,
+    RunTest,
+    TestCase,
+    TestResult,
+    )
+from testtools.matchers import MatchesException, Is, Raises
+from testtools.testresult.doubles import ExtendedTestResult
+from testtools.tests.helpers import FullStackRunTest
+
+
+class TestRunTest(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def make_case(self):
+        class Case(TestCase):
+            def test(self):
+                pass
+        return Case('test')
+
+    def test___init___short(self):
+        run = RunTest("bar")
+        self.assertEqual("bar", run.case)
+        self.assertEqual([], run.handlers)
+
+    def test__init____handlers(self):
+        handlers = [("quux", "baz")]
+        run = RunTest("bar", handlers)
+        self.assertEqual(handlers, run.handlers)
+
+    def test__init____handlers_last_resort(self):
+        handlers = [("quux", "baz")]
+        last_resort = "foo"
+        run = RunTest("bar", handlers, last_resort)
+        self.assertEqual(last_resort, run.last_resort)
+
+    def test_run_with_result(self):
+        # test.run passes result down to _run_test_method.
+        log = []
+        class Case(TestCase):
+            def _run_test_method(self, result):
+                log.append(result)
+        case = Case('_run_test_method')
+        run = RunTest(case, lambda x: log.append(x))
+        result = TestResult()
+        run.run(result)
+        self.assertEqual(1, len(log))
+        self.assertEqual(result, log[0].decorated)
+
+    def test_run_no_result_manages_new_result(self):
+        log = []
+        run = RunTest(self.make_case(), lambda x: log.append(x) or x)
+        result = run.run()
+        self.assertIsInstance(result.decorated, TestResult)
+
+    def test__run_core_called(self):
+        case = self.make_case()
+        log = []
+        run = RunTest(case, lambda x: x)
+        run._run_core = lambda: log.append('foo')
+        run.run()
+        self.assertEqual(['foo'], log)
+
+    def test__run_prepared_result_does_not_mask_keyboard(self):
+        class Case(TestCase):
+            def test(self):
+                raise KeyboardInterrupt("go")
+        case = Case('test')
+        run = RunTest(case)
+        run.result = ExtendedTestResult()
+        self.assertThat(lambda: run._run_prepared_result(run.result),
+            Raises(MatchesException(KeyboardInterrupt)))
+        self.assertEqual(
+            [('startTest', case), ('stopTest', case)], run.result._events)
+        # tearDown is still run though!
+        self.assertEqual(True, getattr(case, '_TestCase__teardown_called'))
+
+    def test__run_user_calls_onException(self):
+        case = self.make_case()
+        log = []
+        def handler(exc_info):
+            log.append("got it")
+            self.assertEqual(3, len(exc_info))
+            self.assertIsInstance(exc_info[1], KeyError)
+            self.assertIs(KeyError, exc_info[0])
+        case.addOnException(handler)
+        e = KeyError('Yo')
+        def raises():
+            raise e
+        run = RunTest(case, [(KeyError, None)])
+        run.result = ExtendedTestResult()
+        status = run._run_user(raises)
+        self.assertEqual(run.exception_caught, status)
+        self.assertEqual([], run.result._events)
+        self.assertEqual(["got it"], log)
+
+    def test__run_user_can_catch_Exception(self):
+        case = self.make_case()
+        e = Exception('Yo')
+        def raises():
+            raise e
+        log = []
+        run = RunTest(case, [(Exception, None)])
+        run.result = ExtendedTestResult()
+        status = run._run_user(raises)
+        self.assertEqual(run.exception_caught, status)
+        self.assertEqual([], run.result._events)
+        self.assertEqual([], log)
+
+    def test__run_prepared_result_uncaught_Exception_raised(self):
+        e = KeyError('Yo')
+        class Case(TestCase):
+            def test(self):
+                raise e
+        case = Case('test')
+        log = []
+        def log_exc(self, result, err):
+            log.append((result, err))
+        run = RunTest(case, [(ValueError, log_exc)])
+        run.result = ExtendedTestResult()
+        self.assertThat(lambda: run._run_prepared_result(run.result),
+            Raises(MatchesException(KeyError)))
+        self.assertEqual(
+            [('startTest', case), ('stopTest', case)], run.result._events)
+        self.assertEqual([], log)
+
+    def test__run_prepared_result_uncaught_Exception_triggers_error(self):
+        # https://bugs.launchpad.net/testtools/+bug/1364188
+        # When something isn't handled, the test that was
+        # executing has errored, one way or another.
+        e = SystemExit(0)
+        class Case(TestCase):
+            def test(self):
+                raise e
+        case = Case('test')
+        log = []
+        def log_exc(self, result, err):
+            log.append((result, err))
+        run = RunTest(case, [], log_exc)
+        run.result = ExtendedTestResult()
+        self.assertThat(lambda: run._run_prepared_result(run.result),
+            Raises(MatchesException(SystemExit)))
+        self.assertEqual(
+            [('startTest', case), ('stopTest', case)], run.result._events)
+        self.assertEqual([(run.result, e)], log)
+
+    def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
+        case = self.make_case()
+        def broken_handler(exc_info):
+            # ValueError because that's what the handler list knows how to
+            # catch - and, since it comes from an exception handler rather
+            # than the test itself, it must not be caught.
+            raise ValueError('boo')
+        case.addOnException(broken_handler)
+        e = KeyError('Yo')
+        def raises():
+            raise e
+        log = []
+        def log_exc(self, result, err):
+            log.append((result, err))
+        run = RunTest(case, [(ValueError, log_exc)])
+        run.result = ExtendedTestResult()
+        self.assertThat(lambda: run._run_user(raises),
+            Raises(MatchesException(ValueError)))
+        self.assertEqual([], run.result._events)
+        self.assertEqual([], log)
+
+    def test__run_user_returns_result(self):
+        case = self.make_case()
+        def returns():
+            return 1
+        run = RunTest(case)
+        run.result = ExtendedTestResult()
+        self.assertEqual(1, run._run_user(returns))
+        self.assertEqual([], run.result._events)
+
+    def test__run_one_decorates_result(self):
+        log = []
+        class Run(RunTest):
+            def _run_prepared_result(self, result):
+                log.append(result)
+                return result
+        run = Run(self.make_case(), lambda x: x)
+        result = run._run_one('foo')
+        self.assertEqual([result], log)
+        self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
+        self.assertEqual('foo', result.decorated)
+
+    def test__run_prepared_result_calls_start_and_stop_test(self):
+        result = ExtendedTestResult()
+        case = self.make_case()
+        run = RunTest(case, lambda x: x)
+        run.run(result)
+        self.assertEqual([
+            ('startTest', case),
+            ('addSuccess', case),
+            ('stopTest', case),
+            ], result._events)
+
+    def test__run_prepared_result_calls_stop_test_always(self):
+        result = ExtendedTestResult()
+        case = self.make_case()
+        def inner():
+            raise Exception("foo")
+        run = RunTest(case, lambda x: x)
+        run._run_core = inner
+        self.assertThat(lambda: run.run(result),
+            Raises(MatchesException(Exception("foo"))))
+        self.assertEqual([
+            ('startTest', case),
+            ('stopTest', case),
+            ], result._events)
+
+
+class CustomRunTest(RunTest):
+
+    marker = object()
+
+    def run(self, result=None):
+        return self.marker
+
+
+class TestTestCaseSupportForRunTest(TestCase):
+
+    def test_pass_custom_run_test(self):
+        class SomeCase(TestCase):
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo', runTest=CustomRunTest)
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+    def test_default_is_runTest_class_variable(self):
+        class SomeCase(TestCase):
+            run_tests_with = CustomRunTest
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo')
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+    def test_constructor_argument_overrides_class_variable(self):
+        # If a 'runTest' argument is passed to the test's constructor, that
+        # overrides the class variable.
+        marker = object()
+        class DifferentRunTest(RunTest):
+            def run(self, result=None):
+                return marker
+        class SomeCase(TestCase):
+            run_tests_with = CustomRunTest
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo', runTest=DifferentRunTest)
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(marker))
+
+    def test_decorator_for_run_test(self):
+        # Individual test methods can be marked as needing a special runner.
+        class SomeCase(TestCase):
+            @run_test_with(CustomRunTest)
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo')
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+    def test_extended_decorator_for_run_test(self):
+        # Individual test methods can be marked as needing a special runner.
+        # Extra arguments can be passed to the decorator which will then be
+        # passed on to the RunTest object.
+        marker = object()
+        class FooRunTest(RunTest):
+            def __init__(self, case, handlers=None, bar=None):
+                super(FooRunTest, self).__init__(case, handlers)
+                self.bar = bar
+            def run(self, result=None):
+                return self.bar
+        class SomeCase(TestCase):
+            @run_test_with(FooRunTest, bar=marker)
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo')
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(marker))
+
+    def test_works_as_inner_decorator(self):
+        # Even if run_test_with is the innermost decorator, it will be
+        # respected.
+        def wrapped(function):
+            """Silly, trivial decorator."""
+            def decorated(*args, **kwargs):
+                return function(*args, **kwargs)
+            decorated.__name__ = function.__name__
+            decorated.__dict__.update(function.__dict__)
+            return decorated
+        class SomeCase(TestCase):
+            @wrapped
+            @run_test_with(CustomRunTest)
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo')
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(CustomRunTest.marker))
+
+    def test_constructor_overrides_decorator(self):
+        # If a 'runTest' argument is passed to the test's constructor, that
+        # overrides the decorator.
+        marker = object()
+        class DifferentRunTest(RunTest):
+            def run(self, result=None):
+                return marker
+        class SomeCase(TestCase):
+            @run_test_with(CustomRunTest)
+            def test_foo(self):
+                pass
+        result = TestResult()
+        case = SomeCase('test_foo', runTest=DifferentRunTest)
+        from_run_test = case.run(result)
+        self.assertThat(from_run_test, Is(marker))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
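+
+
+class TestRunTestPrecedenceSketch(TestCase):
+    """Editorial sketch, not upstream code.
+
+    Restates in one place what the tests above prove piecewise: a runTest
+    constructor argument beats both the @run_test_with decorator and the
+    run_tests_with class variable.
+    """
+
+    def test_constructor_beats_decorator_and_class_variable(self):
+        marker = object()
+        class WinningRunTest(RunTest):
+            def run(self, result=None):
+                return marker
+        class SomeCase(TestCase):
+            run_tests_with = CustomRunTest
+            @run_test_with(CustomRunTest)
+            def test_foo(self):
+                pass
+        case = SomeCase('test_foo', runTest=WinningRunTest)
+        self.assertThat(case.run(TestResult()), Is(marker))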
diff --git a/third_party/testtools/testtools/tests/test_spinner.py b/third_party/testtools/testtools/tests/test_spinner.py
new file mode 100644
index 0000000..31110ca
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_spinner.py
@@ -0,0 +1,326 @@
+# Copyright (c) 2010 testtools developers. See LICENSE for details.
+
+"""Tests for the evil Twisted reactor-spinning we do."""
+
+import os
+import signal
+
+from extras import try_import
+
+from testtools import (
+    skipIf,
+    TestCase,
+    )
+from testtools.matchers import (
+    Equals,
+    Is,
+    MatchesException,
+    Raises,
+    )
+
+_spinner = try_import('testtools._spinner')
+
+defer = try_import('twisted.internet.defer')
+Failure = try_import('twisted.python.failure.Failure')
+
+
+class NeedsTwistedTestCase(TestCase):
+
+    def setUp(self):
+        super(NeedsTwistedTestCase, self).setUp()
+        if defer is None or Failure is None:
+            self.skipTest("Need Twisted to run")
+
+
+class TestNotReentrant(NeedsTwistedTestCase):
+
+    def test_not_reentrant(self):
+        # A function decorated as not being re-entrant will raise a
+        # _spinner.ReentryError if it is called while it is running.
+        calls = []
+        @_spinner.not_reentrant
+        def log_something():
+            calls.append(None)
+            if len(calls) < 5:
+                log_something()
+        self.assertThat(
+            log_something, Raises(MatchesException(_spinner.ReentryError)))
+        self.assertEqual(1, len(calls))
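+
+    def test_sequential_calls_allowed(self):
+        # Editorial sketch, not upstream code: the guard only rejects
+        # re-entry while the decorated function is running. Back-to-back
+        # top-level calls succeed, which is what lets Spinner.run be
+        # invoked repeatedly in the tests below.
+        calls = []
+        @_spinner.not_reentrant
+        def record():
+            calls.append(None)
+        record()
+        record()
+        self.assertEqual(2, len(calls))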
+
+    def test_deeper_stack(self):
+        calls = []
+        @_spinner.not_reentrant
+        def g():
+            calls.append(None)
+            if len(calls) < 5:
+                f()
+        @_spinner.not_reentrant
+        def f():
+            calls.append(None)
+            if len(calls) < 5:
+                g()
+        self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
+        self.assertEqual(2, len(calls))
+
+
+class TestExtractResult(NeedsTwistedTestCase):
+
+    def test_not_fired(self):
+        # _spinner.extract_result raises _spinner.DeferredNotFired if it's
+        # given a Deferred that has not fired.
+        self.assertThat(lambda: _spinner.extract_result(defer.Deferred()),
+            Raises(MatchesException(_spinner.DeferredNotFired)))
+
+    def test_success(self):
+        # _spinner.extract_result returns the value of the Deferred if it has
+        # fired successfully.
+        marker = object()
+        d = defer.succeed(marker)
+        self.assertThat(_spinner.extract_result(d), Equals(marker))
+
+    def test_failure(self):
+        # _spinner.extract_result raises the failure's exception if it's given
+        # a Deferred that is failing.
+        try:
+            1/0
+        except ZeroDivisionError:
+            f = Failure()
+        d = defer.fail(f)
+        self.assertThat(lambda: _spinner.extract_result(d),
+            Raises(MatchesException(ZeroDivisionError)))
+
+
+class TestTrapUnhandledErrors(NeedsTwistedTestCase):
+
+    def test_no_deferreds(self):
+        marker = object()
+        result, errors = _spinner.trap_unhandled_errors(lambda: marker)
+        self.assertEqual([], errors)
+        self.assertIs(marker, result)
+
+    def test_unhandled_error(self):
+        failures = []
+        def make_deferred_but_dont_handle():
+            try:
+                1/0
+            except ZeroDivisionError:
+                f = Failure()
+                failures.append(f)
+                defer.fail(f)
+        result, errors = _spinner.trap_unhandled_errors(
+            make_deferred_but_dont_handle)
+        self.assertIs(None, result)
+        self.assertEqual(failures, [error.failResult for error in errors])
+
+
+class TestRunInReactor(NeedsTwistedTestCase):
+
+    def make_reactor(self):
+        from twisted.internet import reactor
+        return reactor
+
+    def make_spinner(self, reactor=None):
+        if reactor is None:
+            reactor = self.make_reactor()
+        return _spinner.Spinner(reactor)
+
+    def make_timeout(self):
+        return 0.01
+
+    def test_function_called(self):
+        # run_in_reactor actually calls the function given to it.
+        calls = []
+        marker = object()
+        self.make_spinner().run(self.make_timeout(), calls.append, marker)
+        self.assertThat(calls, Equals([marker]))
+
+    def test_return_value_returned(self):
+        # run_in_reactor returns the value returned by the function given to
+        # it.
+        marker = object()
+        result = self.make_spinner().run(self.make_timeout(), lambda: marker)
+        self.assertThat(result, Is(marker))
+
+    def test_exception_reraised(self):
+        # If the given function raises an error, run_in_reactor re-raises that
+        # error.
+        self.assertThat(
+            lambda: self.make_spinner().run(self.make_timeout(), lambda: 1/0),
+            Raises(MatchesException(ZeroDivisionError)))
+
+    def test_keyword_arguments(self):
+        # run_in_reactor passes keyword arguments on.
+        calls = []
+        function = lambda *a, **kw: calls.extend([a, kw])
+        self.make_spinner().run(self.make_timeout(), function, foo=42)
+        self.assertThat(calls, Equals([(), {'foo': 42}]))
+
+    def test_not_reentrant(self):
+        # run_in_reactor raises an error if it is called inside another call
+        # to run_in_reactor.
+        spinner = self.make_spinner()
+        self.assertThat(lambda: spinner.run(
+            self.make_timeout(), spinner.run, self.make_timeout(),
+            lambda: None), Raises(MatchesException(_spinner.ReentryError)))
+
+    def test_deferred_value_returned(self):
+        # If the given function returns a Deferred, run_in_reactor returns the
+        # value in the Deferred at the end of the callback chain.
+        marker = object()
+        result = self.make_spinner().run(
+            self.make_timeout(), lambda: defer.succeed(marker))
+        self.assertThat(result, Is(marker))
+
+    def test_preserve_signal_handler(self):
+        signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
+        # list() the filter so the sequence survives being iterated more
+        # than once on Python 3, where filter() returns a one-shot iterator.
+        signals = list(filter(
+            None, (getattr(signal, name, None) for name in signals)))
+        for sig in signals:
+            self.addCleanup(signal.signal, sig, signal.getsignal(sig))
+        new_hdlrs = list(lambda *a: None for _ in signals)
+        for sig, hdlr in zip(signals, new_hdlrs):
+            signal.signal(sig, hdlr)
+        spinner = self.make_spinner()
+        spinner.run(self.make_timeout(), lambda: None)
+        self.assertEqual(new_hdlrs, list(map(signal.getsignal, signals)))
+
+    def test_timeout(self):
+        # If the function takes too long to run, we raise a
+        # _spinner.TimeoutError.
+        timeout = self.make_timeout()
+        self.assertThat(
+            lambda: self.make_spinner().run(timeout, lambda: defer.Deferred()),
+            Raises(MatchesException(_spinner.TimeoutError)))
+
+    def test_no_junk_by_default(self):
+        # If the reactor hasn't spun yet, then there cannot be any junk.
+        spinner = self.make_spinner()
+        self.assertThat(spinner.get_junk(), Equals([]))
+
+    def test_clean_do_nothing(self):
+        # If there's nothing going on in the reactor, then clean does nothing
+        # and returns an empty list.
+        spinner = self.make_spinner()
+        result = spinner._clean()
+        self.assertThat(result, Equals([]))
+
+    def test_clean_delayed_call(self):
+        # If there's a delayed call in the reactor, then clean cancels it
+        # and returns it in the list of junk.
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        call = reactor.callLater(10, lambda: None)
+        results = spinner._clean()
+        self.assertThat(results, Equals([call]))
+        self.assertThat(call.active(), Equals(False))
+
+    def test_clean_delayed_call_cancelled(self):
+        # If there's a delayed call that's just been cancelled, then it's no
+        # longer there.
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        call = reactor.callLater(10, lambda: None)
+        call.cancel()
+        results = spinner._clean()
+        self.assertThat(results, Equals([]))
+
+    def test_clean_selectables(self):
+        # If there's still a selectable (e.g. a listening socket), then
+        # clean() removes it from the reactor's registry.
+        #
+        # Note that the socket is left open. This emulates a bug in trial.
+        from twisted.internet.protocol import ServerFactory
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        port = reactor.listenTCP(0, ServerFactory(), interface='127.0.0.1')
+        spinner.run(self.make_timeout(), lambda: None)
+        results = spinner.get_junk()
+        self.assertThat(results, Equals([port]))
+
+    def test_clean_running_threads(self):
+        import threading
+        import time
+        current_threads = list(threading.enumerate())
+        reactor = self.make_reactor()
+        timeout = self.make_timeout()
+        spinner = self.make_spinner(reactor)
+        spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
+        self.assertThat(list(threading.enumerate()), Equals(current_threads))
+
+    def test_leftover_junk_available(self):
+        # If 'run' is given a function that leaves the reactor dirty in some
+        # way, 'run' will clean up the reactor and then store information
+        # about the junk. This information can be got using get_junk.
+        from twisted.internet.protocol import ServerFactory
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        port = spinner.run(
+            self.make_timeout(), reactor.listenTCP, 0, ServerFactory(),
+            interface='127.0.0.1')
+        self.assertThat(spinner.get_junk(), Equals([port]))
+
+    def test_will_not_run_with_previous_junk(self):
+        # If 'run' is called and there's still junk in the spinner's junk
+        # list, then the spinner will refuse to run.
+        from twisted.internet.protocol import ServerFactory
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        timeout = self.make_timeout()
+        spinner.run(
+            timeout, reactor.listenTCP, 0, ServerFactory(),
+            interface='127.0.0.1')
+        self.assertThat(lambda: spinner.run(timeout, lambda: None),
+            Raises(MatchesException(_spinner.StaleJunkError)))
+
+    def test_clear_junk_clears_previous_junk(self):
+        # clear_junk() removes and returns whatever junk is recorded from a
+        # previous run, leaving the spinner with an empty junk list.
+        from twisted.internet.protocol import ServerFactory
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        timeout = self.make_timeout()
+        port = spinner.run(
+            timeout, reactor.listenTCP, 0, ServerFactory(),
+            interface='127.0.0.1')
+        junk = spinner.clear_junk()
+        self.assertThat(junk, Equals([port]))
+        self.assertThat(spinner.get_junk(), Equals([]))
+
+    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+    def test_sigint_raises_no_result_error(self):
+        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+        SIGINT = getattr(signal, 'SIGINT', None)
+        if not SIGINT:
+            self.skipTest("SIGINT not available")
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        timeout = self.make_timeout()
+        reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
+        self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
+            Raises(MatchesException(_spinner.NoResultError)))
+        self.assertEqual([], spinner._clean())
+
+    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+    def test_sigint_raises_no_result_error_second_time(self):
+        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+        # This test is exactly the same as test_sigint_raises_no_result_error,
+        # and exists to make sure we haven't futzed with state.
+        self.test_sigint_raises_no_result_error()
+
+    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+    def test_fast_sigint_raises_no_result_error(self):
+        # If we get a SIGINT during a run, we raise _spinner.NoResultError.
+        SIGINT = getattr(signal, 'SIGINT', None)
+        if not SIGINT:
+            self.skipTest("SIGINT not available")
+        reactor = self.make_reactor()
+        spinner = self.make_spinner(reactor)
+        timeout = self.make_timeout()
+        reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
+        self.assertThat(lambda: spinner.run(timeout * 5, defer.Deferred),
+            Raises(MatchesException(_spinner.NoResultError)))
+        self.assertEqual([], spinner._clean())
+
+    @skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
+    def test_fast_sigint_raises_no_result_error_second_time(self):
+        self.test_fast_sigint_raises_no_result_error()
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
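+
+
+if __name__ == '__main__':
+    # Editorial sketch, not upstream code: the core contract exercised
+    # above - Spinner.run(timeout, f) spins the reactor until the Deferred
+    # returned by f fires, then returns its value. Guarded so importing
+    # this test module never touches the reactor.
+    if _spinner is not None and defer is not None:
+        from twisted.internet import reactor
+        spinner = _spinner.Spinner(reactor)
+        print(spinner.run(5.0, lambda: defer.succeed('done')))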
diff --git a/third_party/testtools/testtools/tests/test_tags.py b/third_party/testtools/testtools/tests/test_tags.py
new file mode 100644
index 0000000..5010f9a
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_tags.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2012 testtools developers. See LICENSE for details.
+
+"""Test tag support."""
+
+
+from testtools import TestCase
+from testtools.tags import TagContext
+
+
+class TestTags(TestCase):
+
+    def test_no_tags(self):
+        # A tag context has no tags initially.
+        tag_context = TagContext()
+        self.assertEqual(set(), tag_context.get_current_tags())
+
+    def test_add_tag(self):
+        # A tag added with change_tags appears in get_current_tags.
+        tag_context = TagContext()
+        tag_context.change_tags(set(['foo']), set())
+        self.assertEqual(set(['foo']), tag_context.get_current_tags())
+
+    def test_add_tag_twice(self):
+        # Calling change_tags twice to add tags adds both tags to the current
+        # tags.
+        tag_context = TagContext()
+        tag_context.change_tags(set(['foo']), set())
+        tag_context.change_tags(set(['bar']), set())
+        self.assertEqual(
+            set(['foo', 'bar']), tag_context.get_current_tags())
+
+    def test_change_tags_returns_tags(self):
+        # change_tags returns the current tags.  This is a convenience.
+        tag_context = TagContext()
+        tags = tag_context.change_tags(set(['foo']), set())
+        self.assertEqual(set(['foo']), tags)
+
+    def test_remove_tag(self):
+        # change_tags can remove tags from the context.
+        tag_context = TagContext()
+        tag_context.change_tags(set(['foo']), set())
+        tag_context.change_tags(set(), set(['foo']))
+        self.assertEqual(set(), tag_context.get_current_tags())
+
+    def test_child_context(self):
+        # A TagContext can have a parent.  If so, its tags are the tags of the
+        # parent at the moment of construction.
+        parent = TagContext()
+        parent.change_tags(set(['foo']), set())
+        child = TagContext(parent)
+        self.assertEqual(
+            parent.get_current_tags(), child.get_current_tags())
+
+    def test_add_to_child(self):
+        # Adding a tag to the child context doesn't affect the parent.
+        parent = TagContext()
+        parent.change_tags(set(['foo']), set())
+        child = TagContext(parent)
+        child.change_tags(set(['bar']), set())
+        self.assertEqual(set(['foo', 'bar']), child.get_current_tags())
+        self.assertEqual(set(['foo']), parent.get_current_tags())
+
+    def test_remove_in_child(self):
+        # A tag that was in the parent context can be removed from the child
+        # context without affecting the parent.
+        parent = TagContext()
+        parent.change_tags(set(['foo']), set())
+        child = TagContext(parent)
+        child.change_tags(set(), set(['foo']))
+        self.assertEqual(set(), child.get_current_tags())
+        self.assertEqual(set(['foo']), parent.get_current_tags())
+
+    def test_parent(self):
+        # The parent can be retrieved from a child context.
+        parent = TagContext()
+        parent.change_tags(set(['foo']), set())
+        child = TagContext(parent)
+        child.change_tags(set(), set(['foo']))
+        self.assertEqual(parent, child.parent)
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
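+
+
+def _tag_stack_sketch():
+    # Editorial sketch, not upstream code: the parent link is what lets a
+    # test result keep a stack of tag scopes. Push a scope with
+    # TagContext(current), pop by returning to .parent, mirroring the
+    # child-context tests above.
+    current = TagContext()
+    current.change_tags(set(['suite']), set())
+    current = TagContext(current)            # push a scope
+    current.change_tags(set(['test']), set())
+    assert current.get_current_tags() == set(['suite', 'test'])
+    current = current.parent                 # pop back out
+    assert current.get_current_tags() == set(['suite'])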
diff --git a/third_party/testtools/testtools/tests/test_testcase.py b/third_party/testtools/testtools/tests/test_testcase.py
new file mode 100644
index 0000000..4f3e146
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_testcase.py
@@ -0,0 +1,1733 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Tests for extensions to the base test library."""
+
+from doctest import ELLIPSIS
+from pprint import pformat
+import sys
+import unittest
+
+from testtools import (
+    DecorateTestCaseResult,
+    ErrorHolder,
+    MultipleExceptions,
+    PlaceHolder,
+    TestCase,
+    clone_test_with_new_id,
+    content,
+    skip,
+    skipIf,
+    skipUnless,
+    testcase,
+    )
+from testtools.compat import (
+    _b,
+    _u,
+    )
+from testtools.content import (
+    text_content,
+    TracebackContent,
+    )
+from testtools.matchers import (
+    Annotate,
+    DocTestMatches,
+    Equals,
+    HasLength,
+    MatchesException,
+    Raises,
+    )
+from testtools.testcase import (
+    attr,
+    Nullary,
+    WithAttributes,
+    )
+from testtools.testresult.doubles import (
+    Python26TestResult,
+    Python27TestResult,
+    ExtendedTestResult,
+    )
+from testtools.tests.helpers import (
+    an_exc_info,
+    FullStackRunTest,
+    LoggingResult,
+    )
+try:
+    exec('from __future__ import with_statement')
+except SyntaxError:
+    pass
+else:
+    from testtools.tests.test_with_with import *
+
+
+class TestPlaceHolder(TestCase):
+
+    run_tests_with = FullStackRunTest
+
+    def makePlaceHolder(self, test_id="foo", short_description=None):
+        return PlaceHolder(test_id, short_description)
+
+    def test_id_comes_from_constructor(self):
+        # The id() of a PlaceHolder is whatever you pass into the constructor.
+        test = PlaceHolder("test id")
+        self.assertEqual("test id", test.id())
+
+    def test_shortDescription_is_id(self):
+        # The shortDescription() of a PlaceHolder is the id, by default.
+        test = PlaceHolder("test id")
+        self.assertEqual(test.id(), test.shortDescription())
+
+    def test_shortDescription_specified(self):
+        # If a shortDescription is provided to the constructor, then
+        # shortDescription() returns that instead.
+        test = PlaceHolder("test id", "description")
+        self.assertEqual("description", test.shortDescription())
+
+    def test_repr_just_id(self):
+        # repr(placeholder) shows you how the object was constructed.
+        test = PlaceHolder("test id")
+        self.assertEqual(
+            "<testtools.testcase.PlaceHolder('addSuccess', %s, {})>" % repr(
+            test.id()), repr(test))
+
+    def test_repr_with_description(self):
+        # repr(placeholder) shows you how the object was constructed.
+        test = PlaceHolder("test id", "description")
+        self.assertEqual(
+            "<testtools.testcase.PlaceHolder('addSuccess', %r, {}, %r)>" % (
+            test.id(), test.shortDescription()), repr(test))
+
+    def test_repr_custom_outcome(self):
+        test = PlaceHolder("test id", outcome='addSkip')
+        self.assertEqual(
+            "<testtools.testcase.PlaceHolder('addSkip', %r, {})>" % (
+            test.id()), repr(test))
+
+    def test_counts_as_one_test(self):
+        # A placeholder test counts as one test.
+        test = self.makePlaceHolder()
+        self.assertEqual(1, test.countTestCases())
+
+    def test_str_is_id(self):
+        # str(placeholder) is always the id(). We are not barbarians.
+        test = self.makePlaceHolder()
+        self.assertEqual(test.id(), str(test))
+
+    def test_runs_as_success(self):
+        # When run, a PlaceHolder test records a success.
+        test = self.makePlaceHolder()
+        log = []
+        test.run(LoggingResult(log))
+        self.assertEqual(
+            [('tags', set(), set()), ('startTest', test), ('addSuccess', test),
+             ('stopTest', test), ('tags', set(), set()),],
+            log)
+
+    def test_supplies_details(self):
+        details = {'quux': None}
+        test = PlaceHolder('foo', details=details)
+        result = ExtendedTestResult()
+        test.run(result)
+        self.assertEqual(
+            [('tags', set(), set()),
+             ('startTest', test),
+             ('addSuccess', test, details),
+             ('stopTest', test),
+             ('tags', set(), set()),
+             ],
+            result._events)
+
+    def test_supplies_timestamps(self):
+        test = PlaceHolder('foo', details={}, timestamps=["A", "B"])
+        result = ExtendedTestResult()
+        test.run(result)
+        self.assertEqual(
+            [('time', "A"),
+             ('tags', set(), set()),
+             ('startTest', test),
+             ('time', "B"),
+             ('addSuccess', test),
+             ('stopTest', test),
+             ('tags', set(), set()),
+             ],
+            result._events)
+
+    def test_call_is_run(self):
+        # A PlaceHolder can be called, in which case it behaves like run.
+        test = self.makePlaceHolder()
+        run_log = []
+        test.run(LoggingResult(run_log))
+        call_log = []
+        test(LoggingResult(call_log))
+        self.assertEqual(run_log, call_log)
+
+    def test_runs_without_result(self):
+        # A PlaceHolder can be run without a result, in which case there's no
+        # way to actually get at the result.
+        self.makePlaceHolder().run()
+
+    def test_debug(self):
+        # A PlaceHolder can be debugged.
+        self.makePlaceHolder().debug()
+
+    def test_supports_tags(self):
+        result = ExtendedTestResult()
+        tags = set(['foo', 'bar'])
+        case = PlaceHolder("foo", tags=tags)
+        case.run(result)
+        self.assertEqual([
+            ('tags', tags, set()),
+            ('startTest', case),
+            ('addSuccess', case),
+            ('stopTest', case),
+            ('tags', set(), tags),
+            ], result._events)
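+
+    def test_placeholder_in_suite_sketch(self):
+        # Editorial sketch, not upstream code: a PlaceHolder stands in for
+        # a test that could not be loaded, so a suite keeps its shape - it
+        # counts as one test and reports a success, per the tests above.
+        case = self.makePlaceHolder("missing.test")
+        suite = unittest.TestSuite([case])
+        self.assertEqual(1, suite.countTestCases())
+        log = []
+        suite.run(LoggingResult(log))
+        self.assertIn(('addSuccess', case), log)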
+
+
+class TestErrorHolder(TestCase):
+    # Note that these tests exist because ErrorHolder exists - it could be
+    # deprecated and dropped at this point.
+
+    run_tests_with = FullStackRunTest
+
+    def makeException(self):
+        try:
+            raise RuntimeError("danger danger")
+        except:
+            return sys.exc_info()
+
+    def makePlaceHolder(self, test_id="foo", error=None,
+                        short_description=None):
+        if error is None:
+            error = self.makeException()
+        return ErrorHolder(test_id, error, short_description)
+
+    def test_id_comes_from_constructor(self):
+        # The id() of an ErrorHolder is whatever you pass into the
+        # constructor.
+        test = ErrorHolder("test id", self.makeException())
+        self.assertEqual("test id", test.id())
+
+    def test_shortDescription_is_id(self):
+        # The shortDescription() of an ErrorHolder is the id, by default.
+        test = ErrorHolder("test id", self.makeException())
+        self.assertEqual(test.id(), test.shortDescription())
+
+    def test_shortDescription_specified(self):
+        # If a shortDescription is provided to the constructor, then
+        # shortDescription() returns that instead.
+        test = ErrorHolder("test id", self.makeException(), "description")
+        self.assertEqual("description", test.shortDescription())
+
+    def test_counts_as_one_test(self):
+        # A placeholder test counts as one test.
+        test = self.makePlaceHolder()
+        self.assertEqual(1, test.countTestCases())
+
+    def test_str_is_id(self):
+        # str(placeholder) is always the id(). We are not barbarians.
+        test = self.makePlaceHolder()
+        self.assertEqual(test.id(), str(test))
+
+    def test_runs_as_error(self):
+        # When run, an ErrorHolder test records an error.
+        error = self.makeException()
+        test = self.makePlaceHolder(error=error)
+        result = ExtendedTestResult()
+        log = result._events
+        test.run(result)
+        self.assertEqual(
+            [('tags', set(), set()),
+             ('startTest', test),
+             ('addError', test, test._details),
+             ('stopTest', test),
+             ('tags', set(), set())], log)
+
+    def test_call_is_run(self):
+        # An ErrorHolder can be called, in which case it behaves like run.
+        test = self.makePlaceHolder()
+        run_log = []
+        test.run(LoggingResult(run_log))
+        call_log = []
+        test(LoggingResult(call_log))
+        self.assertEqual(run_log, call_log)
+
+    def test_runs_without_result(self):
+        # An ErrorHolder can be run without a result, in which case there's
+        # no way to actually get at the result.
+        self.makePlaceHolder().run()
+
+    def test_debug(self):
+        # A PlaceHolder can be debugged.
+        self.makePlaceHolder().debug()
+
+
+class TestEquality(TestCase):
+    """Test ``TestCase``'s equality implementation."""
+
+    run_tests_with = FullStackRunTest
+
+    def test_identicalIsEqual(self):
+        # TestCase's are equal if they are identical.
+        self.assertEqual(self, self)
+
+    def test_nonIdenticalInUnequal(self):
+        # TestCase's are not equal if they are not identical.
+        self.assertNotEqual(TestCase(methodName='run'),
+            TestCase(methodName='skip'))
+
+
+class TestAssertions(TestCase):
+    """Test assertions in TestCase."""
+
+    run_tests_with = FullStackRunTest
+
+    def raiseError(self, exceptionFactory, *args, **kwargs):
+        raise exceptionFactory(*args, **kwargs)
+
+    def test_formatTypes_single(self):
+        # Given a single class, _formatTypes returns the name.
+        class Foo(object):
+            pass
+        self.assertEqual('Foo', self._formatTypes(Foo))
+
+    def test_formatTypes_multiple(self):
+        # Given multiple types, _formatTypes returns the names joined by
+        # commas.
+        class Foo(object):
+            pass
+        class Bar(object):
+            pass
+        self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
+
+    def test_assertRaises(self):
+        # assertRaises asserts that a callable raises a particular exception.
+        self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
+
+    def test_assertRaises_exception_w_metaclass(self):
+        # assertRaises works when called for exceptions with custom metaclasses
+        class MyExMeta(type):
+            def __init__(cls, name, bases, dct):
+                """ Do some dummy metaclass stuff """
+                dct.update({'answer': 42})
+                type.__init__(cls, name, bases, dct)
+
+        class MyEx(Exception):
+            __metaclass__ = MyExMeta
+
+        self.assertRaises(MyEx, self.raiseError, MyEx)
+
+    def test_assertRaises_fails_when_no_error_raised(self):
+        # assertRaises raises self.failureException when it's passed a
+        # callable that raises no error.
+        ret = ('orange', 42)
+        self.assertFails(
+            "<function ...<lambda> at ...> returned ('orange', 42)",
+            self.assertRaises, RuntimeError, lambda: ret)
+
+    def test_assertRaises_fails_when_different_error_raised(self):
+        # assertRaises re-raises an exception that it didn't expect.
+        self.assertThat(lambda: self.assertRaises(RuntimeError,
+            self.raiseError, ZeroDivisionError),
+            Raises(MatchesException(ZeroDivisionError)))
+
+    def test_assertRaises_returns_the_raised_exception(self):
+        # assertRaises returns the exception object that was raised. This is
+        # useful for testing that exceptions have the right message.
+
+        # This contraption stores the raised exception, so we can compare it
+        # to the return value of assertRaises.
+        raisedExceptions = []
+        def raiseError():
+            try:
+                raise RuntimeError('Deliberate error')
+            except RuntimeError:
+                raisedExceptions.append(sys.exc_info()[1])
+                raise
+
+        exception = self.assertRaises(RuntimeError, raiseError)
+        self.assertEqual(1, len(raisedExceptions))
+        self.assertTrue(
+            exception is raisedExceptions[0],
+            "%r is not %r" % (exception, raisedExceptions[0]))
+
+    def test_assertRaises_with_multiple_exceptions(self):
+        # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
+        # function raises one of ExceptionOne or ExceptionTwo.
+        expectedExceptions = (RuntimeError, ZeroDivisionError)
+        self.assertRaises(
+            expectedExceptions, self.raiseError, expectedExceptions[0])
+        self.assertRaises(
+            expectedExceptions, self.raiseError, expectedExceptions[1])
+
+    def test_assertRaises_with_multiple_exceptions_failure_mode(self):
+        # If assertRaises is called expecting one of a group of exceptions and
+        # a callable that doesn't raise an exception, then fail with an
+        # appropriate error message.
+        expectedExceptions = (RuntimeError, ZeroDivisionError)
+        self.assertRaises(
+            self.failureException,
+            self.assertRaises, expectedExceptions, lambda: None)
+        self.assertFails('<function ...<lambda> at ...> returned None',
+            self.assertRaises, expectedExceptions, lambda: None)
+
+    def test_assertRaises_function_repr_in_exception(self):
+        # When assertRaises fails, it includes the repr of the invoked
+        # function in the error message, so it's easy to locate the problem.
+        def foo():
+            """An arbitrary function."""
+            pass
+        self.assertThat(
+            lambda: self.assertRaises(Exception, foo),
+            Raises(
+                MatchesException(self.failureException, '.*%r.*' % (foo,))))
+
+    def assertFails(self, message, function, *args, **kwargs):
+        """Assert that function raises a failure with the given message."""
+        failure = self.assertRaises(
+            self.failureException, function, *args, **kwargs)
+        self.assertThat(failure, DocTestMatches(message, ELLIPSIS))
+
+    def test_assertIn_success(self):
+        # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
+        self.assertIn(3, range(10))
+        self.assertIn('foo', 'foo bar baz')
+        self.assertIn('foo', 'foo bar baz'.split())
+
+    def test_assertIn_failure(self):
+        # assertIn(needle, haystack) fails the test when 'needle' is not in
+        # 'haystack'.
+        self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
+        self.assertFails(
+            '%r not in %r' % ('qux', 'foo bar baz'),
+            self.assertIn, 'qux', 'foo bar baz')
+
+    def test_assertIn_failure_with_message(self):
+        # assertIn(needle, haystack) fails the test when 'needle' is not in
+        # 'haystack'.
+        self.assertFails('3 not in [0, 1, 2]: foo bar', self.assertIn, 3,
+                         [0, 1, 2], 'foo bar')
+        self.assertFails(
+            '%r not in %r: foo bar' % ('qux', 'foo bar baz'),
+            self.assertIn, 'qux', 'foo bar baz', 'foo bar')
+
+    def test_assertNotIn_success(self):
+        # assertNotIn(needle, haystack) asserts that 'needle' is not in
+        # 'haystack'.
+        self.assertNotIn(3, [0, 1, 2])
+        self.assertNotIn('qux', 'foo bar baz')
+
+    def test_assertNotIn_failure(self):
+        # assertNotIn(needle, haystack) fails the test when 'needle' is in
+        # 'haystack'.
+        self.assertFails('[1, 2, 3] matches Contains(3)', self.assertNotIn,
+            3, [1, 2, 3])
+        self.assertFails(
+            "'foo bar baz' matches Contains('foo')",
+            self.assertNotIn, 'foo', 'foo bar baz')
+
+    def test_assertNotIn_failure_with_message(self):
+        # assertNotIn(needle, haystack) fails the test when 'needle' is in
+        # 'haystack'.
+        self.assertFails('[1, 2, 3] matches Contains(3): foo bar', self.assertNotIn,
+            3, [1, 2, 3], 'foo bar')
+        self.assertFails(
+            "'foo bar baz' matches Contains('foo'): foo bar",
+            self.assertNotIn, 'foo', 'foo bar baz', "foo bar")
+
+    def test_assertIsInstance(self):
+        # assertIsInstance asserts that an object is an instance of a class.
+
+        class Foo(object):
+            """Simple class for testing assertIsInstance."""
+
+        foo = Foo()
+        self.assertIsInstance(foo, Foo)
+
+    def test_assertIsInstance_multiple_classes(self):
+        # assertIsInstance asserts that an object is an instance of one of a
+        # group of classes.
+
+        class Foo(object):
+            """Simple class for testing assertIsInstance."""
+
+        class Bar(object):
+            """Another simple class for testing assertIsInstance."""
+
+        foo = Foo()
+        self.assertIsInstance(foo, (Foo, Bar))
+        self.assertIsInstance(Bar(), (Foo, Bar))
+
+    def test_assertIsInstance_failure(self):
+        # assertIsInstance(obj, klass) fails the test when obj is not an
+        # instance of klass.
+
+        class Foo(object):
+            """Simple class for testing assertIsInstance."""
+
+        self.assertFails(
+            "'42' is not an instance of %s" % self._formatTypes(Foo),
+            self.assertIsInstance, 42, Foo)
+
+    def test_assertIsInstance_failure_multiple_classes(self):
+        # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
+        # not an instance of klass1 or klass2.
+
+        class Foo(object):
+            """Simple class for testing assertIsInstance."""
+
+        class Bar(object):
+            """Another simple class for testing assertIsInstance."""
+
+        self.assertFails(
+            "'42' is not an instance of any of (%s)" % self._formatTypes([Foo, Bar]),
+            self.assertIsInstance, 42, (Foo, Bar))
+
+    def test_assertIsInstance_overridden_message(self):
+        # assertIsInstance(obj, klass, msg) permits a custom message.
+        self.assertFails("'42' is not an instance of str: foo",
+            self.assertIsInstance, 42, str, "foo")
+
+    def test_assertIs(self):
+        # assertIs asserts that an object is identical to another object.
+        self.assertIs(None, None)
+        some_list = [42]
+        self.assertIs(some_list, some_list)
+        some_object = object()
+        self.assertIs(some_object, some_object)
+
+    def test_assertIs_fails(self):
+        # assertIs raises assertion errors if one object is not identical to
+        # another.
+        self.assertFails('None is not 42', self.assertIs, None, 42)
+        self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
+
+    def test_assertIs_fails_with_message(self):
+        # assertIs raises assertion errors if one object is not identical to
+        # another, and includes a user-supplied message, if it's provided.
+        self.assertFails(
+            'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
+
+    def test_assertIsNot(self):
+        # assertIsNot asserts that an object is not identical to another
+        # object.
+        self.assertIsNot(None, 42)
+        self.assertIsNot([42], [42])
+        self.assertIsNot(object(), object())
+
+    def test_assertIsNot_fails(self):
+        # assertIsNot raises assertion errors if one object is identical to
+        # another.
+        self.assertFails('None matches Is(None)', self.assertIsNot, None, None)
+        some_list = [42]
+        self.assertFails(
+            '[42] matches Is([42])', self.assertIsNot, some_list, some_list)
+
+    def test_assertIsNot_fails_with_message(self):
+        # assertIsNot raises assertion errors if one object is identical to
+        # another, and includes a user-supplied message if it's provided.
+        self.assertFails(
+            'None matches Is(None): foo bar', self.assertIsNot, None, None,
+            "foo bar")
+
+    def test_assertThat_matches_clean(self):
+        class Matcher(object):
+            def match(self, foo):
+                return None
+        self.assertThat("foo", Matcher())
+
+    def test_assertThat_mismatch_raises_description(self):
+        calls = []
+        class Mismatch(object):
+            def __init__(self, thing):
+                self.thing = thing
+            def describe(self):
+                calls.append(('describe_diff', self.thing))
+                return "object is not a thing"
+            def get_details(self):
+                return {}
+        class Matcher(object):
+            def match(self, thing):
+                calls.append(('match', thing))
+                return Mismatch(thing)
+            def __str__(self):
+                calls.append(('__str__',))
+                return "a description"
+        class Test(TestCase):
+            def test(self):
+                self.assertThat("foo", Matcher())
+        result = Test("test").run()
+        self.assertEqual([
+            ('match', "foo"),
+            ('describe_diff', "foo"),
+            ], calls)
+        self.assertFalse(result.wasSuccessful())
+
+    def test_assertThat_output(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = matcher.match(matchee).describe()
+        self.assertFails(expected, self.assertThat, matchee, matcher)
+
+    def test_assertThat_message_is_annotated(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = Annotate('woo', matcher).match(matchee).describe()
+        self.assertFails(expected, self.assertThat, matchee, matcher, 'woo')
+
+    def test_assertThat_verbose_output(self):
+        matchee = 'foo'
+        matcher = Equals('bar')
+        expected = (
+            'Match failed. Matchee: %r\n'
+            'Matcher: %s\n'
+            'Difference: %s\n' % (
+                matchee,
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        self.assertFails(
+            expected, self.assertThat, matchee, matcher, verbose=True)
+
+    def test_expectThat_matches_clean(self):
+        class Matcher(object):
+            def match(self, foo):
+                return None
+        self.expectThat("foo", Matcher())
+
+    def test_expectThat_mismatch_fails_test(self):
+        class Test(TestCase):
+            def test(self):
+                self.expectThat("foo", Equals("bar"))
+        result = Test("test").run()
+        self.assertFalse(result.wasSuccessful())
+
+    def test_expectThat_does_not_exit_test(self):
+        class Test(TestCase):
+            marker = False
+            def test(self):
+                self.expectThat("foo", Equals("bar"))
+                Test.marker = True
+        result = Test("test").run()
+        self.assertFalse(result.wasSuccessful())
+        self.assertTrue(Test.marker)
+
+    def test_expectThat_adds_detail(self):
+        class Test(TestCase):
+            def test(self):
+                self.expectThat("foo", Equals("bar"))
+        test = Test("test")
+        result = test.run()
+        details = test.getDetails()
+        self.assertTrue("Failed expectation" in details)
+
+    def test__force_failure_fails_test(self):
+        class Test(TestCase):
+            def test_foo(self):
+                self.force_failure = True
+                self.remaining_code_run = True
+        test = Test('test_foo')
+        result = test.run()
+        self.assertFalse(result.wasSuccessful())
+        self.assertTrue(test.remaining_code_run)
+
+    def get_error_string(self, e):
+        """Get the string showing how 'e' would be formatted in test output.
+
+        This is a little bit hacky, since it's designed to give consistent
+        output regardless of Python version.
+
+        In testtools, TestResult._exc_info_to_unicode is the point of dispatch
+        between various different implementations of methods that format
+        exceptions, so that's what we have to call. However, that method cares
+        about stack traces and formats the exception class. We don't care
+        about either of these, so we take its output and parse it a little.
+        """
+        error = TracebackContent((e.__class__, e, None), self).as_text()
+        # We aren't at all interested in the traceback.
+        if error.startswith('Traceback (most recent call last):\n'):
+            lines = error.splitlines(True)[1:]
+            for i, line in enumerate(lines):
+                if not line.startswith(' '):
+                    break
+            error = ''.join(lines[i:])
+        # We aren't interested in how the exception type is formatted.
+        exc_class, error = error.split(': ', 1)
+        return error
+
+    def test_assertThat_verbose_unicode(self):
+        # When assertThat is given matchees or matchers that contain non-ASCII
+        # unicode strings, we can still provide a meaningful error.
+        matchee = _u('\xa7')
+        matcher = Equals(_u('a'))
+        expected = (
+            'Match failed. Matchee: %s\n'
+            'Matcher: %s\n'
+            'Difference: %s\n\n' % (
+                repr(matchee).replace("\\xa7", matchee),
+                matcher,
+                matcher.match(matchee).describe(),
+                ))
+        e = self.assertRaises(
+            self.failureException, self.assertThat, matchee, matcher,
+            verbose=True)
+        self.assertEqual(expected, self.get_error_string(e))
+
+    def test_assertEqual_nice_formatting(self):
+        message = "These things ought not be equal."
+        a = ['apple', 'banana', 'cherry']
+        b = {'Thatcher': 'One who mends roofs of straw',
+             'Major': 'A military officer, ranked below colonel',
+             'Blair': 'To shout loudly',
+             'Brown': 'The colour of healthy human faeces'}
+        expected_error = '\n'.join([
+            '!=:',
+            'reference = %s' % pformat(a),
+            'actual    = %s' % pformat(b),
+            ': ' + message,
+            ])
+        self.assertFails(expected_error, self.assertEqual, a, b, message)
+        self.assertFails(expected_error, self.assertEquals, a, b, message)
+        self.assertFails(expected_error, self.failUnlessEqual, a, b, message)
+
+    def test_assertEqual_formatting_no_message(self):
+        a = "cat"
+        b = "dog"
+        expected_error = "'cat' != 'dog'"
+        self.assertFails(expected_error, self.assertEqual, a, b)
+        self.assertFails(expected_error, self.assertEquals, a, b)
+        self.assertFails(expected_error, self.failUnlessEqual, a, b)
+
+    def test_assertEqual_non_ascii_str_with_newlines(self):
+        message = _u("Be careful mixing unicode and bytes")
+        a = "a\n\xa7\n"
+        b = "Just a longish string so the more verbose output form is used."
+        expected_error = '\n'.join([
+            '!=:',
+            "reference = '''\\",
+            'a',
+            repr('\xa7')[1:-1],
+            "'''",
+            'actual    = %r' % (b,),
+            ': ' + message,
+            ])
+        self.assertFails(expected_error, self.assertEqual, a, b, message)
+
+    def test_assertIsNone(self):
+        self.assertIsNone(None)
+
+        expected_error = 'None is not 0'
+        self.assertFails(expected_error, self.assertIsNone, 0)
+
+    def test_assertIsNotNone(self):
+        self.assertIsNotNone(0)
+        self.assertIsNotNone("0")
+
+        expected_error = 'None matches Is(None)'
+        self.assertFails(expected_error, self.assertIsNotNone, None)
+
+    def test_fail_preserves_traceback_detail(self):
+        class Test(TestCase):
+            def test(self):
+                self.addDetail('traceback', text_content('foo'))
+                self.fail('bar')
+        test = Test('test')
+        result = ExtendedTestResult()
+        test.run(result)
+        self.assertEqual(set(['traceback', 'traceback-1']),
+            set(result._events[1][2].keys()))
+
+
+class TestAddCleanup(TestCase):
+    """Tests for TestCase.addCleanup."""
+
+    run_test_with = FullStackRunTest
+
+    class LoggingTest(TestCase):
+        """A test that logs calls to setUp, runTest and tearDown."""
+
+        def setUp(self):
+            TestCase.setUp(self)
+            self._calls = ['setUp']
+
+        def brokenSetUp(self):
+            # A setUp that deliberately fails.
+            self._calls = ['brokenSetUp']
+            raise RuntimeError('Deliberate Failure')
+
+        def runTest(self):
+            self._calls.append('runTest')
+
+        def brokenTest(self):
+            raise RuntimeError('Deliberate broken test')
+
+        def tearDown(self):
+            self._calls.append('tearDown')
+            TestCase.tearDown(self)
+
+    def setUp(self):
+        TestCase.setUp(self)
+        self._result_calls = []
+        self.test = TestAddCleanup.LoggingTest('runTest')
+        self.logging_result = LoggingResult(self._result_calls)
+
+    def assertErrorLogEqual(self, messages):
+        self.assertEqual(messages, [call[0] for call in self._result_calls])
+
+    def assertTestLogEqual(self, messages):
+        """Assert that the call log equals 'messages'."""
+        case = self._result_calls[0][1]
+        self.assertEqual(messages, case._calls)
+
+    def logAppender(self, message):
+        """A cleanup that appends 'message' to the tests log.
+
+        Cleanups are callables that are added to a test by addCleanup. To
+        verify that our cleanups run in the right order, we add strings to a
+        list that acts as a log. This method returns a cleanup that will add
+        the given message to that log when run.
+        """
+        self.test._calls.append(message)
+
+    def test_fixture(self):
+        # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
+        # This test doesn't test addCleanup itself, it just sanity checks the
+        # fixture.
+        self.test.run(self.logging_result)
+        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+    def test_cleanup_run_before_tearDown(self):
+        # Cleanup functions added with 'addCleanup' are called before tearDown
+        # runs.
+        self.test.addCleanup(self.logAppender, 'cleanup')
+        self.test.run(self.logging_result)
+        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
+
+    def test_add_cleanup_called_if_setUp_fails(self):
+        # Cleanup functions added with 'addCleanup' are called even if setUp
+        # fails. Note that tearDown has a different behavior: it is only
+        # called when setUp succeeds.
+        self.test.setUp = self.test.brokenSetUp
+        self.test.addCleanup(self.logAppender, 'cleanup')
+        self.test.run(self.logging_result)
+        self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
+
+    def test_addCleanup_called_in_reverse_order(self):
+        # Cleanup functions added with 'addCleanup' are called in reverse
+        # order.
+        #
+        # One of the main uses of addCleanup is to dynamically create
+        # resources that need some sort of explicit tearDown. Often one
+        # resource will be created in terms of another, e.g.,
+        #     self.first = self.makeFirst()
+        #     self.second = self.makeSecond(self.first)
+        #
+        # When this happens, we generally want to clean up the second resource
+        # before the first one, since the second depends on the first.
+        self.test.addCleanup(self.logAppender, 'first')
+        self.test.addCleanup(self.logAppender, 'second')
+        self.test.run(self.logging_result)
+        self.assertTestLogEqual(
+            ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+    def test_tearDown_runs_after_cleanup_failure(self):
+        # tearDown runs even if a cleanup function fails.
+        self.test.addCleanup(lambda: 1/0)
+        self.test.run(self.logging_result)
+        self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+    def test_cleanups_continue_running_after_error(self):
+        # All cleanups are always run, even if one or two of them fail.
+        self.test.addCleanup(self.logAppender, 'first')
+        self.test.addCleanup(lambda: 1/0)
+        self.test.addCleanup(self.logAppender, 'second')
+        self.test.run(self.logging_result)
+        self.assertTestLogEqual(
+            ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+    def test_error_in_cleanups_are_captured(self):
+        # If a cleanup raises an error, we want to record it and fail the
+        # test, even though we go on to run other cleanups.
+        self.test.addCleanup(lambda: 1/0)
+        self.test.run(self.logging_result)
+        self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
+
+    def test_keyboard_interrupt_not_caught(self):
+        # If a cleanup raises KeyboardInterrupt, it gets reraised.
+        def raiseKeyboardInterrupt():
+            raise KeyboardInterrupt()
+        self.test.addCleanup(raiseKeyboardInterrupt)
+        self.assertThat(lambda: self.test.run(self.logging_result),
+            Raises(MatchesException(KeyboardInterrupt)))
+
+    def test_all_errors_from_MultipleExceptions_reported(self):
+        # When a MultipleExceptions exception is caught, all the errors are
+        # reported.
+        def raiseMany():
+            try:
+                1/0
+            except Exception:
+                exc_info1 = sys.exc_info()
+            try:
+                1/0
+            except Exception:
+                exc_info2 = sys.exc_info()
+            raise MultipleExceptions(exc_info1, exc_info2)
+        self.test.addCleanup(raiseMany)
+        self.logging_result = ExtendedTestResult()
+        self.test.run(self.logging_result)
+        self.assertEqual(['startTest', 'addError', 'stopTest'],
+            [event[0] for event in self.logging_result._events])
+        self.assertEqual(set(['traceback', 'traceback-1']),
+            set(self.logging_result._events[1][2].keys()))
+
+    def test_multipleCleanupErrorsReported(self):
+        # Errors from all failing cleanups are reported as separate tracebacks.
+        self.test.addCleanup(lambda: 1/0)
+        self.test.addCleanup(lambda: 1/0)
+        self.logging_result = ExtendedTestResult()
+        self.test.run(self.logging_result)
+        self.assertEqual(['startTest', 'addError', 'stopTest'],
+            [event[0] for event in self.logging_result._events])
+        self.assertEqual(set(['traceback', 'traceback-1']),
+            set(self.logging_result._events[1][2].keys()))
+
+    def test_multipleErrorsCoreAndCleanupReported(self):
+        # Errors from the broken test and from all failing cleanups are
+        # reported as separate tracebacks within a single addError call.
+        self.test = TestAddCleanup.LoggingTest('brokenTest')
+        self.test.addCleanup(lambda: 1/0)
+        self.test.addCleanup(lambda: 1/0)
+        self.logging_result = ExtendedTestResult()
+        self.test.run(self.logging_result)
+        self.assertEqual(['startTest', 'addError', 'stopTest'],
+            [event[0] for event in self.logging_result._events])
+        self.assertEqual(set(['traceback', 'traceback-1', 'traceback-2']),
+            set(self.logging_result._events[1][2].keys()))
+
+
+class TestRunTestUsage(TestCase):
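+    """Tests for the last-resort exception handling in TestCase.run."""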
+
+    def test_last_resort_in_place(self):
+        class TestBase(TestCase):
+            def test_base_exception(self):
+                raise SystemExit(0)
+        result = ExtendedTestResult()
+        test = TestBase("test_base_exception")
+        self.assertRaises(SystemExit, test.run, result)
+        self.assertFalse(result.wasSuccessful())
+
+
+class TestWithDetails(TestCase):
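+    """Base class with helpers for checking the details given to a result."""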
+
+    run_test_with = FullStackRunTest
+
+    def assertDetailsProvided(self, case, expected_outcome, expected_keys):
+        """Assert that when case is run, details are provided to the result.
+
+        :param case: A TestCase to run.
+        :param expected_outcome: The call that should be made.
+        :param expected_keys: The keys to look for.
+        """
+        result = ExtendedTestResult()
+        case.run(result)
+        case = result._events[0][1]
+        expected = [
+            ('startTest', case),
+            (expected_outcome, case),
+            ('stopTest', case),
+            ]
+        self.assertEqual(3, len(result._events))
+        self.assertEqual(expected[0], result._events[0])
+        self.assertEqual(expected[1], result._events[1][0:2])
+        # Checking the TB is right is rather tricky. doctest line matching
+        # would help, but 'meh'.
+        self.assertEqual(sorted(expected_keys),
+            sorted(result._events[1][2].keys()))
+        self.assertEqual(expected[-1], result._events[-1])
+
+    def get_content(self):
+        return content.Content(
+            content.ContentType("text", "foo"), lambda: [_b('foo')])
+
+
+class TestExpectedFailure(TestWithDetails):
+    """Tests for expected failures and unexpected successess."""
+
+    run_test_with = FullStackRunTest
+
+    def make_unexpected_case(self):
+        class Case(TestCase):
+            def test(self):
+                raise testcase._UnexpectedSuccess
+        case = Case('test')
+        return case
+
+    def test_raising__UnexpectedSuccess_py27(self):
+        case = self.make_unexpected_case()
+        result = Python27TestResult()
+        case.run(result)
+        case = result._events[0][1]
+        self.assertEqual([
+            ('startTest', case),
+            ('addUnexpectedSuccess', case),
+            ('stopTest', case),
+            ], result._events)
+
+    def test_raising__UnexpectedSuccess_extended(self):
+        case = self.make_unexpected_case()
+        result = ExtendedTestResult()
+        case.run(result)
+        case = result._events[0][1]
+        self.assertEqual([
+            ('startTest', case),
+            ('addUnexpectedSuccess', case, {}),
+            ('stopTest', case),
+            ], result._events)
+
+    def make_xfail_case_xfails(self):
+        content = self.get_content()
+        class Case(TestCase):
+            def test(self):
+                self.addDetail("foo", content)
+                self.expectFailure("we are sad", self.assertEqual,
+                    1, 0)
+        case = Case('test')
+        return case
+
+    def make_xfail_case_succeeds(self):
+        content = self.get_content()
+        class Case(TestCase):
+            def test(self):
+                self.addDetail("foo", content)
+                self.expectFailure("we are sad", self.assertEqual,
+                    1, 1)
+        case = Case('test')
+        return case
+
+    def test_expectFailure_KnownFailure_extended(self):
+        case = self.make_xfail_case_xfails()
+        self.assertDetailsProvided(case, "addExpectedFailure",
+            ["foo", "traceback", "reason"])
+
+    def test_expectFailure_KnownFailure_unexpected_success(self):
+        case = self.make_xfail_case_succeeds()
+        self.assertDetailsProvided(case, "addUnexpectedSuccess",
+            ["foo", "reason"])
+
+    @skipIf(not hasattr(unittest, 'expectedFailure'), 'Need py27+')
+    def test_unittest_expectedFailure_decorator_works_with_failure(self):
+        class ReferenceTest(TestCase):
+            @unittest.expectedFailure
+            def test_fails_expectedly(self):
+                self.assertEquals(1, 0)
+
+        test = ReferenceTest('test_fails_expectedly')
+        result = test.run()
+        self.assertEqual(True, result.wasSuccessful())
+
+    @skipIf(not hasattr(unittest, 'expectedFailure'), 'Need py27+')
+    def test_unittest_expectedFailure_decorator_works_with_success(self):
+        class ReferenceTest(TestCase):
+            @unittest.expectedFailure
+            def test_passes_unexpectedly(self):
+                self.assertEquals(1, 1)
+
+        test = ReferenceTest('test_passes_unexpectedly')
+        result = test.run()
+        self.assertEqual(False, result.wasSuccessful())
+
+
+class TestUniqueFactories(TestCase):
+    """Tests for getUniqueString and getUniqueInteger."""
+
+    run_test_with = FullStackRunTest
+
+    def test_getUniqueInteger(self):
+        # getUniqueInteger returns an integer that increments each time you
+        # call it.
+        one = self.getUniqueInteger()
+        self.assertEqual(1, one)
+        two = self.getUniqueInteger()
+        self.assertEqual(2, two)
+
+    def test_getUniqueString(self):
+        # getUniqueString returns the current test id followed by a unique
+        # integer.
+        name_one = self.getUniqueString()
+        self.assertEqual('%s-%d' % (self.id(), 1), name_one)
+        name_two = self.getUniqueString()
+        self.assertEqual('%s-%d' % (self.id(), 2), name_two)
+
+    def test_getUniqueString_prefix(self):
+        # If getUniqueString is given an argument, it uses that argument as
+        # the prefix of the unique string, rather than the test id.
+        name_one = self.getUniqueString('foo')
+        self.assertThat(name_one, Equals('foo-1'))
+        name_two = self.getUniqueString('bar')
+        self.assertThat(name_two, Equals('bar-2'))
+
+
+class TestCloneTestWithNewId(TestCase):
+    """Tests for clone_test_with_new_id."""
+
+    run_test_with = FullStackRunTest
+
+    def test_clone_test_with_new_id(self):
+        class FooTestCase(TestCase):
+            def test_foo(self):
+                pass
+        test = FooTestCase('test_foo')
+        oldName = test.id()
+        newName = self.getUniqueString()
+        newTest = clone_test_with_new_id(test, newName)
+        self.assertEqual(newName, newTest.id())
+        self.assertEqual(oldName, test.id(),
+            "the original test instance should be unchanged.")
+
+    def test_cloned_testcase_does_not_share_details(self):
+        """A cloned TestCase does not share the details dict."""
+        class Test(TestCase):
+            def test_foo(self):
+                self.addDetail(
+                    'foo', content.Content('text/plain', lambda: 'foo'))
+        orig_test = Test('test_foo')
+        cloned_test = clone_test_with_new_id(orig_test, self.getUniqueString())
+        orig_test.run(unittest.TestResult())
+        self.assertEqual('foo', orig_test.getDetails()['foo'].iter_bytes())
+        self.assertEqual(None, cloned_test.getDetails().get('foo'))
+
+
+class TestDetailsProvided(TestWithDetails):
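+    """Tests that added details reach the result for every outcome."""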
+
+    run_test_with = FullStackRunTest
+
+    def test_addDetail(self):
+        mycontent = self.get_content()
+        self.addDetail("foo", mycontent)
+        details = self.getDetails()
+        self.assertEqual({"foo": mycontent}, details)
+
+    def test_addError(self):
+        class Case(TestCase):
+            def test(this):
+                this.addDetail("foo", self.get_content())
+                1/0
+        self.assertDetailsProvided(Case("test"), "addError",
+            ["foo", "traceback"])
+
+    def test_addFailure(self):
+        class Case(TestCase):
+            def test(this):
+                this.addDetail("foo", self.get_content())
+                self.fail('yo')
+        self.assertDetailsProvided(Case("test"), "addFailure",
+            ["foo", "traceback"])
+
+    def test_addSkip(self):
+        class Case(TestCase):
+            def test(this):
+                this.addDetail("foo", self.get_content())
+                self.skip('yo')
+        self.assertDetailsProvided(Case("test"), "addSkip",
+            ["foo", "reason"])
+
+    def test_addSuccess(self):
+        class Case(TestCase):
+            def test(this):
+                this.addDetail("foo", self.get_content())
+        self.assertDetailsProvided(Case("test"), "addSuccess",
+            ["foo"])
+
+    def test_addUnexpectedSuccess(self):
+        class Case(TestCase):
+            def test(this):
+                this.addDetail("foo", self.get_content())
+                raise testcase._UnexpectedSuccess()
+        self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
+            ["foo"])
+
+    def test_addDetails_from_Mismatch(self):
+        content = self.get_content()
+        class Mismatch(object):
+            def describe(self):
+                return "Mismatch"
+            def get_details(self):
+                return {"foo": content}
+        class Matcher(object):
+            def match(self, thing):
+                return Mismatch()
+            def __str__(self):
+                return "a description"
+        class Case(TestCase):
+            def test(self):
+                self.assertThat("foo", Matcher())
+        self.assertDetailsProvided(Case("test"), "addFailure",
+            ["foo", "traceback"])
+
+    def test_multiple_addDetails_from_Mismatch(self):
+        content = self.get_content()
+        class Mismatch(object):
+            def describe(self):
+                return "Mismatch"
+            def get_details(self):
+                return {"foo": content, "bar": content}
+        class Matcher(object):
+            def match(self, thing):
+                return Mismatch()
+            def __str__(self):
+                return "a description"
+        class Case(TestCase):
+            def test(self):
+                self.assertThat("foo", Matcher())
+        self.assertDetailsProvided(Case("test"), "addFailure",
+            ["bar", "foo", "traceback"])
+
+    def test_addDetails_with_same_name_as_key_from_get_details(self):
+        content = self.get_content()
+        class Mismatch(object):
+            def describe(self):
+                return "Mismatch"
+            def get_details(self):
+                return {"foo": content}
+        class Matcher(object):
+            def match(self, thing):
+                return Mismatch()
+            def __str__(self):
+                return "a description"
+        class Case(TestCase):
+            def test(self):
+                self.addDetail("foo", content)
+                self.assertThat("foo", Matcher())
+        self.assertDetailsProvided(Case("test"), "addFailure",
+            ["foo", "foo-1", "traceback"])
+
+    def test_addDetailUniqueName_works(self):
+        content = self.get_content()
+        class Case(TestCase):
+            def test(self):
+                self.addDetailUniqueName("foo", content)
+                self.addDetailUniqueName("foo", content)
+        self.assertDetailsProvided(Case("test"), "addSuccess",
+            ["foo", "foo-1"])
+
+
+class TestSetupTearDown(TestCase):
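+    """Tests that double or missing setUp/tearDown calls are reported."""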
+
+    run_test_with = FullStackRunTest
+
+    def test_setUpCalledTwice(self):
+        class CallsTooMuch(TestCase):
+            def test_method(self):
+                self.setUp()
+        result = unittest.TestResult()
+        CallsTooMuch('test_method').run(result)
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
+    def test_setUpNotCalled(self):
+        class DoesnotcallsetUp(TestCase):
+            def setUp(self):
+                pass
+            def test_method(self):
+                pass
+        result = unittest.TestResult()
+        DoesnotcallsetUp('test_method').run(result)
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
+    def test_tearDownCalledTwice(self):
+        class CallsTooMuch(TestCase):
+            def test_method(self):
+                self.tearDown()
+        result = unittest.TestResult()
+        CallsTooMuch('test_method').run(result)
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
+    def test_tearDownNotCalled(self):
+        class DoesnotcalltearDown(TestCase):
+            def test_method(self):
+                pass
+            def tearDown(self):
+                pass
+        result = unittest.TestResult()
+        DoesnotcalltearDown('test_method').run(result)
+        self.assertThat(result.errors, HasLength(1))
+        self.assertThat(result.errors[0][1],
+            DocTestMatches(
+                "...ValueError...File...testtools/tests/test_testcase.py...",
+                ELLIPSIS))
+
+
+require_py27_minimum = skipIf(
+    sys.version < '2.7',
+    "Requires python 2.7 or greater"
+)
+
+
+class TestSkipping(TestCase):
+    """Tests for skipping of tests functionality."""
+
+    run_test_with = FullStackRunTest
+
+    def test_skip_causes_skipException(self):
+        self.assertThat(lambda: self.skip("Skip this test"),
+            Raises(MatchesException(self.skipException)))
+
+    def test_can_use_skipTest(self):
+        self.assertThat(lambda: self.skipTest("Skip this test"),
+            Raises(MatchesException(self.skipException)))
+
+    def test_skip_without_reason_works(self):
+        class Test(TestCase):
+            def test(self):
+                raise self.skipException()
+        case = Test("test")
+        result = ExtendedTestResult()
+        case.run(result)
+        self.assertEqual('addSkip', result._events[1][0])
+        self.assertEqual('no reason given.',
+            result._events[1][2]['reason'].as_text())
+
+    def test_skipException_in_setup_calls_result_addSkip(self):
+        class TestThatRaisesInSetUp(TestCase):
+            def setUp(self):
+                TestCase.setUp(self)
+                self.skip("skipping this test")
+            def test_that_passes(self):
+                pass
+        calls = []
+        result = LoggingResult(calls)
+        test = TestThatRaisesInSetUp("test_that_passes")
+        test.run(result)
+        case = result._events[0][1]
+        self.assertEqual([('startTest', case),
+            ('addSkip', case, "skipping this test"), ('stopTest', case)],
+            calls)
+
+    def test_skipException_in_test_method_calls_result_addSkip(self):
+        class SkippingTest(TestCase):
+            def test_that_raises_skipException(self):
+                self.skip("skipping this test")
+        result = Python27TestResult()
+        test = SkippingTest("test_that_raises_skipException")
+        test.run(result)
+        case = result._events[0][1]
+        self.assertEqual([('startTest', case),
+            ('addSkip', case, "skipping this test"), ('stopTest', case)],
+            result._events)
+
+    def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
+        class SkippingTest(TestCase):
+            def setUp(self):
+                TestCase.setUp(self)
+                raise self.skipException("skipping this test")
+            def test_that_raises_skipException(self):
+                pass
+        result = Python26TestResult()
+        test = SkippingTest("test_that_raises_skipException")
+        test.run(result)
+        self.assertEqual('addSuccess', result._events[1][0])
+
+    def test_skip_with_old_result_object_calls_addSuccess(self):
+        class SkippingTest(TestCase):
+            def test_that_raises_skipException(self):
+                raise self.skipException("skipping this test")
+        result = Python26TestResult()
+        test = SkippingTest("test_that_raises_skipException")
+        test.run(result)
+        self.assertEqual('addSuccess', result._events[1][0])
+
+    def test_skip_decorator(self):
+        class SkippingTest(TestCase):
+            @skip("skipping this test")
+            def test_that_is_decorated_with_skip(self):
+                self.fail()
+        result = Python26TestResult()
+        test = SkippingTest("test_that_is_decorated_with_skip")
+        test.run(result)
+        self.assertEqual('addSuccess', result._events[1][0])
+
+    def test_skipIf_decorator(self):
+        class SkippingTest(TestCase):
+            @skipIf(True, "skipping this test")
+            def test_that_is_decorated_with_skipIf(self):
+                self.fail()
+        result = Python26TestResult()
+        test = SkippingTest("test_that_is_decorated_with_skipIf")
+        test.run(result)
+        self.assertEqual('addSuccess', result._events[1][0])
+
+    def test_skipUnless_decorator(self):
+        class SkippingTest(TestCase):
+            @skipUnless(False, "skipping this test")
+            def test_that_is_decorated_with_skipUnless(self):
+                self.fail()
+        result = Python26TestResult()
+        test = SkippingTest("test_that_is_decorated_with_skipUnless")
+        test.run(result)
+        self.assertEqual('addSuccess', result._events[1][0])
+
+    def check_skip_decorator_does_not_run_setup(self, decorator, reason):
+        class SkippingTest(TestCase):
+
+            setup_ran = False
+
+            def setUp(self):
+                super(SkippingTest, self).setUp()
+                self.setup_ran = True
+
+            # Use the decorator passed to us:
+            @decorator
+            def test_skipped(self):
+                self.fail()
+
+        test = SkippingTest('test_skipped')
+        result = test.run()
+        self.assertTrue(result.wasSuccessful())
+        self.assertTrue(reason in result.skip_reasons, result.skip_reasons)
+        self.assertFalse(test.setup_ran)
+
+    def test_testtools_skip_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            skip(reason),
+            reason
+        )
+
+    def test_testtools_skipIf_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            skipIf(True, reason),
+            reason
+        )
+
+    def test_testtools_skipUnless_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            skipUnless(False, reason),
+            reason
+        )
+
+    @require_py27_minimum
+    def test_unittest_skip_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            unittest.skip(reason),
+            reason
+        )
+
+    @require_py27_minimum
+    def test_unittest_skipIf_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            unittest.skipIf(True, reason),
+            reason
+        )
+
+    @require_py27_minimum
+    def test_unittest_skipUnless_decorator_does_not_run_setUp(self):
+        reason = self.getUniqueString()
+        self.check_skip_decorator_does_not_run_setup(
+            unittest.skipUnless(False, reason),
+            reason
+        )
+
+
+class TestOnException(TestCase):
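+    """Tests for TestCase.onException and addOnException."""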
+
+    run_test_with = FullStackRunTest
+
+    def test_default_works(self):
+        events = []
+        class Case(TestCase):
+            def method(self):
+                self.onException(an_exc_info)
+                events.append(True)
+        case = Case("method")
+        case.run()
+        self.assertThat(events, Equals([True]))
+
+    def test_added_handler_works(self):
+        events = []
+        class Case(TestCase):
+            def method(self):
+                self.addOnException(events.append)
+                self.onException(an_exc_info)
+        case = Case("method")
+        case.run()
+        self.assertThat(events, Equals([an_exc_info]))
+
+    def test_handler_that_raises_is_not_caught(self):
+        events = []
+        class Case(TestCase):
+            def method(self):
+                self.addOnException(events.index)
+                self.assertThat(lambda: self.onException(an_exc_info),
+                    Raises(MatchesException(ValueError)))
+        case = Case("method")
+        case.run()
+        self.assertThat(events, Equals([]))
+
+
+class TestPatchSupport(TestCase):
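+    """Tests for TestCase.patch."""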
+
+    run_test_with = FullStackRunTest
+
+    class Case(TestCase):
+        def test(self):
+            pass
+
+    def test_patch(self):
+        # TestCase.patch masks obj.attribute with the new value.
+        self.foo = 'original'
+        test = self.Case('test')
+        test.patch(self, 'foo', 'patched')
+        self.assertEqual('patched', self.foo)
+
+    def test_patch_restored_after_run(self):
+        # TestCase.patch masks obj.attribute with the new value, but restores
+        # the original value after the test is finished.
+        self.foo = 'original'
+        test = self.Case('test')
+        test.patch(self, 'foo', 'patched')
+        test.run()
+        self.assertEqual('original', self.foo)
+
+    def test_successive_patches_apply(self):
+        # TestCase.patch can be called multiple times per test. Each time you
+        # call it, it overrides the original value.
+        self.foo = 'original'
+        test = self.Case('test')
+        test.patch(self, 'foo', 'patched')
+        test.patch(self, 'foo', 'second')
+        self.assertEqual('second', self.foo)
+
+    def test_successive_patches_restored_after_run(self):
+        # TestCase.patch restores the original value, no matter how many times
+        # it was called.
+        self.foo = 'original'
+        test = self.Case('test')
+        test.patch(self, 'foo', 'patched')
+        test.patch(self, 'foo', 'second')
+        test.run()
+        self.assertEqual('original', self.foo)
+
+    def test_patch_nonexistent_attribute(self):
+        # TestCase.patch can be used to patch a non-existent attribute.
+        test = self.Case('test')
+        test.patch(self, 'doesntexist', 'patched')
+        self.assertEqual('patched', self.doesntexist)
+
+    def test_restore_nonexistent_attribute(self):
+        # TestCase.patch can be used to patch a non-existent attribute; after
+        # the test run, the attribute is removed from the object.
+        test = self.Case('test')
+        test.patch(self, 'doesntexist', 'patched')
+        test.run()
+        marker = object()
+        value = getattr(self, 'doesntexist', marker)
+        self.assertIs(marker, value)
+
+
+class TestTestCaseSuper(TestCase):
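+    """Tests that TestCase.setUp and tearDown cooperate with super()."""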
+
+    run_test_with = FullStackRunTest
+
+    def test_setup_uses_super(self):
+        class OtherBaseCase(unittest.TestCase):
+            setup_called = False
+            def setUp(self):
+                self.setup_called = True
+                super(OtherBaseCase, self).setUp()
+        class OurCase(TestCase, OtherBaseCase):
+            def runTest(self):
+                pass
+        test = OurCase()
+        test.setUp()
+        test.tearDown()
+        self.assertTrue(test.setup_called)
+
+    def test_teardown_uses_super(self):
+        class OtherBaseCase(unittest.TestCase):
+            teardown_called = False
+            def tearDown(self):
+                self.teardown_called = True
+                super(OtherBaseCase, self).tearDown()
+        class OurCase(TestCase, OtherBaseCase):
+            def runTest(self):
+                pass
+        test = OurCase()
+        test.setUp()
+        test.tearDown()
+        self.assertTrue(test.teardown_called)
+
+
+class TestNullary(TestCase):
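+    """Tests for Nullary, which wraps a callable and its arguments."""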
+
+    def test_repr(self):
+        # The repr() of a Nullary is the same as the repr() of the wrapped
+        # function.
+        def foo():
+            pass
+        wrapped = Nullary(foo)
+        self.assertEqual(repr(wrapped), repr(foo))
+
+    def test_called_with_arguments(self):
+        # The function is called with the arguments given to Nullary's
+        # constructor.
+        l = []
+        def foo(*args, **kwargs):
+            l.append((args, kwargs))
+        wrapped = Nullary(foo, 1, 2, a="b")
+        wrapped()
+        self.assertEqual(l, [((1, 2), {'a': 'b'})])
+
+    def test_returns_wrapped(self):
+        # Calling Nullary returns whatever the function returns.
+        ret = object()
+        wrapped = Nullary(lambda: ret)
+        self.assertIs(ret, wrapped())
+
+    def test_raises(self):
+        # If the function raises, so does Nullary when called.
+        wrapped = Nullary(lambda: 1/0)
+        self.assertRaises(ZeroDivisionError, wrapped)
+
+
+class TestAttributes(TestCase):
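+    """Tests for the attr decorator and the WithAttributes mixin."""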
+
+    def test_simple_attr(self):
+        # Adding an attr to a test changes its id().
+        class MyTest(WithAttributes, TestCase):
+            @attr('foo')
+            def test_bar(self):
+                pass
+        case = MyTest('test_bar')
+        self.assertEqual('testtools.tests.test_testcase.MyTest.test_bar[foo]',
+            case.id())
+
+    def test_multiple_attributes(self):
+        class MyTest(WithAttributes, TestCase):
+            # Not sorted here, forward or backwards.
+            @attr('foo', 'quux', 'bar')
+            def test_bar(self):
+                pass
+        case = MyTest('test_bar')
+        self.assertEqual(
+            'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+            case.id())
+
+    def test_multiple_attr_decorators(self):
+        class MyTest(WithAttributes, TestCase):
+            # Not sorted here, forward or backwards.
+            @attr('bar')
+            @attr('quux')
+            @attr('foo')
+            def test_bar(self):
+                pass
+        case = MyTest('test_bar')
+        self.assertEqual(
+            'testtools.tests.test_testcase.MyTest.test_bar[bar,foo,quux]',
+            case.id())
+
+
+class TestDecorateTestCaseResult(TestCase):
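+    """Tests for DecorateTestCaseResult."""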
+
+    def setUp(self):
+        super(TestDecorateTestCaseResult, self).setUp()
+        self.log = []
+
+    def make_result(self, result):
+        self.log.append(('result', result))
+        return LoggingResult(self.log)
+
+    def test___call__(self):
+        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+        case(None)
+        case('something')
+        self.assertEqual([('result', None),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            ('result', 'something'),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set())
+            ], self.log)
+
+    def test_run(self):
+        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result)
+        case.run(None)
+        case.run('something')
+        self.assertEqual([('result', None),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            ('result', 'something'),
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set())
+            ], self.log)
+
+    def test_before_after_hooks(self):
+        case = DecorateTestCaseResult(PlaceHolder('foo'), self.make_result,
+            before_run=lambda result: self.log.append('before'),
+            after_run=lambda result: self.log.append('after'))
+        case.run(None)
+        case(None)
+        self.assertEqual([
+            ('result', None),
+            'before',
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            'after',
+            ('result', None),
+            'before',
+            ('tags', set(), set()),
+            ('startTest', case.decorated),
+            ('addSuccess', case.decorated),
+            ('stopTest', case.decorated),
+            ('tags', set(), set()),
+            'after',
+            ], self.log)
+
+    def test_other_attribute(self):
+        orig = PlaceHolder('foo')
+        orig.thing = 'fred'
+        case = DecorateTestCaseResult(orig, self.make_result)
+        self.assertEqual('fred', case.thing)
+        self.assertRaises(AttributeError, getattr, case, 'other')
+        case.other = 'barbara'
+        self.assertEqual('barbara', orig.other)
+        del case.thing
+        self.assertRaises(AttributeError, getattr, orig, 'thing')
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_testresult.py b/third_party/testtools/testtools/tests/test_testresult.py
new file mode 100644
index 0000000..a8034b2
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_testresult.py
@@ -0,0 +1,2913 @@
+# Copyright (c) 2008-2012 testtools developers. See LICENSE for details.
+
+"""Test TestResults and related things."""
+
+__metaclass__ = type
+
+import codecs
+import datetime
+import doctest
+from itertools import chain, combinations
+import os
+import re
+import shutil
+import sys
+import tempfile
+import threading
+from unittest import TestSuite
+import warnings
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+from testtools import (
+    CopyStreamResult,
+    ExtendedToOriginalDecorator,
+    ExtendedToStreamDecorator,
+    MultiTestResult,
+    PlaceHolder,
+    StreamFailFast,
+    StreamResult,
+    StreamResultRouter,
+    StreamSummary,
+    StreamTagger,
+    StreamToDict,
+    StreamToExtendedDecorator,
+    StreamToQueue,
+    Tagger,
+    TestCase,
+    TestControl,
+    TestResult,
+    TestResultDecorator,
+    TestByTestResult,
+    TextTestResult,
+    ThreadsafeForwardingResult,
+    TimestampingStreamResult,
+    testresult,
+    )
+from testtools.compat import (
+    _b,
+    _get_exception_encoding,
+    _r,
+    _u,
+    advance_iterator,
+    str_is_unicode,
+    StringIO,
+    )
+from testtools.content import (
+    Content,
+    content_from_stream,
+    text_content,
+    TracebackContent,
+    )
+from testtools.content_type import ContentType, UTF8_TEXT
+from testtools.matchers import (
+    AllMatch,
+    Contains,
+    DocTestMatches,
+    Equals,
+    HasLength,
+    MatchesAny,
+    MatchesException,
+    MatchesRegex,
+    Raises,
+    )
+from testtools.tests.helpers import (
+    an_exc_info,
+    FullStackRunTest,
+    LoggingResult,
+    run_with_stack_hidden,
+    )
+from testtools.testresult.doubles import (
+    Python26TestResult,
+    Python27TestResult,
+    ExtendedTestResult,
+    StreamResult as LoggingStreamResult,
+    )
+from testtools.testresult.real import (
+    _details_to_str,
+    _merge_tags,
+    utc,
+    )
+
+
+def make_erroring_test():
+    class Test(TestCase):
+        def error(self):
+            1/0
+    return Test("error")
+
+
+def make_failing_test():
+    class Test(TestCase):
+        def failed(self):
+            self.fail("yo!")
+    return Test("failed")
+
+
+def make_mismatching_test():
+    class Test(TestCase):
+        def mismatch(self):
+            self.assertEqual(1, 2)
+    return Test("mismatch")
+
+
+def make_unexpectedly_successful_test():
+    class Test(TestCase):
+        def succeeded(self):
+            self.expectFailure("yo!", lambda: None)
+    return Test("succeeded")
+
+
+def make_test():
+    class Test(TestCase):
+        def test(self):
+            pass
+    return Test("test")
+
+
+def make_exception_info(exceptionFactory, *args, **kwargs):
+    try:
+        raise exceptionFactory(*args, **kwargs)
+    except:
+        return sys.exc_info()
+
+
+class Python26Contract(object):
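+    """Contract tests for the TestResult API as of Python 2.6."""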
+
+    def test_fresh_result_is_successful(self):
+        # A result is considered successful before any tests are run.
+        result = self.makeResult()
+        self.assertTrue(result.wasSuccessful())
+
+    def test_addError_is_failure(self):
+        # addError fails the test run.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addError(self, an_exc_info)
+        result.stopTest(self)
+        self.assertFalse(result.wasSuccessful())
+
+    def test_addFailure_is_failure(self):
+        # addFailure fails the test run.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addFailure(self, an_exc_info)
+        result.stopTest(self)
+        self.assertFalse(result.wasSuccessful())
+
+    def test_addSuccess_is_success(self):
+        # addSuccess does not fail the test run.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addSuccess(self)
+        result.stopTest(self)
+        self.assertTrue(result.wasSuccessful())
+
+    def test_stop_sets_shouldStop(self):
+        result = self.makeResult()
+        result.stop()
+        self.assertTrue(result.shouldStop)
+
+
+class Python27Contract(Python26Contract):
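+    """Contract tests for the TestResult API as extended in Python 2.7."""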
+
+    def test_addExpectedFailure(self):
+        # Calling addExpectedFailure(test, exc_info) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addExpectedFailure(self, an_exc_info)
+
+    def test_addExpectedFailure_is_success(self):
+        # addExpectedFailure does not fail the test run.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addExpectedFailure(self, an_exc_info)
+        result.stopTest(self)
+        self.assertTrue(result.wasSuccessful())
+
+    def test_addSkipped(self):
+        # Calling addSkip(test, reason) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addSkip(self, _u("Skipped for some reason"))
+
+    def test_addSkip_is_success(self):
+        # addSkip does not fail the test run.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addSkip(self, _u("Skipped for some reason"))
+        result.stopTest(self)
+        self.assertTrue(result.wasSuccessful())
+
+    def test_addUnexpectedSuccess(self):
+        # Calling addUnexpectedSuccess(test) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addUnexpectedSuccess(self)
+
+    def test_addUnexpectedSuccess_was_successful(self):
+        # addUnexpectedSuccess does not fail the test run in Python 2.7.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addUnexpectedSuccess(self)
+        result.stopTest(self)
+        self.assertTrue(result.wasSuccessful())
+
+    def test_startStopTestRun(self):
+        # Calling startTestRun and stopTestRun completes ok.
+        result = self.makeResult()
+        result.startTestRun()
+        result.stopTestRun()
+
+    def test_failfast(self):
+        result = self.makeResult()
+        result.failfast = True
+        class Failing(TestCase):
+            def test_a(self):
+                self.fail('a')
+            def test_b(self):
+                self.fail('b')
+        TestSuite([Failing('test_a'), Failing('test_b')]).run(result)
+        self.assertEqual(1, result.testsRun)
+
+
+class TagsContract(Python27Contract):
+    """Tests to ensure correct tagging behaviour.
+
+    See the subunit docs for guidelines on how this is supposed to work.
+    """
+
+    def test_no_tags_by_default(self):
+        # Results initially have no tags.
+        result = self.makeResult()
+        result.startTestRun()
+        self.assertEqual(frozenset(), result.current_tags)
+
+    def test_adding_tags(self):
+        # Tags are added using 'tags' and thus become visible in
+        # 'current_tags'.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        self.assertEqual(set(['foo']), result.current_tags)
+
+    def test_removing_tags(self):
+        # Tags are removed using 'tags'.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        result.tags(set(), set(['foo']))
+        self.assertEqual(set(), result.current_tags)
+
+    def test_startTestRun_resets_tags(self):
+        # startTestRun makes a new test run, and thus clears all the tags.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        result.startTestRun()
+        self.assertEqual(set(), result.current_tags)
+
+    def test_add_tags_within_test(self):
+        # Tags can be added while a test is running.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        result.startTest(self)
+        result.tags(set(['bar']), set())
+        self.assertEqual(set(['foo', 'bar']), result.current_tags)
+
+    def test_tags_added_in_test_are_reverted(self):
+        # Tags added during a test run are then reverted once that test has
+        # finished.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        result.startTest(self)
+        result.tags(set(['bar']), set())
+        result.addSuccess(self)
+        result.stopTest(self)
+        self.assertEqual(set(['foo']), result.current_tags)
+
+    def test_tags_removed_in_test(self):
+        # Tags can be removed during tests.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        result.startTest(self)
+        result.tags(set(), set(['foo']))
+        self.assertEqual(set(), result.current_tags)
+
+    def test_tags_removed_in_test_are_restored(self):
+        # Tags removed during tests are restored once that test has finished.
+        result = self.makeResult()
+        result.startTestRun()
+        result.tags(set(['foo']), set())
+        result.startTest(self)
+        result.tags(set(), set(['foo']))
+        result.addSuccess(self)
+        result.stopTest(self)
+        self.assertEqual(set(['foo']), result.current_tags)
+
+
+class DetailsContract(TagsContract):
+    """Tests for the details API of TestResults."""
+
+    def test_addExpectedFailure_details(self):
+        # Calling addExpectedFailure(test, details=xxx) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addExpectedFailure(self, details={})
+
+    def test_addError_details(self):
+        # Calling addError(test, details=xxx) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addError(self, details={})
+
+    def test_addFailure_details(self):
+        # Calling addFailure(test, details=xxx) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addFailure(self, details={})
+
+    def test_addSkipped_details(self):
+        # Calling addSkip(test, details=xxx) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addSkip(self, details={})
+
+    def test_addUnexpectedSuccess_details(self):
+        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addUnexpectedSuccess(self, details={})
+
+    def test_addSuccess_details(self):
+        # Calling addSuccess(test, details=xxx) completes ok.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addSuccess(self, details={})
+
+
+class FallbackContract(DetailsContract):
+    """When we fallback we take our policy choice to map calls.
+
+    For instance, we map unexpectedSuccess to an error code, not to success.
+    """
+
+    def test_addUnexpectedSuccess_was_successful(self):
+        # addUnexpectedSuccess fails the test run in testtools.
+        result = self.makeResult()
+        result.startTest(self)
+        result.addUnexpectedSuccess(self)
+        result.stopTest(self)
+        self.assertFalse(result.wasSuccessful())
+
+
+class StartTestRunContract(FallbackContract):
+    """Defines the contract for testtools policy choices.
+
+    That is, things which are not simply extensions to unittest but choices
+    we have made differently.
+    """
+
+    def test_startTestRun_resets_unexpected_success(self):
+        result = self.makeResult()
+        result.startTest(self)
+        result.addUnexpectedSuccess(self)
+        result.stopTest(self)
+        result.startTestRun()
+        self.assertTrue(result.wasSuccessful())
+
+    def test_startTestRun_resets_failure(self):
+        result = self.makeResult()
+        result.startTest(self)
+        result.addFailure(self, an_exc_info)
+        result.stopTest(self)
+        result.startTestRun()
+        self.assertTrue(result.wasSuccessful())
+
+    def test_startTestRun_resets_errors(self):
+        result = self.makeResult()
+        result.startTest(self)
+        result.addError(self, an_exc_info)
+        result.stopTest(self)
+        result.startTestRun()
+        self.assertTrue(result.wasSuccessful())
+
+
+class TestTestResultContract(TestCase, StartTestRunContract):
+
+    run_tests_with = FullStackRunTest
+
+    def makeResult(self):
+        return TestResult()
+
+
+class TestMultiTestResultContract(TestCase, StartTestRunContract):
+
+    run_tests_with = FullStackRunTest
+
+    def makeResult(self):
+        return MultiTestResult(TestResult(), TestResult())
+
+
+class TestTextTestResultContract(TestCase, StartTestRunContract):
+
+    run_tests_with = FullStackRunTest
+
+    def makeResult(self):
+        return TextTestResult(StringIO())
+
+
+class TestThreadSafeForwardingResultContract(TestCase, StartTestRunContract):
+
+    run_tests_with = FullStackRunTest
+
+    def makeResult(self):
+        result_semaphore = threading.Semaphore(1)
+        target = TestResult()
+        return ThreadsafeForwardingResult(target, result_semaphore)
+
+
+class TestExtendedTestResultContract(TestCase, StartTestRunContract):
+
+    def makeResult(self):
+        return ExtendedTestResult()
+
+
+class TestPython26TestResultContract(TestCase, Python26Contract):
+
+    def makeResult(self):
+        return Python26TestResult()
+
+
+class TestAdaptedPython26TestResultContract(TestCase, FallbackContract):
+
+    def makeResult(self):
+        return ExtendedToOriginalDecorator(Python26TestResult())
+
+
+class TestPython27TestResultContract(TestCase, Python27Contract):
+
+    def makeResult(self):
+        return Python27TestResult()
+
+
+class TestAdaptedPython27TestResultContract(TestCase, DetailsContract):
+
+    def makeResult(self):
+        return ExtendedToOriginalDecorator(Python27TestResult())
+
+
+class TestAdaptedStreamResult(TestCase, DetailsContract):
+
+    def makeResult(self):
+        return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestTestResultDecoratorContract(TestCase, StartTestRunContract):
+
+    run_tests_with = FullStackRunTest
+
+    def makeResult(self):
+        return TestResultDecorator(TestResult())
+
+
+# DetailsContract because ExtendedToStreamDecorator follows Python's
+# uxsuccess handling (it does not map uxsuccess to a failed run).
+class TestStreamToExtendedContract(TestCase, DetailsContract):
+
+    def makeResult(self):
+        return ExtendedToStreamDecorator(
+            StreamToExtendedDecorator(ExtendedTestResult()))
+
+
+class TestStreamResultContract(object):
+
+    def _make_result(self):
+        raise NotImplementedError(self._make_result)
+
+    def test_startTestRun(self):
+        result = self._make_result()
+        result.startTestRun()
+        result.stopTestRun()
+
+    def test_files(self):
+        # Test parameter combinations when files are being emitted.
+        result = self._make_result()
+        result.startTestRun()
+        self.addCleanup(result.stopTestRun)
+        now = datetime.datetime.now(utc)
+        inputs = list(dict(
+            eof=True,
+            mime_type="text/plain",
+            route_code=_u("1234"),
+            test_id=_u("foo"),
+            timestamp=now,
+            ).items())
+        param_dicts = self._power_set(inputs)
+        for kwargs in param_dicts:
+            result.status(file_name=_u("foo"), file_bytes=_b(""), **kwargs)
+            result.status(file_name=_u("foo"), file_bytes=_b("bar"), **kwargs)
+
+    def test_test_status(self):
+        # Tests non-file attachment parameter combinations.
+        result = self._make_result()
+        result.startTestRun()
+        self.addCleanup(result.stopTestRun)
+        now = datetime.datetime.now(utc)
+        args = [[_u("foo"), s] for s in ['exists', 'inprogress', 'xfail',
+            'uxsuccess', 'success', 'fail', 'skip']]
+        inputs = list(dict(
+            runnable=False,
+            test_tags=set(['quux']),
+            route_code=_u("1234"),
+            timestamp=now,
+            ).items())
+        param_dicts = self._power_set(inputs)
+        for kwargs in param_dicts:
+            for arg in args:
+                result.status(test_id=arg[0], test_status=arg[1], **kwargs)
+
+    def _power_set(self, iterable):
+        "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
+        s = list(iterable)
+        param_dicts = []
+        for ss in chain.from_iterable(combinations(s, r) for r in range(len(s)+1)):
+            param_dicts.append(dict(ss))
+        return param_dicts
+
+
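+# Quick illustration, not part of the vendored suite, of what _power_set
+# produces: every subset of the optional keyword arguments, so status()
+# above is exercised with each combination of parameters. Uses the
+# combinations import already present in this module.
+def _demo_power_set():
+    inputs = list(dict(eof=True, route_code=_u("1")).items())
+    subsets = [dict(c) for r in range(len(inputs) + 1)
+               for c in combinations(inputs, r)]
+    # subsets is (in some order):
+    # [{}, {'eof': True}, {'route_code': '1'},
+    #  {'eof': True, 'route_code': '1'}]
+    return subsets
+
+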
+class TestBaseStreamResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamResult()
+
+
+class TestCopyStreamResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return CopyStreamResult([StreamResult(), StreamResult()])
+
+
+class TestDoubleStreamResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return LoggingStreamResult()
+
+
+class TestExtendedToStreamDecoratorContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return ExtendedToStreamDecorator(StreamResult())
+
+
+class TestStreamSummaryResultContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamSummary()
+
+
+class TestStreamTaggerContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamTagger([StreamResult()], add=set(), discard=set())
+
+
+class TestStreamToDictContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamToDict(lambda x: None)
+
+
+class TestStreamToExtendedDecoratorContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamToExtendedDecorator(ExtendedTestResult())
+
+
+class TestStreamToQueueContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        queue = Queue()
+        return StreamToQueue(queue, "foo")
+
+
+class TestStreamFailFastContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamFailFast(lambda: None)
+
+
+class TestStreamResultRouterContract(TestCase, TestStreamResultContract):
+
+    def _make_result(self):
+        return StreamResultRouter(StreamResult())
+
+
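+# Key to the event tuples recorded by LoggingStreamResult and compared in
+# the tests below: each 'status' event stores the status() arguments in
+# positional order:
+#   ('status', test_id, test_status, test_tags, runnable,
+#    file_name, file_bytes, eof, mime_type, route_code, timestamp)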
+class TestDoubleStreamResultEvents(TestCase):
+
+    def test_startTestRun(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        self.assertEqual([('startTestRun',)], result._events)
+
+    def test_stopTestRun(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        result.stopTestRun()
+        self.assertEqual([('startTestRun',), ('stopTestRun',)], result._events)
+
+    def test_file(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        now = datetime.datetime.now(utc)
+        result.status(file_name="foo", file_bytes="bar", eof=True, mime_type="text/json",
+            test_id="id", route_code='abc', timestamp=now)
+        self.assertEqual(
+            [('startTestRun',),
+             ('status', 'id', None, None, True, 'foo', 'bar', True, 'text/json', 'abc', now)],
+            result._events)
+
+    def test_status(self):
+        result = LoggingStreamResult()
+        result.startTestRun()
+        now = datetime.datetime.now(utc)
+        result.status("foo", "success", test_tags=set(['tag']),
+            runnable=False, route_code='abc', timestamp=now)
+        self.assertEqual(
+            [('startTestRun',),
+             ('status', 'foo', 'success', set(['tag']), False, None, None, False, None, 'abc', now)],
+            result._events)
+
+
+class TestCopyStreamResultCopies(TestCase):
+
+    def setUp(self):
+        super(TestCopyStreamResultCopies, self).setUp()
+        self.target1 = LoggingStreamResult()
+        self.target2 = LoggingStreamResult()
+        self.targets = [self.target1._events, self.target2._events]
+        self.result = CopyStreamResult([self.target1, self.target2])
+
+    def test_startTestRun(self):
+        self.result.startTestRun()
+        self.assertThat(self.targets, AllMatch(Equals([('startTestRun',)])))
+
+    def test_stopTestRun(self):
+        self.result.startTestRun()
+        self.result.stopTestRun()
+        self.assertThat(self.targets,
+            AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+
+    def test_status(self):
+        self.result.startTestRun()
+        now = datetime.datetime.now(utc)
+        self.result.status("foo", "success", test_tags=set(['tag']),
+            runnable=False, file_name="foo", file_bytes=b'bar', eof=True,
+            mime_type="text/json", route_code='abc', timestamp=now)
+        self.assertThat(self.targets,
+            AllMatch(Equals([('startTestRun',),
+                ('status', 'foo', 'success', set(['tag']), False, "foo",
+                 b'bar', True, "text/json", 'abc', now)
+                ])))
+
+
+class TestStreamTagger(TestCase):
+
+    def test_adding(self):
+        log = LoggingStreamResult()
+        result = StreamTagger([log], add=['foo'])
+        result.startTestRun()
+        result.status()
+        result.status(test_tags=set(['bar']))
+        result.status(test_tags=None)
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+            ('status', None, None, set(['foo', 'bar']), True, None, None, False, None, None, None),
+            ('status', None, None, set(['foo']), True, None, None, False, None, None, None),
+            ('stopTestRun',),
+            ], log._events)
+
+    def test_discarding(self):
+        log = LoggingStreamResult()
+        result = StreamTagger([log], discard=['foo'])
+        result.startTestRun()
+        result.status()
+        result.status(test_tags=None)
+        result.status(test_tags=set(['foo']))
+        result.status(test_tags=set(['bar']))
+        result.status(test_tags=set(['foo', 'bar']))
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status', None, None, None, True, None, None, False, None, None, None),
+            ('status', None, None, None, True, None, None, False, None, None, None),
+            ('status', None, None, None, True, None, None, False, None, None, None),
+            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+            ('status', None, None, set(['bar']), True, None, None, False, None, None, None),
+            ('stopTestRun',),
+            ], log._events)
+
+
+class TestStreamToDict(TestCase):
+
+    def test_hung_test(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status('foo', 'inprogress')
+        self.assertEqual([], tests)
+        result.stopTestRun()
+        self.assertEqual([
+            {'id': 'foo', 'tags': set(), 'details': {}, 'status': 'inprogress',
+             'timestamps': [None, None]}
+            ], tests)
+
+    def test_all_terminal_states_reported(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status('success', 'success')
+        result.status('skip', 'skip')
+        result.status('exists', 'exists')
+        result.status('fail', 'fail')
+        result.status('xfail', 'xfail')
+        result.status('uxsuccess', 'uxsuccess')
+        self.assertThat(tests, HasLength(6))
+        self.assertEqual(
+            ['success', 'skip', 'exists', 'fail', 'xfail', 'uxsuccess'],
+            [test['id'] for test in tests])
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(6))
+
+    def test_files_reported(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status(file_name="some log.txt",
+            file_bytes=_b("1234 log message"), eof=True,
+            mime_type="text/plain; charset=utf8", test_id="foo.bar")
+        result.status(file_name="another file",
+            file_bytes=_b("""Traceback..."""), test_id="foo.bar")
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(1))
+        test = tests[0]
+        self.assertEqual("foo.bar", test['id'])
+        self.assertEqual("unknown", test['status'])
+        details = test['details']
+        self.assertEqual(
+            _u("1234 log message"), details['some log.txt'].as_text())
+        self.assertEqual(
+            _b("Traceback..."),
+            _b('').join(details['another file'].iter_bytes()))
+        self.assertEqual(
+            "application/octet-stream", repr(details['another file'].content_type))
+
+    def test_bad_mime(self):
+        # testtools used to emit malformed MIME types; this tests that the
+        # specific corruption is handled.
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status(file_name="file", file_bytes=b'a',
+            mime_type='text/plain; charset=utf8, language=python',
+            test_id='id')
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(1))
+        test = tests[0]
+        self.assertEqual("id", test['id'])
+        details = test['details']
+        self.assertEqual(_u("a"), details['file'].as_text())
+        self.assertEqual(
+            "text/plain; charset=\"utf8\"",
+            repr(details['file'].content_type))
+
+    def test_timestamps(self):
+        tests = []
+        result = StreamToDict(tests.append)
+        result.startTestRun()
+        result.status(test_id='foo', test_status='inprogress', timestamp="A")
+        result.status(test_id='foo', test_status='success', timestamp="B")
+        result.status(test_id='bar', test_status='inprogress', timestamp="C")
+        result.stopTestRun()
+        self.assertThat(tests, HasLength(2))
+        self.assertEqual(["A", "B"], tests[0]['timestamps'])
+        self.assertEqual(["C", None], tests[1]['timestamps'])
+
+
+class TestExtendedToStreamDecorator(TestCase):
+
+    def test_explicit_time(self):
+        log = LoggingStreamResult()
+        result = ExtendedToStreamDecorator(log)
+        result.startTestRun()
+        now = datetime.datetime.now(utc)
+        result.time(now)
+        result.startTest(self)
+        result.addSuccess(self)
+        result.stopTest(self)
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status',
+             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+             'inprogress',
+             None,
+             True,
+             None,
+             None,
+             False,
+             None,
+             None,
+             now),
+            ('status',
+             'testtools.tests.test_testresult.TestExtendedToStreamDecorator.test_explicit_time',
+             'success',
+              set(),
+              True,
+              None,
+              None,
+              False,
+              None,
+              None,
+              now),
+             ('stopTestRun',)], log._events)
+
+    def test_wasSuccessful_after_stopTestRun(self):
+        log = LoggingStreamResult()
+        result = ExtendedToStreamDecorator(log)
+        result.startTestRun()
+        result.status(test_id='foo', test_status='fail')
+        result.stopTestRun()
+        self.assertEqual(False, result.wasSuccessful())
+
+
+class TestStreamFailFast(TestCase):
+
+    def test_inprogress(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'inprogress')
+
+    def test_exists(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'exists')
+
+    def test_xfail(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'xfail')
+
+    def test_uxsuccess(self):
+        calls = []
+        def hook():
+            calls.append("called")
+        result = StreamFailFast(hook)
+        result.status('foo', 'uxsuccess')
+        result.status('foo', 'uxsuccess')
+        self.assertEqual(['called', 'called'], calls)
+
+    def test_success(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'success')
+
+    def test_fail(self):
+        calls = []
+        def hook():
+            calls.append("called")
+        result = StreamFailFast(hook)
+        result.status('foo', 'fail')
+        result.status('foo', 'fail')
+        self.assertEqual(['called', 'called'], calls)
+
+    def test_skip(self):
+        result = StreamFailFast(self.fail)
+        result.status('foo', 'skip')
+
+
+class TestStreamSummary(TestCase):
+
+    def test_attributes(self):
+        result = StreamSummary()
+        result.startTestRun()
+        self.assertEqual([], result.failures)
+        self.assertEqual([], result.errors)
+        self.assertEqual([], result.skipped)
+        self.assertEqual([], result.expectedFailures)
+        self.assertEqual([], result.unexpectedSuccesses)
+        self.assertEqual(0, result.testsRun)
+
+    def test_startTestRun(self):
+        result = StreamSummary()
+        result.startTestRun()
+        result.failures.append('x')
+        result.errors.append('x')
+        result.skipped.append('x')
+        result.expectedFailures.append('x')
+        result.unexpectedSuccesses.append('x')
+        result.testsRun = 1
+        result.startTestRun()
+        self.assertEqual([], result.failures)
+        self.assertEqual([], result.errors)
+        self.assertEqual([], result.skipped)
+        self.assertEqual([], result.expectedFailures)
+        self.assertEqual([], result.unexpectedSuccesses)
+        self.assertEqual(0, result.testsRun)
+
+    def test_wasSuccessful(self):
+        # wasSuccessful returns False if any of
+        # failures/errors is non-empty.
+        result = StreamSummary()
+        result.startTestRun()
+        self.assertEqual(True, result.wasSuccessful())
+        result.failures.append('x')
+        self.assertEqual(False, result.wasSuccessful())
+        result.startTestRun()
+        result.errors.append('x')
+        self.assertEqual(False, result.wasSuccessful())
+        result.startTestRun()
+        result.skipped.append('x')
+        self.assertEqual(True, result.wasSuccessful())
+        result.startTestRun()
+        result.expectedFailures.append('x')
+        self.assertEqual(True, result.wasSuccessful())
+        result.startTestRun()
+        result.unexpectedSuccesses.append('x')
+        self.assertEqual(True, result.wasSuccessful())
+
+    def test_stopTestRun(self):
+        result = StreamSummary()
+        # Terminal successful codes.
+        result.startTestRun()
+        result.status("foo", "inprogress")
+        result.status("foo", "success")
+        result.status("bar", "skip")
+        result.status("baz", "exists")
+        result.stopTestRun()
+        self.assertEqual(True, result.wasSuccessful())
+        # Existence is terminal but doesn't count as 'running' a test.
+        self.assertEqual(2, result.testsRun)
+
+    def test_stopTestRun_inprogress_test_fails(self):
+        # Tests inprogress at stopTestRun trigger a failure.
+        result = StreamSummary()
+        result.startTestRun()
+        result.status("foo", "inprogress")
+        result.stopTestRun()
+        self.assertEqual(False, result.wasSuccessful())
+        self.assertThat(result.errors, HasLength(1))
+        self.assertEqual("foo", result.errors[0][0].id())
+        self.assertEqual("Test did not complete", result.errors[0][1])
+        # Interim state detection handles route codes - while duplicate ids
+        # in one run are undesirable, they may happen (e.g. with repeated
+        # tests).
+        result.startTestRun()
+        result.status("foo", "inprogress")
+        result.status("foo", "inprogress", route_code="A")
+        result.status("foo", "success", route_code="A")
+        result.stopTestRun()
+        self.assertEqual(False, result.wasSuccessful())
+
+    def test_status_skip(self):
+        # When skip is seen, a synthetic test is reported with the reason
+        # captured from the 'reason' file attachment, if any.
+        result = StreamSummary()
+        result.startTestRun()
+        result.status(file_name="reason",
+            file_bytes=_b("Missing dependency"), eof=True,
+            mime_type="text/plain; charset=utf8", test_id="foo.bar")
+        result.status("foo.bar", "skip")
+        self.assertThat(result.skipped, HasLength(1))
+        self.assertEqual("foo.bar", result.skipped[0][0].id())
+        self.assertEqual(_u("Missing dependency"), result.skipped[0][1])
+
+    def _report_files(self, result):
+        result.status(file_name="some log.txt",
+            file_bytes=_b("1234 log message"), eof=True,
+            mime_type="text/plain; charset=utf8", test_id="foo.bar")
+        result.status(file_name="traceback",
+            file_bytes=_b("""Traceback (most recent call last):
+  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+      AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""), eof=True, mime_type="text/plain; charset=utf8", test_id="foo.bar")
+
+    files_message = Equals(_u("""some log.txt: {{{1234 log message}}}
+
+Traceback (most recent call last):
+  File "testtools/tests/test_testresult.py", line 607, in test_stopTestRun
+      AllMatch(Equals([('startTestRun',), ('stopTestRun',)])))
+testtools.matchers._impl.MismatchError: Differences: [
+[('startTestRun',), ('stopTestRun',)] != []
+[('startTestRun',), ('stopTestRun',)] != []
+]
+"""))
+
+    def test_status_fail(self):
+        # When fail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+        result = StreamSummary()
+        result.startTestRun()
+        self._report_files(result)
+        result.status("foo.bar", "fail")
+        self.assertThat(result.errors, HasLength(1))
+        self.assertEqual("foo.bar", result.errors[0][0].id())
+        self.assertThat(result.errors[0][1], self.files_message)
+
+    def test_status_xfail(self):
+        # When xfail is seen, a synthetic test is reported with all attached
+        # files shown as the message.
+        result = StreamSummary()
+        result.startTestRun()
+        self._report_files(result)
+        result.status("foo.bar", "xfail")
+        self.assertThat(result.expectedFailures, HasLength(1))
+        self.assertEqual("foo.bar", result.expectedFailures[0][0].id())
+        self.assertThat(result.expectedFailures[0][1], self.files_message)
+
+    def test_status_uxsuccess(self):
+        # When uxsuccess is seen, a synthetic test is reported.
+        result = StreamSummary()
+        result.startTestRun()
+        result.status("foo.bar", "uxsuccess")
+        self.assertThat(result.unexpectedSuccesses, HasLength(1))
+        self.assertEqual("foo.bar", result.unexpectedSuccesses[0].id())
+
+
+class TestTestControl(TestCase):
+
+    def test_default(self):
+        self.assertEqual(False, TestControl().shouldStop)
+
+    def test_stop(self):
+        control = TestControl()
+        control.stop()
+        self.assertEqual(True, control.shouldStop)
+
+
+class TestTestResult(TestCase):
+    """Tests for 'TestResult'."""
+
+    run_tests_with = FullStackRunTest
+
+    def makeResult(self):
+        """Make an arbitrary result for testing."""
+        return TestResult()
+
+    def test_addSkipped(self):
+        # Calling addSkip on a TestResult records the test that was skipped in
+        # its skip_reasons dict.
+        result = self.makeResult()
+        result.addSkip(self, _u("Skipped for some reason"))
+        self.assertEqual({_u("Skipped for some reason"):[self]},
+            result.skip_reasons)
+        result.addSkip(self, _u("Skipped for some reason"))
+        self.assertEqual({_u("Skipped for some reason"):[self, self]},
+            result.skip_reasons)
+        result.addSkip(self, _u("Skipped for another reason"))
+        self.assertEqual({_u("Skipped for some reason"):[self, self],
+            _u("Skipped for another reason"):[self]},
+            result.skip_reasons)
+
+    def test_now_datetime_now(self):
+        result = self.makeResult()
+        olddatetime = testresult.real.datetime
+        def restore():
+            testresult.real.datetime = olddatetime
+        self.addCleanup(restore)
+        class Module:
+            pass
+        now = datetime.datetime.now(utc)
+        stubdatetime = Module()
+        stubdatetime.datetime = Module()
+        stubdatetime.datetime.now = lambda tz: now
+        testresult.real.datetime = stubdatetime
+        # Calling _now() looks up the time.
+        self.assertEqual(now, result._now())
+        then = now + datetime.timedelta(0, 1)
+        # Set an explicit datetime, which gets returned from then on.
+        result.time(then)
+        self.assertNotEqual(now, result._now())
+        self.assertEqual(then, result._now())
+        # Go back to looking it up.
+        result.time(None)
+        self.assertEqual(now, result._now())
+
+    def test_now_datetime_time(self):
+        result = self.makeResult()
+        now = datetime.datetime.now(utc)
+        result.time(now)
+        self.assertEqual(now, result._now())
+
+    def test_traceback_formatting_without_stack_hidden(self):
+        # During the testtools test run, we show our levels of the stack,
+        # because we want to be able to use our test suite to debug our own
+        # code.
+        result = self.makeResult()
+        test = make_erroring_test()
+        test.run(result)
+        self.assertThat(
+            result.errors[0][1],
+            DocTestMatches(
+                'Traceback (most recent call last):\n'
+                '  File "...testtools...runtest.py", line ..., in _run_user\n'
+                '    return fn(*args, **kwargs)\n'
+                '  File "...testtools...testcase.py", line ..., in _run_test_method\n'
+                '    return self._get_test_method()()\n'
+                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
+                '    1/0\n'
+                'ZeroDivisionError: ...\n',
+                doctest.ELLIPSIS | doctest.REPORT_UDIFF))
+
+    def test_traceback_formatting_with_stack_hidden(self):
+        result = self.makeResult()
+        test = make_erroring_test()
+        run_with_stack_hidden(True, test.run, result)
+        self.assertThat(
+            result.errors[0][1],
+            DocTestMatches(
+                'Traceback (most recent call last):\n'
+                '  File "...testtools...tests...test_testresult.py", line ..., in error\n'
+                '    1/0\n'
+                'ZeroDivisionError: ...\n',
+                doctest.ELLIPSIS))
+
+    def test_traceback_formatting_with_stack_hidden_mismatch(self):
+        result = self.makeResult()
+        test = make_mismatching_test()
+        run_with_stack_hidden(True, test.run, result)
+        self.assertThat(
+            result.failures[0][1],
+            DocTestMatches(
+                'Traceback (most recent call last):\n'
+                '  File "...testtools...tests...test_testresult.py", line ..., in mismatch\n'
+                '    self.assertEqual(1, 2)\n'
+                '...MismatchError: 1 != 2\n',
+                doctest.ELLIPSIS))
+
+    def test_exc_info_to_unicode(self):
+        # subunit upcalls to TestResult._exc_info_to_unicode, so we need to
+        # make sure that it's there.
+        #
+        # See <https://bugs.launchpad.net/testtools/+bug/929063>.
+        test = make_erroring_test()
+        exc_info = make_exception_info(RuntimeError, "foo")
+        result = self.makeResult()
+        text_traceback = result._exc_info_to_unicode(exc_info, test)
+        self.assertEqual(
+            TracebackContent(exc_info, test).as_text(), text_traceback)
+
+
+class TestMultiTestResult(TestCase):
+    """Tests for 'MultiTestResult'."""
+
+    def setUp(self):
+        super(TestMultiTestResult, self).setUp()
+        self.result1 = LoggingResult([])
+        self.result2 = LoggingResult([])
+        self.multiResult = MultiTestResult(self.result1, self.result2)
+
+    def assertResultLogsEqual(self, expectedEvents):
+        """Assert that our test results have received the expected events."""
+        self.assertEqual(expectedEvents, self.result1._events)
+        self.assertEqual(expectedEvents, self.result2._events)
+
+    def test_repr(self):
+        self.assertEqual(
+            '<MultiTestResult (%r, %r)>' % (
+                ExtendedToOriginalDecorator(self.result1),
+                ExtendedToOriginalDecorator(self.result2)),
+            repr(self.multiResult))
+
+    def test_empty(self):
+        # Initializing a `MultiTestResult` doesn't do anything to its
+        # `TestResult`s.
+        self.assertResultLogsEqual([])
+
+    def test_failfast_get(self):
+        # Reading failfast reads from the first result - an arbitrary choice.
+        self.assertEqual(False, self.multiResult.failfast)
+        self.result1.failfast = True
+        self.assertEqual(True, self.multiResult.failfast)
+
+    def test_failfast_set(self):
+        # Writing failfast writes to all results.
+        self.multiResult.failfast = True
+        self.assertEqual(True, self.result1.failfast)
+        self.assertEqual(True, self.result2.failfast)
+
+    def test_shouldStop(self):
+        self.assertFalse(self.multiResult.shouldStop)
+        self.result2.stop()
+        # NB: result1 is not stopped: MultiTestResult has to combine the
+        # values.
+        self.assertTrue(self.multiResult.shouldStop)
+
+    def test_startTest(self):
+        # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
+        # its `TestResult`s.
+        self.multiResult.startTest(self)
+        self.assertResultLogsEqual([('startTest', self)])
+
+    def test_stop(self):
+        self.assertFalse(self.multiResult.shouldStop)
+        self.multiResult.stop()
+        self.assertResultLogsEqual(['stop'])
+
+    def test_stopTest(self):
+        # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
+        # its `TestResult`s.
+        self.multiResult.stopTest(self)
+        self.assertResultLogsEqual([('stopTest', self)])
+
+    def test_addSkipped(self):
+        # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
+        # results.
+        reason = _u("Skipped for some reason")
+        self.multiResult.addSkip(self, reason)
+        self.assertResultLogsEqual([('addSkip', self, reason)])
+
+    def test_addSuccess(self):
+        # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
+        # all its `TestResult`s.
+        self.multiResult.addSuccess(self)
+        self.assertResultLogsEqual([('addSuccess', self)])
+
+    def test_done(self):
+        # Calling `done` on a `MultiTestResult` calls `done` on all its
+        # `TestResult`s.
+        self.multiResult.done()
+        self.assertResultLogsEqual(['done'])
+
+    def test_addFailure(self):
+        # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
+        # all its `TestResult`s.
+        exc_info = make_exception_info(AssertionError, 'failure')
+        self.multiResult.addFailure(self, exc_info)
+        self.assertResultLogsEqual([('addFailure', self, exc_info)])
+
+    def test_addError(self):
+        # Calling `addError` on a `MultiTestResult` calls `addError` on all
+        # its `TestResult`s.
+        exc_info = make_exception_info(RuntimeError, 'error')
+        self.multiResult.addError(self, exc_info)
+        self.assertResultLogsEqual([('addError', self, exc_info)])
+
+    def test_startTestRun(self):
+        # Calling `startTestRun` on a `MultiTestResult` forwards to all its
+        # `TestResult`s.
+        self.multiResult.startTestRun()
+        self.assertResultLogsEqual(['startTestRun'])
+
+    def test_stopTestRun(self):
+        # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
+        # `TestResult`s.
+        self.multiResult.stopTestRun()
+        self.assertResultLogsEqual(['stopTestRun'])
+
+    def test_stopTestRun_returns_results(self):
+        # `MultiTestResult.stopTestRun` returns a tuple of the return values
+        # of all the `stopTestRun`s that it forwards to.
+        class Result(LoggingResult):
+            def stopTestRun(self):
+                super(Result, self).stopTestRun()
+                return 'foo'
+        multi_result = MultiTestResult(Result([]), Result([]))
+        result = multi_result.stopTestRun()
+        self.assertEqual(('foo', 'foo'), result)
+
+    def test_tags(self):
+        # Calling `tags` on a `MultiTestResult` calls `tags` on all its
+        # `TestResult`s.
+        added_tags = set(['foo', 'bar'])
+        removed_tags = set(['eggs'])
+        self.multiResult.tags(added_tags, removed_tags)
+        self.assertResultLogsEqual([('tags', added_tags, removed_tags)])
+
+    def test_time(self):
+        # The time call is dispatched, not eaten by the base class.
+        self.multiResult.time('foo')
+        self.assertResultLogsEqual([('time', 'foo')])
+
+
+class TestTextTestResult(TestCase):
+    """Tests for 'TextTestResult'."""
+
+    def setUp(self):
+        super(TestTextTestResult, self).setUp()
+        self.result = TextTestResult(StringIO())
+
+    def getvalue(self):
+        return self.result.stream.getvalue()
+
+    def test__init_sets_stream(self):
+        result = TextTestResult("fp")
+        self.assertEqual("fp", result.stream)
+
+    def reset_output(self):
+        self.result.stream = StringIO()
+
+    def test_startTestRun(self):
+        self.result.startTestRun()
+        self.assertEqual("Tests running...\n", self.getvalue())
+
+    def test_stopTestRun_count_many(self):
+        test = make_test()
+        self.result.startTestRun()
+        self.result.startTest(test)
+        self.result.stopTest(test)
+        self.result.startTest(test)
+        self.result.stopTest(test)
+        self.result.stream = StringIO()
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("\nRan 2 tests in ...s\n...", doctest.ELLIPSIS))
+
+    def test_stopTestRun_count_single(self):
+        test = make_test()
+        self.result.startTestRun()
+        self.result.startTest(test)
+        self.result.stopTest(test)
+        self.reset_output()
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("\nRan 1 test in ...s\nOK\n", doctest.ELLIPSIS))
+
+    def test_stopTestRun_count_zero(self):
+        self.result.startTestRun()
+        self.reset_output()
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("\nRan 0 tests in ...s\nOK\n", doctest.ELLIPSIS))
+
+    def test_stopTestRun_current_time(self):
+        test = make_test()
+        now = datetime.datetime.now(utc)
+        self.result.time(now)
+        self.result.startTestRun()
+        self.result.startTest(test)
+        now = now + datetime.timedelta(0, 0, 0, 1)
+        self.result.time(now)
+        self.result.stopTest(test)
+        self.reset_output()
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
+
+    def test_stopTestRun_successful(self):
+        self.result.startTestRun()
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("...\nOK\n", doctest.ELLIPSIS))
+
+    def test_stopTestRun_not_successful_failure(self):
+        test = make_failing_test()
+        self.result.startTestRun()
+        test.run(self.result)
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+    def test_stopTestRun_not_successful_error(self):
+        test = make_erroring_test()
+        self.result.startTestRun()
+        test.run(self.result)
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+    def test_stopTestRun_not_successful_unexpected_success(self):
+        test = make_unexpectedly_successful_test()
+        self.result.startTestRun()
+        test.run(self.result)
+        self.result.stopTestRun()
+        self.assertThat(self.getvalue(),
+            DocTestMatches("...\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+    def test_stopTestRun_shows_details(self):
+        self.skip("Disabled per bug 1188420")
+        def run_tests():
+            self.result.startTestRun()
+            make_erroring_test().run(self.result)
+            make_unexpectedly_successful_test().run(self.result)
+            make_failing_test().run(self.result)
+            self.reset_output()
+            self.result.stopTestRun()
+        run_with_stack_hidden(True, run_tests)
+        self.assertThat(self.getvalue(),
+            DocTestMatches("""...======================================================================
+ERROR: testtools.tests.test_testresult.Test.error
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "...testtools...tests...test_testresult.py", line ..., in error
+    1/0
+ZeroDivisionError:... divi... by zero...
+======================================================================
+FAIL: testtools.tests.test_testresult.Test.failed
+----------------------------------------------------------------------
+Traceback (most recent call last):
+  File "...testtools...tests...test_testresult.py", line ..., in failed
+    self.fail("yo!")
+AssertionError: yo!
+======================================================================
+UNEXPECTED SUCCESS: testtools.tests.test_testresult.Test.succeeded
+----------------------------------------------------------------------
+...""", doctest.ELLIPSIS | doctest.REPORT_NDIFF))
+
+
+class TestThreadSafeForwardingResult(TestCase):
+    """Tests for `TestThreadSafeForwardingResult`."""
+
+    def make_results(self, n):
+        events = []
+        target = LoggingResult(events)
+        semaphore = threading.Semaphore(1)
+        return [
+            ThreadsafeForwardingResult(target, semaphore)
+            for i in range(n)], events
+
+    def test_nonforwarding_methods(self):
+        # startTest and stopTest are not forwarded because they need to be
+        # batched.
+        [result], events = self.make_results(1)
+        result.startTest(self)
+        result.stopTest(self)
+        self.assertEqual([], events)
+
+    def test_tags_not_forwarded(self):
+        # Tags need to be batched for each test, so they aren't forwarded
+        # until a test runs.
+        [result], events = self.make_results(1)
+        result.tags(set(['foo']), set(['bar']))
+        self.assertEqual([], events)
+
+    def test_global_tags_simple(self):
+        # Tags specified outside of a test are global. When a test's results
+        # are finally forwarded, we send through these global tags *as*
+        # test-specific tags, because as a multiplexer there should be no way
+        # for a global tag on an input stream to affect tests from other
+        # streams - we can always issue test-local tags instead.
+        [result], events = self.make_results(1)
+        result.tags(set(['foo']), set())
+        result.time(1)
+        result.startTest(self)
+        result.time(2)
+        result.addSuccess(self)
+        self.assertEqual(
+            [('time', 1),
+             ('startTest', self),
+             ('time', 2),
+             ('tags', set(['foo']), set()),
+             ('addSuccess', self),
+             ('stopTest', self),
+             ], events)
+
+    def test_global_tags_complex(self):
+        # Multiple calls to tags() in a global context are buffered until the
+        # next test completes and are issued as part of the test context,
+        # because they cannot be issued until the output result is locked.
+        # The sample data shows them being merged together; this is, strictly
+        # speaking, incidental - they could be issued separately (in order)
+        # and still be legitimate.
+        [result], events = self.make_results(1)
+        result.tags(set(['foo', 'bar']), set(['baz', 'qux']))
+        result.tags(set(['cat', 'qux']), set(['bar', 'dog']))
+        result.time(1)
+        result.startTest(self)
+        result.time(2)
+        result.addSuccess(self)
+        self.assertEqual(
+            [('time', 1),
+             ('startTest', self),
+             ('time', 2),
+             ('tags', set(['cat', 'foo', 'qux']), set(['dog', 'bar', 'baz'])),
+             ('addSuccess', self),
+             ('stopTest', self),
+             ], events)
+
+    def test_local_tags(self):
+        # Any tags set within a test context are forwarded in that test
+        # context when the result is finally forwarded.  This means that the
+        # tags for the test are part of the atomic message communicating
+        # everything about that test.
+        [result], events = self.make_results(1)
+        result.time(1)
+        result.startTest(self)
+        result.tags(set(['foo']), set([]))
+        result.tags(set(), set(['bar']))
+        result.time(2)
+        result.addSuccess(self)
+        self.assertEqual(
+            [('time', 1),
+             ('startTest', self),
+             ('time', 2),
+             ('tags', set(['foo']), set(['bar'])),
+             ('addSuccess', self),
+             ('stopTest', self),
+             ], events)
+
+    def test_local_tags_dont_leak(self):
+        # A tag set during a test is local to that test and is not set during
+        # the tests that follow.
+        [result], events = self.make_results(1)
+        a, b = PlaceHolder('a'), PlaceHolder('b')
+        result.time(1)
+        result.startTest(a)
+        result.tags(set(['foo']), set([]))
+        result.time(2)
+        result.addSuccess(a)
+        result.stopTest(a)
+        result.time(3)
+        result.startTest(b)
+        result.time(4)
+        result.addSuccess(b)
+        result.stopTest(b)
+        self.assertEqual(
+            [('time', 1),
+             ('startTest', a),
+             ('time', 2),
+             ('tags', set(['foo']), set()),
+             ('addSuccess', a),
+             ('stopTest', a),
+             ('time', 3),
+             ('startTest', b),
+             ('time', 4),
+             ('addSuccess', b),
+             ('stopTest', b),
+             ], events)
+
+    def test_startTestRun(self):
+        # Calls to startTestRun are not batched, because we are only
+        # interested in sending tests atomically, not the whole run.
+        [result1, result2], events = self.make_results(2)
+        result1.startTestRun()
+        result2.startTestRun()
+        self.assertEqual(["startTestRun", "startTestRun"], events)
+
+    def test_stopTestRun(self):
+        # Calls to stopTestRun are not batched, because we are only
+        # interested in sending tests atomically, not the whole run.
+        [result1, result2], events = self.make_results(2)
+        result1.stopTestRun()
+        result2.stopTestRun()
+        self.assertEqual(["stopTestRun", "stopTestRun"], events)
+
+    def test_forward_addError(self):
+        # Once we receive an addError event, we forward all of the events for
+        # that test, as we now know that test is complete.
+        [result], events = self.make_results(1)
+        exc_info = make_exception_info(RuntimeError, 'error')
+        start_time = datetime.datetime.utcfromtimestamp(1.489)
+        end_time = datetime.datetime.utcfromtimestamp(51.476)
+        result.time(start_time)
+        result.startTest(self)
+        result.time(end_time)
+        result.addError(self, exc_info)
+        self.assertEqual([
+            ('time', start_time),
+            ('startTest', self),
+            ('time', end_time),
+            ('addError', self, exc_info),
+            ('stopTest', self),
+            ], events)
+
+    def test_forward_addFailure(self):
+        # Once we receive an addFailure event, we forward all of the events
+        # for that test, as we now know that test is complete.
+        [result], events = self.make_results(1)
+        exc_info = make_exception_info(AssertionError, 'failure')
+        start_time = datetime.datetime.utcfromtimestamp(2.489)
+        end_time = datetime.datetime.utcfromtimestamp(3.476)
+        result.time(start_time)
+        result.startTest(self)
+        result.time(end_time)
+        result.addFailure(self, exc_info)
+        self.assertEqual([
+            ('time', start_time),
+            ('startTest', self),
+            ('time', end_time),
+            ('addFailure', self, exc_info),
+            ('stopTest', self),
+            ], events)
+
+    def test_forward_addSkip(self):
+        # Once we receive an addSkip event, we forward all of the events for
+        # that test, as we now know that test is complete.
+        [result], events = self.make_results(1)
+        reason = _u("Skipped for some reason")
+        start_time = datetime.datetime.utcfromtimestamp(4.489)
+        end_time = datetime.datetime.utcfromtimestamp(5.476)
+        result.time(start_time)
+        result.startTest(self)
+        result.time(end_time)
+        result.addSkip(self, reason)
+        self.assertEqual([
+            ('time', start_time),
+            ('startTest', self),
+            ('time', end_time),
+            ('addSkip', self, reason),
+            ('stopTest', self),
+            ], events)
+
+    def test_forward_addSuccess(self):
+        # Once we receive an addSuccess event, we forward all of the events
+        # for that test, as we now know that test is complete.
+        [result], events = self.make_results(1)
+        start_time = datetime.datetime.utcfromtimestamp(6.489)
+        end_time = datetime.datetime.utcfromtimestamp(7.476)
+        result.time(start_time)
+        result.startTest(self)
+        result.time(end_time)
+        result.addSuccess(self)
+        self.assertEqual([
+            ('time', start_time),
+            ('startTest', self),
+            ('time', end_time),
+            ('addSuccess', self),
+            ('stopTest', self),
+            ], events)
+
+    def test_only_one_test_at_a_time(self):
+        # Even if there are multiple ThreadsafeForwardingResults forwarding to
+        # the same target result, the target result only receives the complete
+        # events for one test at a time.
+        [result1, result2], events = self.make_results(2)
+        test1, test2 = self, make_test()
+        start_time1 = datetime.datetime.utcfromtimestamp(1.489)
+        end_time1 = datetime.datetime.utcfromtimestamp(2.476)
+        start_time2 = datetime.datetime.utcfromtimestamp(3.489)
+        end_time2 = datetime.datetime.utcfromtimestamp(4.489)
+        result1.time(start_time1)
+        result2.time(start_time2)
+        result1.startTest(test1)
+        result2.startTest(test2)
+        result1.time(end_time1)
+        result2.time(end_time2)
+        result2.addSuccess(test2)
+        result1.addSuccess(test1)
+        self.assertEqual([
+            # test2 finishes first, and so is flushed first.
+            ('time', start_time2),
+            ('startTest', test2),
+            ('time', end_time2),
+            ('addSuccess', test2),
+            ('stopTest', test2),
+            # test1 finishes next, and thus follows.
+            ('time', start_time1),
+            ('startTest', test1),
+            ('time', end_time1),
+            ('addSuccess', test1),
+            ('stopTest', test1),
+            ], events)
+
+
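+# The tests below pin down _merge_tags((new, gone), (changing_new,
+# changing_gone)): the most recent change to a tag wins, so an incoming
+# "gone" tag cancels a current "new" tag and an incoming "new" tag cancels
+# a current "gone" tag.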
+class TestMergeTags(TestCase):
+
+    def test_merge_unseen_gone_tag(self):
+        # If an incoming "gone" tag isn't currently tagged one way or the
+        # other, add it to the "gone" tags.
+        current_tags = set(['present']), set(['missing'])
+        changing_tags = set(), set(['going'])
+        expected = set(['present']), set(['missing', 'going'])
+        self.assertEqual(
+            expected, _merge_tags(current_tags, changing_tags))
+
+    def test_merge_incoming_gone_tag_with_current_new_tag(self):
+        # If one of the incoming "gone" tags is one of the existing "new"
+        # tags, then it overrides the "new" tag, leaving it marked as "gone".
+        current_tags = set(['present', 'going']), set(['missing'])
+        changing_tags = set(), set(['going'])
+        expected = set(['present']), set(['missing', 'going'])
+        self.assertEqual(
+            expected, _merge_tags(current_tags, changing_tags))
+
+    def test_merge_unseen_new_tag(self):
+        current_tags = set(['present']), set(['missing'])
+        changing_tags = set(['coming']), set()
+        expected = set(['coming', 'present']), set(['missing'])
+        self.assertEqual(
+            expected, _merge_tags(current_tags, changing_tags))
+
+    def test_merge_incoming_new_tag_with_current_gone_tag(self):
+        # If one of the incoming "new" tags is currently marked as "gone",
+        # then it overrides the "gone" tag, leaving it marked as "new".
+        current_tags = set(['present']), set(['coming', 'missing'])
+        changing_tags = set(['coming']), set()
+        expected = set(['coming', 'present']), set(['missing'])
+        self.assertEqual(
+            expected, _merge_tags(current_tags, changing_tags))
+
+
+class TestStreamResultRouter(TestCase):
+
+    def test_start_stop_test_run_no_fallback(self):
+        result = StreamResultRouter()
+        result.startTestRun()
+        result.stopTestRun()
+
+    def test_no_fallback_errors(self):
+        self.assertRaises(Exception, StreamResultRouter().status, test_id='f')
+
+    def test_fallback_calls(self):
+        fallback = LoggingStreamResult()
+        result = StreamResultRouter(fallback)
+        result.startTestRun()
+        result.status(test_id='foo')
+        result.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None),
+            ('stopTestRun',),
+            ],
+            fallback._events)
+
+    def test_fallback_no_do_start_stop_run(self):
+        fallback = LoggingStreamResult()
+        result = StreamResultRouter(fallback, do_start_stop_run=False)
+        result.startTestRun()
+        result.status(test_id='foo')
+        result.stopTestRun()
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None)
+            ],
+            fallback._events)
+
+    def test_add_rule_bad_policy(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(ValueError, router.add_rule, target, 'route_code_prefixa',
+            route_prefix='0')
+
+    def test_add_rule_extra_policy_arg(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+            route_prefix='0', foo=1)
+
+    def test_add_rule_missing_prefix(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix')
+
+    def test_add_rule_slash_in_prefix(self):
+        router = StreamResultRouter()
+        target = LoggingStreamResult()
+        self.assertRaises(TypeError, router.add_rule, target, 'route_code_prefix',
+            route_prefix='0/')
+
+    def test_add_rule_route_code_consume_False(self):
+        fallback = LoggingStreamResult()
+        target = LoggingStreamResult()
+        router = StreamResultRouter(fallback)
+        router.add_rule(target, 'route_code_prefix', route_prefix='0')
+        router.status(test_id='foo', route_code='0')
+        router.status(test_id='foo', route_code='0/1')
+        router.status(test_id='foo')
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, '0',
+             None),
+            ('status', 'foo', None, None, True, None, None, False, None, '0/1',
+             None),
+            ],
+            target._events)
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None),
+            ],
+            fallback._events)
+
+    def test_add_rule_route_code_consume_True(self):
+        fallback = LoggingStreamResult()
+        target = LoggingStreamResult()
+        router = StreamResultRouter(fallback)
+        router.add_rule(
+            target, 'route_code_prefix', route_prefix='0', consume_route=True)
+        router.status(test_id='foo', route_code='0') # -> None
+        router.status(test_id='foo', route_code='0/1') # -> 1
+        router.status(test_id='foo', route_code='1') # -> fallback as-is.
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, None,
+             None),
+            ('status', 'foo', None, None, True, None, None, False, None, '1',
+             None),
+            ],
+            target._events)
+        self.assertEqual([
+            ('status', 'foo', None, None, True, None, None, False, None, '1',
+             None),
+            ],
+            fallback._events)
+
+    def test_add_rule_test_id(self):
+        nontest = LoggingStreamResult()
+        test = LoggingStreamResult()
+        router = StreamResultRouter(test)
+        router.add_rule(nontest, 'test_id', test_id=None)
+        router.status(test_id='foo', file_name="bar", file_bytes=b'')
+        router.status(file_name="bar", file_bytes=b'')
+        self.assertEqual([
+            ('status', 'foo', None, None, True, 'bar', b'', False, None, None,
+             None),], test._events)
+        self.assertEqual([
+            ('status', None, None, None, True, 'bar', b'', False, None, None,
+             None),], nontest._events)
+
+    def test_add_rule_do_start_stop_run(self):
+        nontest = LoggingStreamResult()
+        router = StreamResultRouter()
+        router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+        router.startTestRun()
+        router.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('stopTestRun',),
+            ], nontest._events)
+
+    def test_add_rule_do_start_stop_run_after_startTestRun(self):
+        nontest = LoggingStreamResult()
+        router = StreamResultRouter()
+        router.startTestRun()
+        router.add_rule(nontest, 'test_id', test_id=None, do_start_stop_run=True)
+        router.stopTestRun()
+        self.assertEqual([
+            ('startTestRun',),
+            ('stopTestRun',),
+            ], nontest._events)
+
+
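+# Note for the tests below: StreamToQueue packs each event into a dict and
+# prefixes its own route code to any incoming route_code, joined with '/':
+# with a route code of "foo", an incoming route_code of "bar" is forwarded
+# as "foo/bar", and a missing route_code as just "foo".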
+class TestStreamToQueue(TestCase):
+
+    def make_result(self):
+        queue = Queue()
+        return queue, StreamToQueue(queue, "foo")
+
+    def test_status(self):
+        def check_event(event_dict, route=None, time=None):
+            self.assertEqual("status", event_dict['event'])
+            self.assertEqual("test", event_dict['test_id'])
+            self.assertEqual("fail", event_dict['test_status'])
+            self.assertEqual(set(["quux"]), event_dict['test_tags'])
+            self.assertEqual(False, event_dict['runnable'])
+            self.assertEqual("file", event_dict['file_name'])
+            self.assertEqual(_b("content"), event_dict['file_bytes'])
+            self.assertEqual(True, event_dict['eof'])
+            self.assertEqual("quux", event_dict['mime_type'])
+            self.assertEqual("test", event_dict['test_id'])
+            self.assertEqual(route, event_dict['route_code'])
+            self.assertEqual(time, event_dict['timestamp'])
+        queue, result = self.make_result()
+        result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+            file_name="file", file_bytes=_b("content"), eof=True,
+            mime_type="quux", route_code=None, timestamp=None)
+        self.assertEqual(1, queue.qsize())
+        a_time = datetime.datetime.now(utc)
+        result.status("test", "fail", test_tags=set(["quux"]), runnable=False,
+            file_name="file", file_bytes=_b("content"), eof=True,
+            mime_type="quux", route_code="bar", timestamp=a_time)
+        self.assertEqual(2, queue.qsize())
+        check_event(queue.get(False), route="foo", time=None)
+        check_event(queue.get(False), route="foo/bar", time=a_time)
+
+    def testStartTestRun(self):
+        queue, result = self.make_result()
+        result.startTestRun()
+        self.assertEqual(
+            {'event':'startTestRun', 'result':result}, queue.get(False))
+        self.assertTrue(queue.empty())
+
+    def testStopTestRun(self):
+        queue, result = self.make_result()
+        result.stopTestRun()
+        self.assertEqual(
+            {'event':'stopTestRun', 'result':result}, queue.get(False))
+        self.assertTrue(queue.empty())
+
+
+class TestExtendedToOriginalResultDecoratorBase(TestCase):
+
+    def make_26_result(self):
+        self.result = Python26TestResult()
+        self.make_converter()
+
+    def make_27_result(self):
+        self.result = Python27TestResult()
+        self.make_converter()
+
+    def make_converter(self):
+        self.converter = ExtendedToOriginalDecorator(self.result)
+
+    def make_extended_result(self):
+        self.result = ExtendedTestResult()
+        self.make_converter()
+
+    def check_outcome_details(self, outcome):
+        """Call an outcome with a details dict to be passed through."""
+        # This dict is /not/ convertible - that's deliberate, as it should
+        # not hit the conversion code path.
+        details = {'foo': 'bar'}
+        getattr(self.converter, outcome)(self, details=details)
+        self.assertEqual([(outcome, self, details)], self.result._events)
+
+    def get_details_and_string(self):
+        """Get a details dict and expected string."""
+        text1 = lambda: [_b("1\n2\n")]
+        text2 = lambda: [_b("3\n4\n")]
+        bin1 = lambda: [_b("5\n")]
+        details = {'text 1': Content(ContentType('text', 'plain'), text1),
+            'text 2': Content(ContentType('text', 'strange'), text2),
+            'bin 1': Content(ContentType('application', 'binary'), bin1)}
+        return (details,
+                ("Binary content:\n"
+                 "  bin 1 (application/binary)\n"
+                 "\n"
+                 "text 1: {{{\n"
+                 "1\n"
+                 "2\n"
+                 "}}}\n"
+                 "\n"
+                 "text 2: {{{\n"
+                 "3\n"
+                 "4\n"
+                 "}}}\n"))
+
+    def check_outcome_details_to_exc_info(self, outcome, expected=None):
+        """Call an outcome with a details dict to be made into exc_info."""
+        # The conversion is done using RemoteError and the string contents
+        # of the text types in the details dict.
+        if not expected:
+            expected = outcome
+        details, err_str = self.get_details_and_string()
+        getattr(self.converter, outcome)(self, details=details)
+        err = self.converter._details_to_exc_info(details)
+        self.assertEqual([(expected, self, err)], self.result._events)
+
+    def check_outcome_details_to_nothing(self, outcome, expected=None):
+        """Call an outcome with a details dict to be swallowed."""
+        if not expected:
+            expected = outcome
+        details = {'foo': 'bar'}
+        getattr(self.converter, outcome)(self, details=details)
+        self.assertEqual([(expected, self)], self.result._events)
+
+    def check_outcome_details_to_string(self, outcome):
+        """Call an outcome with a details dict to be stringified."""
+        details, err_str = self.get_details_and_string()
+        getattr(self.converter, outcome)(self, details=details)
+        self.assertEqual([(outcome, self, err_str)], self.result._events)
+
+    def check_outcome_details_to_arg(self, outcome, arg, extra_detail=None):
+        """Call an outcome with a details dict to have an arg extracted."""
+        details, _ = self.get_details_and_string()
+        if extra_detail:
+            details.update(extra_detail)
+        getattr(self.converter, outcome)(self, details=details)
+        self.assertEqual([(outcome, self, arg)], self.result._events)
+
+    def check_outcome_exc_info(self, outcome, expected=None):
+        """Check that calling a legacy outcome still works."""
+        # calling some outcome with the legacy exc_info style api (no keyword
+        # parameters) gets passed through.
+        if not expected:
+            expected = outcome
+        err = sys.exc_info()
+        getattr(self.converter, outcome)(self, err)
+        self.assertEqual([(expected, self, err)], self.result._events)
+
+    def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
+        """Check that calling a legacy outcome on a fallback works."""
+        # calling some outcome with the legacy exc_info style api (no keyword
+        # parameters) gets passed through.
+        if not expected:
+            expected = outcome
+        err = sys.exc_info()
+        getattr(self.converter, outcome)(self, err)
+        self.assertEqual([(expected, self)], self.result._events)
+
+    def check_outcome_nothing(self, outcome, expected=None):
+        """Check that calling a legacy outcome still works."""
+        if not expected:
+            expected = outcome
+        getattr(self.converter, outcome)(self)
+        self.assertEqual([(expected, self)], self.result._events)
+
+    def check_outcome_string_nothing(self, outcome, expected):
+        """Check that calling outcome with a string calls expected."""
+        getattr(self.converter, outcome)(self, "foo")
+        self.assertEqual([(expected, self)], self.result._events)
+
+    def check_outcome_string(self, outcome):
+        """Check that calling outcome with a string works."""
+        getattr(self.converter, outcome)(self, "foo")
+        self.assertEqual([(outcome, self, "foo")], self.result._events)
+
+
+class TestExtendedToOriginalResultDecorator(
+    TestExtendedToOriginalResultDecoratorBase):
+
+    def test_failfast_py26(self):
+        self.make_26_result()
+        self.assertEqual(False, self.converter.failfast)
+        self.converter.failfast = True
+        self.assertFalse(safe_hasattr(self.converter.decorated, 'failfast'))
+
+    def test_failfast_py27(self):
+        self.make_27_result()
+        self.assertEqual(False, self.converter.failfast)
+        # setting it should write it to the backing result
+        self.converter.failfast = True
+        self.assertEqual(True, self.converter.decorated.failfast)
+
+    def test_progress_py26(self):
+        self.make_26_result()
+        self.converter.progress(1, 2)
+
+    def test_progress_py27(self):
+        self.make_27_result()
+        self.converter.progress(1, 2)
+
+    def test_progress_pyextended(self):
+        self.make_extended_result()
+        self.converter.progress(1, 2)
+        self.assertEqual([('progress', 1, 2)], self.result._events)
+
+    def test_shouldStop(self):
+        self.make_26_result()
+        self.assertEqual(False, self.converter.shouldStop)
+        self.converter.decorated.stop()
+        self.assertEqual(True, self.converter.shouldStop)
+
+    def test_startTest_py26(self):
+        self.make_26_result()
+        self.converter.startTest(self)
+        self.assertEqual([('startTest', self)], self.result._events)
+
+    def test_startTest_py27(self):
+        self.make_27_result()
+        self.converter.startTest(self)
+        self.assertEqual([('startTest', self)], self.result._events)
+
+    def test_startTest_pyextended(self):
+        self.make_extended_result()
+        self.converter.startTest(self)
+        self.assertEqual([('startTest', self)], self.result._events)
+
+    def test_startTestRun_py26(self):
+        self.make_26_result()
+        self.converter.startTestRun()
+        self.assertEqual([], self.result._events)
+
+    def test_startTestRun_py27(self):
+        self.make_27_result()
+        self.converter.startTestRun()
+        self.assertEqual([('startTestRun',)], self.result._events)
+
+    def test_startTestRun_pyextended(self):
+        self.make_extended_result()
+        self.converter.startTestRun()
+        self.assertEqual([('startTestRun',)], self.result._events)
+
+    def test_stopTest_py26(self):
+        self.make_26_result()
+        self.converter.stopTest(self)
+        self.assertEqual([('stopTest', self)], self.result._events)
+
+    def test_stopTest_py27(self):
+        self.make_27_result()
+        self.converter.stopTest(self)
+        self.assertEqual([('stopTest', self)], self.result._events)
+
+    def test_stopTest_pyextended(self):
+        self.make_extended_result()
+        self.converter.stopTest(self)
+        self.assertEqual([('stopTest', self)], self.result._events)
+
+    def test_stopTestRun_py26(self):
+        self.make_26_result()
+        self.converter.stopTestRun()
+        self.assertEqual([], self.result._events)
+
+    def test_stopTestRun_py27(self):
+        self.make_27_result()
+        self.converter.stopTestRun()
+        self.assertEqual([('stopTestRun',)], self.result._events)
+
+    def test_stopTestRun_pyextended(self):
+        self.make_extended_result()
+        self.converter.stopTestRun()
+        self.assertEqual([('stopTestRun',)], self.result._events)
+
+    def test_tags_py26(self):
+        self.make_26_result()
+        self.converter.tags(set([1]), set([2]))
+
+    def test_tags_py27(self):
+        self.make_27_result()
+        self.converter.tags(set([1]), set([2]))
+
+    def test_tags_pyextended(self):
+        self.make_extended_result()
+        self.converter.tags(set([1]), set([2]))
+        self.assertEqual([('tags', set([1]), set([2]))], self.result._events)
+
+    def test_time_py26(self):
+        self.make_26_result()
+        self.converter.time(1)
+
+    def test_time_py27(self):
+        self.make_27_result()
+        self.converter.time(1)
+
+    def test_time_pyextended(self):
+        self.make_extended_result()
+        self.converter.time(1)
+        self.assertEqual([('time', 1)], self.result._events)
+
+
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
+
+    outcome = 'addError'
+
+    def test_outcome_Original_py26(self):
+        self.make_26_result()
+        self.check_outcome_exc_info(self.outcome)
+
+    def test_outcome_Original_py27(self):
+        self.make_27_result()
+        self.check_outcome_exc_info(self.outcome)
+
+    def test_outcome_Original_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_exc_info(self.outcome)
+
+    def test_outcome_Extended_py26(self):
+        self.make_26_result()
+        self.check_outcome_details_to_exc_info(self.outcome)
+
+    def test_outcome_Extended_py27(self):
+        self.make_27_result()
+        self.check_outcome_details_to_exc_info(self.outcome)
+
+    def test_outcome_Extended_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_details(self.outcome)
+
+    def test_outcome__no_details(self):
+        self.make_extended_result()
+        self.assertThat(
+            lambda: getattr(self.converter, self.outcome)(self),
+            Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddFailure(
+    TestExtendedToOriginalAddError):
+
+    outcome = 'addFailure'
+
+
+class TestExtendedToOriginalAddExpectedFailure(
+    TestExtendedToOriginalAddError):
+
+    outcome = 'addExpectedFailure'
+
+    def test_outcome_Original_py26(self):
+        self.make_26_result()
+        self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
+
+    def test_outcome_Extended_py26(self):
+        self.make_26_result()
+        self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
+
+
+class TestExtendedToOriginalAddSkip(
+    TestExtendedToOriginalResultDecoratorBase):
+
+    outcome = 'addSkip'
+
+    def test_outcome_Original_py26(self):
+        self.make_26_result()
+        self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+    def test_outcome_Original_py27(self):
+        self.make_27_result()
+        self.check_outcome_string(self.outcome)
+
+    def test_outcome_Original_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_string(self.outcome)
+
+    def test_outcome_Extended_py26(self):
+        self.make_26_result()
+        self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+    def test_outcome_Extended_py27_no_reason(self):
+        self.make_27_result()
+        self.check_outcome_details_to_string(self.outcome)
+
+    def test_outcome_Extended_py27_reason(self):
+        self.make_27_result()
+        self.check_outcome_details_to_arg(self.outcome, 'foo',
+            {'reason': Content(UTF8_TEXT, lambda:[_b('foo')])})
+
+    def test_outcome_Extended_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_details(self.outcome)
+
+    def test_outcome__no_details(self):
+        self.make_extended_result()
+        self.assertThat(
+            lambda: getattr(self.converter, self.outcome)(self),
+            Raises(MatchesException(ValueError)))
+
+
+class TestExtendedToOriginalAddSuccess(
+    TestExtendedToOriginalResultDecoratorBase):
+
+    outcome = 'addSuccess'
+    expected = 'addSuccess'
+
+    def test_outcome_Original_py26(self):
+        self.make_26_result()
+        self.check_outcome_nothing(self.outcome, self.expected)
+
+    def test_outcome_Original_py27(self):
+        self.make_27_result()
+        self.check_outcome_nothing(self.outcome)
+
+    def test_outcome_Original_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_nothing(self.outcome)
+
+    def test_outcome_Extended_py26(self):
+        self.make_26_result()
+        self.check_outcome_details_to_nothing(self.outcome, self.expected)
+
+    def test_outcome_Extended_py27(self):
+        self.make_27_result()
+        self.check_outcome_details_to_nothing(self.outcome)
+
+    def test_outcome_Extended_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalAddUnexpectedSuccess(
+    TestExtendedToOriginalResultDecoratorBase):
+
+    outcome = 'addUnexpectedSuccess'
+    expected = 'addFailure'
+
+    def test_outcome_Original_py26(self):
+        self.make_26_result()
+        getattr(self.converter, self.outcome)(self)
+        [event] = self.result._events
+        self.assertEqual((self.expected, self), event[:2])
+
+    def test_outcome_Original_py27(self):
+        self.make_27_result()
+        self.check_outcome_nothing(self.outcome)
+
+    def test_outcome_Original_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_nothing(self.outcome)
+
+    def test_outcome_Extended_py26(self):
+        self.make_26_result()
+        getattr(self.converter, self.outcome)(self)
+        [event] = self.result._events
+        self.assertEqual((self.expected, self), event[:2])
+
+    def test_outcome_Extended_py27(self):
+        self.make_27_result()
+        self.check_outcome_details_to_nothing(self.outcome)
+
+    def test_outcome_Extended_pyextended(self):
+        self.make_extended_result()
+        self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalResultOtherAttributes(
+    TestExtendedToOriginalResultDecoratorBase):
+
+    def test_other_attribute(self):
+        class OtherExtendedResult:
+            def foo(self):
+                return 2
+            bar = 1
+        self.result = OtherExtendedResult()
+        self.make_converter()
+        self.assertEqual(1, self.converter.bar)
+        self.assertEqual(2, self.converter.foo())
+
+
+class TestNonAsciiResults(TestCase):
+    """Test all kinds of tracebacks are cleanly interpreted as unicode
+
+    Currently this only uses weak "contains" assertions; it would be good to
+    be much stricter about the expected output. That would add a few failures
+    for the current release of IronPython, for instance, which gets some
+    traceback lines muddled.
+    """
+
+    _sample_texts = (
+        _u("pa\u026a\u03b8\u0259n"), # Unicode encodings only
+        _u("\u5357\u7121"), # In ISO 2022 encodings
+        _u("\xa7\xa7\xa7"), # In ISO 8859 encodings
+        )
+
+    _is_pypy = "__pypy__" in sys.builtin_module_names
+    # Everything but Jython shows syntax errors on the current character
+    _error_on_character = os.name != "java" and not _is_pypy
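+    # (_error_on_character is used below as a 0/1 string multiplier that
+    # shifts the expected '^' caret one column in SyntaxError output.)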
+
+    def _run(self, stream, test):
+        """Run the test, the same as in testtools.run but not to stdout"""
+        result = TextTestResult(stream)
+        result.startTestRun()
+        try:
+            return test.run(result)
+        finally:
+            result.stopTestRun()
+
+    def _write_module(self, name, encoding, contents):
+        """Create Python module on disk with contents in given encoding"""
+        try:
+            # Need to pre-check that the coding is valid, or codecs.open
+            # drops the file without closing it, which breaks
+            # non-refcounted pythons
+            codecs.lookup(encoding)
+        except LookupError:
+            self.skip("Encoding unsupported by implementation: %r" % encoding)
+        f = codecs.open(os.path.join(self.dir, name + ".py"), "w", encoding)
+        try:
+            f.write(contents)
+        finally:
+            f.close()
+
+    def _test_external_case(self, testline, coding="ascii", modulelevel="",
+            suffix=""):
+        """Create and run a test case in a seperate module"""
+        self._setup_external_case(testline, coding, modulelevel, suffix)
+        return self._run_external_case()
+
+    def _setup_external_case(self, testline, coding="ascii", modulelevel="",
+            suffix=""):
+        """Create a test case in a seperate module"""
+        _, prefix, self.modname = self.id().rsplit(".", 2)
+        self.dir = tempfile.mkdtemp(prefix=prefix, suffix=suffix)
+        self.addCleanup(shutil.rmtree, self.dir)
+        self._write_module(self.modname, coding,
+            # Older Python 2 versions don't see a coding declaration in a
+            # docstring so it has to be in a comment, but then we can't
+            # work around bug: <http://ironpython.codeplex.com/workitem/26940>
+            "# coding: %s\n"
+            "import testtools\n"
+            "%s\n"
+            "class Test(testtools.TestCase):\n"
+            "    def runTest(self):\n"
+            "        %s\n" % (coding, modulelevel, testline))
+
+    def _run_external_case(self):
+        """Run the prepared test case in a seperate module"""
+        sys.path.insert(0, self.dir)
+        self.addCleanup(sys.path.remove, self.dir)
+        module = __import__(self.modname)
+        self.addCleanup(sys.modules.pop, self.modname)
+        stream = StringIO()
+        self._run(stream, module.Test())
+        return stream.getvalue()
+
+    def _get_sample_text(self, encoding="unicode_internal"):
+        if encoding is None and str_is_unicode:
+           encoding = "unicode_internal"
+        for u in self._sample_texts:
+            try:
+                b = u.encode(encoding)
+                if u == b.decode(encoding):
+                    if str_is_unicode:
+                        return u, u
+                    return u, b
+            except (LookupError, UnicodeError):
+                pass
+        self.skip("Could not find a sample text for encoding: %r" % encoding)
+
+    def _as_output(self, text):
+        return text
+
+    def test_non_ascii_failure_string(self):
+        """Assertion contents can be non-ascii and should get decoded"""
+        text, raw = self._get_sample_text(_get_exception_encoding())
+        textoutput = self._test_external_case("self.fail(%s)" % _r(raw))
+        self.assertIn(self._as_output(text), textoutput)
+
+    def test_non_ascii_failure_string_via_exec(self):
+        """Assertion via exec can be non-ascii and still gets decoded"""
+        text, raw = self._get_sample_text(_get_exception_encoding())
+        textoutput = self._test_external_case(
+            testline='exec ("self.fail(%s)")' % _r(raw))
+        self.assertIn(self._as_output(text), textoutput)
+
+    def test_control_characters_in_failure_string(self):
+        """Control characters in assertions should be escaped"""
+        textoutput = self._test_external_case("self.fail('\\a\\a\\a')")
+        self.expectFailure("Defense against the beeping horror unimplemented",
+            self.assertNotIn, self._as_output("\a\a\a"), textoutput)
+        self.assertIn(self._as_output(_u("\uFFFD\uFFFD\uFFFD")), textoutput)
+
+    def _local_os_error_matcher(self):
+        if sys.version_info > (3, 3):
+            return MatchesAny(Contains("FileExistsError: "),
+                              Contains("PermissionError: "))
+        elif os.name != "nt":
+            return Contains(self._as_output("OSError: "))
+        else:
+            return Contains(self._as_output("WindowsError: "))
+
+    def test_os_error(self):
+        """Locale error messages from the OS shouldn't break anything"""
+        textoutput = self._test_external_case(
+            modulelevel="import os",
+            testline="os.mkdir('/')")
+        self.assertThat(textoutput, self._local_os_error_matcher())
+
+    def test_assertion_text_shift_jis(self):
+        """A terminal raw backslash in an encoded string is weird but fine"""
+        example_text = _u("\u5341")
+        textoutput = self._test_external_case(
+            coding="shift_jis",
+            testline="self.fail('%s')" % example_text)
+        if str_is_unicode:
+            output_text = example_text
+        else:
+            output_text = example_text.encode("shift_jis").decode(
+                _get_exception_encoding(), "replace")
+        self.assertIn(self._as_output("AssertionError: %s" % output_text),
+            textoutput)
+
+    def test_file_comment_iso2022_jp(self):
+        """Control character escapes must be preserved if valid encoding"""
+        example_text, _ = self._get_sample_text("iso2022_jp")
+        textoutput = self._test_external_case(
+            coding="iso2022_jp",
+            testline="self.fail('Simple') # %s" % example_text)
+        self.assertIn(self._as_output(example_text), textoutput)
+
+    def test_unicode_exception(self):
+        """Exceptions that can be formated losslessly as unicode should be"""
+        example_text, _ = self._get_sample_text()
+        exception_class = (
+            "class FancyError(Exception):\n"
+            # A __unicode__ method does nothing on py3k but the default works
+            "    def __unicode__(self):\n"
+            "        return self.args[0]\n")
+        textoutput = self._test_external_case(
+            modulelevel=exception_class,
+            testline="raise FancyError(%s)" % _r(example_text))
+        self.assertIn(self._as_output(example_text), textoutput)
+
+    def test_unprintable_exception(self):
+        """A totally useless exception instance still prints something"""
+        exception_class = (
+            "class UnprintableError(Exception):\n"
+            "    def __str__(self):\n"
+            "        raise RuntimeError\n"
+            "    def __unicode__(self):\n"
+            "        raise RuntimeError\n"
+            "    def __repr__(self):\n"
+            "        raise RuntimeError\n")
+        textoutput = self._test_external_case(
+            modulelevel=exception_class,
+            testline="raise UnprintableError")
+        self.assertIn(self._as_output(
+            "UnprintableError: <unprintable UnprintableError object>\n"),
+            textoutput)
+
+    def test_non_ascii_dirname(self):
+        """Script paths in the traceback can be non-ascii"""
+        text, raw = self._get_sample_text(sys.getfilesystemencoding())
+        textoutput = self._test_external_case(
+            # Avoid bug in Python 3 by giving a unicode source encoding rather
+            # than just ascii which raises a SyntaxError with no other details
+            coding="utf-8",
+            testline="self.fail('Simple')",
+            suffix=raw)
+        self.assertIn(self._as_output(text), textoutput)
+
+    def test_syntax_error(self):
+        """Syntax errors should still have fancy special-case formatting"""
+        textoutput = self._test_external_case("exec ('f(a, b c)')")
+        self.assertIn(self._as_output(
+            '  File "<string>", line 1\n'
+            '    f(a, b c)\n'
+            + ' ' * self._error_on_character +
+            '          ^\n'
+            'SyntaxError: '
+            ), textoutput)
+
+    def test_syntax_error_malformed(self):
+        """Syntax errors with bogus parameters should break anything"""
+        textoutput = self._test_external_case("raise SyntaxError(3, 2, 1)")
+        self.assertIn(self._as_output("\nSyntaxError: "), textoutput)
+
+    def test_syntax_error_import_binary(self):
+        """Importing a binary file shouldn't break SyntaxError formatting"""
+        self._setup_external_case("import bad")
+        f = open(os.path.join(self.dir, "bad.py"), "wb")
+        try:
+            f.write(_b("x\x9c\xcb*\xcd\xcb\x06\x00\x04R\x01\xb9"))
+        finally:
+            f.close()
+        textoutput = self._run_external_case()
+        matches_error = MatchesAny(
+            Contains('\nTypeError: '), Contains('\nSyntaxError: '))
+        self.assertThat(textoutput, matches_error)
+
+    def test_syntax_error_line_iso_8859_1(self):
+        """Syntax error on a latin-1 line shows the line decoded"""
+        text, raw = self._get_sample_text("iso-8859-1")
+        self._setup_external_case("import bad")
+        self._write_module("bad", "iso-8859-1",
+            "# coding: iso-8859-1\n! = 0 # %s\n" % text)
+        textoutput = self._run_external_case()
+        self.assertIn(self._as_output(_u(
+            #'bad.py", line 2\n'
+            '    ! = 0 # %s\n'
+            '    ^\n'
+            'SyntaxError: ') %
+            (text,)), textoutput)
+
+    def test_syntax_error_line_iso_8859_5(self):
+        """Syntax error on a iso-8859-5 line shows the line decoded"""
+        text, raw = self._get_sample_text("iso-8859-5")
+        self._setup_external_case("import bad")
+        self._write_module("bad", "iso-8859-5",
+            "# coding: iso-8859-5\n%% = 0 # %s\n" % text)
+        textoutput = self._run_external_case()
+        self.assertThat(
+            textoutput,
+            MatchesRegex(
+                self._as_output(_u(
+                #'bad.py", line 2\n'
+                '.*%% = 0 # %s\n'
+                + ' ' * self._error_on_character +
+                '\\s*\\^\n'
+                'SyntaxError:.*') %
+                (text,)),
+            re.MULTILINE | re.DOTALL)
+        )
+
+    def test_syntax_error_line_euc_jp(self):
+        """Syntax error on a euc_jp line shows the line decoded"""
+        text, raw = self._get_sample_text("euc_jp")
+        self._setup_external_case("import bad")
+        self._write_module("bad", "euc_jp",
+            "# coding: euc_jp\n$ = 0 # %s\n" % text)
+        textoutput = self._run_external_case()
+        # pypy uses cpython's multibyte codecs so has their behavior here
+        if self._is_pypy:
+            self._error_on_character = True
+        self.assertIn(self._as_output(_u(
+            #'bad.py", line 2\n'
+            '    $ = 0 # %s\n'
+            + ' ' * self._error_on_character +
+            '   ^\n'
+            'SyntaxError: ') %
+            (text,)), textoutput)
+
+    def test_syntax_error_line_utf_8(self):
+        """Syntax error on a utf-8 line shows the line decoded"""
+        text, raw = self._get_sample_text("utf-8")
+        self._setup_external_case("import bad")
+        self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
+        textoutput = self._run_external_case()
+        self.assertThat(
+            textoutput,
+            MatchesRegex(
+                self._as_output(_u(
+                    '.*bad.py", line 1\n'
+                    '\\s*\\^ = 0 # %s\n'
+                    + ' ' * self._error_on_character +
+                    '\\s*\\^\n'
+                    'SyntaxError:.*') % text),
+                re.M | re.S)
+        )
+
+
+class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
+    """Test that running under unittest produces clean ascii strings"""
+
+    def _run(self, stream, test):
+        from unittest import TextTestRunner as _Runner
+        return _Runner(stream).run(test)
+
+    def _as_output(self, text):
+        if str_is_unicode:
+            return text
+        return text.encode("utf-8")
+
+
+class TestDetailsToStr(TestCase):
+
+    def test_no_details(self):
+        string = _details_to_str({})
+        self.assertThat(string, Equals(''))
+
+    def test_binary_content(self):
+        content = content_from_stream(
+            StringIO('foo'), content_type=ContentType('image', 'jpeg'))
+        string = _details_to_str({'attachment': content})
+        self.assertThat(
+            string, Equals("""\
+Binary content:
+  attachment (image/jpeg)
+"""))
+
+    def test_single_line_content(self):
+        content = text_content('foo')
+        string = _details_to_str({'attachment': content})
+        self.assertThat(string, Equals('attachment: {{{foo}}}\n'))
+
+    def test_multi_line_text_content(self):
+        content = text_content('foo\nbar\nbaz')
+        string = _details_to_str({'attachment': content})
+        self.assertThat(string, Equals('attachment: {{{\nfoo\nbar\nbaz\n}}}\n'))
+
+    def test_special_text_content(self):
+        content = text_content('foo')
+        string = _details_to_str({'attachment': content}, special='attachment')
+        self.assertThat(string, Equals('foo\n'))
+
+    def test_multiple_text_content(self):
+        string = _details_to_str(
+            {'attachment': text_content('foo\nfoo'),
+             'attachment-1': text_content('bar\nbar')})
+        self.assertThat(
+            string, Equals('attachment: {{{\n'
+                           'foo\n'
+                           'foo\n'
+                           '}}}\n'
+                           '\n'
+                           'attachment-1: {{{\n'
+                           'bar\n'
+                           'bar\n'
+                           '}}}\n'))
+
+    def test_empty_attachment(self):
+        string = _details_to_str({'attachment': text_content('')})
+        self.assertThat(
+            string, Equals("""\
+Empty attachments:
+  attachment
+"""))
+
+    def test_lots_of_different_attachments(self):
+        jpg = lambda x: content_from_stream(
+            StringIO(x), ContentType('image', 'jpeg'))
+        attachments = {
+            'attachment': text_content('foo'),
+            'attachment-1': text_content('traceback'),
+            'attachment-2': jpg('pic1'),
+            'attachment-3': text_content('bar'),
+            'attachment-4': text_content(''),
+            'attachment-5': jpg('pic2'),
+            }
+        string = _details_to_str(attachments, special='attachment-1')
+        self.assertThat(
+            string, Equals("""\
+Binary content:
+  attachment-2 (image/jpeg)
+  attachment-5 (image/jpeg)
+Empty attachments:
+  attachment-4
+
+attachment: {{{foo}}}
+attachment-3: {{{bar}}}
+
+traceback
+"""))
+
+
+class TestByTestResultTests(TestCase):
+
+    def setUp(self):
+        super(TestByTestResultTests, self).setUp()
+        self.log = []
+        self.result = TestByTestResult(self.on_test)
+        now = iter(range(5))
+        self.result._now = lambda: advance_iterator(now)
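+        # Stub the clock: successive _now() calls yield 0, 1, 2, ... so the
+        # start/stop times asserted in assertCalled() are deterministic.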
+
+    def assertCalled(self, **kwargs):
+        defaults = {
+            'test': self,
+            'tags': set(),
+            'details': None,
+            'start_time': 0,
+            'stop_time': 1,
+            }
+        defaults.update(kwargs)
+        self.assertEqual([defaults], self.log)
+
+    def on_test(self, **kwargs):
+        self.log.append(kwargs)
+
+    def test_no_tests_nothing_reported(self):
+        self.result.startTestRun()
+        self.result.stopTestRun()
+        self.assertEqual([], self.log)
+
+    def test_add_success(self):
+        self.result.startTest(self)
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertCalled(status='success')
+
+    def test_add_success_details(self):
+        self.result.startTest(self)
+        details = {'foo': 'bar'}
+        self.result.addSuccess(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', details=details)
+
+    def test_global_tags(self):
+        self.result.tags(['foo'], [])
+        self.result.startTest(self)
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', tags=set(['foo']))
+
+    def test_local_tags(self):
+        self.result.tags(['foo'], [])
+        self.result.startTest(self)
+        self.result.tags(['bar'], [])
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', tags=set(['foo', 'bar']))
+
+    def test_add_error(self):
+        self.result.startTest(self)
+        try:
+            1/0
+        except ZeroDivisionError:
+            error = sys.exc_info()
+        self.result.addError(self, error)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='error',
+            details={'traceback': TracebackContent(error, self)})
+
+    def test_add_error_details(self):
+        self.result.startTest(self)
+        details = {"foo": text_content("bar")}
+        self.result.addError(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='error', details=details)
+
+    def test_add_failure(self):
+        self.result.startTest(self)
+        try:
+            self.fail("intentional failure")
+        except self.failureException:
+            failure = sys.exc_info()
+        self.result.addFailure(self, failure)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='failure',
+            details={'traceback': TracebackContent(failure, self)})
+
+    def test_add_failure_details(self):
+        self.result.startTest(self)
+        details = {"foo": text_content("bar")}
+        self.result.addFailure(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='failure', details=details)
+
+    def test_add_xfail(self):
+        self.result.startTest(self)
+        try:
+            1/0
+        except ZeroDivisionError:
+            error = sys.exc_info()
+        self.result.addExpectedFailure(self, error)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='xfail',
+            details={'traceback': TracebackContent(error, self)})
+
+    def test_add_xfail_details(self):
+        self.result.startTest(self)
+        details = {"foo": text_content("bar")}
+        self.result.addExpectedFailure(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='xfail', details=details)
+
+    def test_add_unexpected_success(self):
+        self.result.startTest(self)
+        details = {'foo': 'bar'}
+        self.result.addUnexpectedSuccess(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', details=details)
+
+    def test_add_skip_reason(self):
+        self.result.startTest(self)
+        reason = self.getUniqueString()
+        self.result.addSkip(self, reason)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='skip', details={'reason': text_content(reason)})
+
+    def test_add_skip_details(self):
+        self.result.startTest(self)
+        details = {'foo': 'bar'}
+        self.result.addSkip(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='skip', details=details)
+
+    def test_twice(self):
+        self.result.startTest(self)
+        self.result.addSuccess(self, details={'foo': 'bar'})
+        self.result.stopTest(self)
+        self.result.startTest(self)
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertEqual(
+            [{'test': self,
+              'status': 'success',
+              'start_time': 0,
+              'stop_time': 1,
+              'tags': set(),
+              'details': {'foo': 'bar'}},
+             {'test': self,
+              'status': 'success',
+              'start_time': 2,
+              'stop_time': 3,
+              'tags': set(),
+              'details': None},
+             ],
+            self.log)
+
+
+class TestTagger(TestCase):
+
+    def test_tags_tests(self):
+        result = ExtendedTestResult()
+        tagger = Tagger(result, set(['foo']), set(['bar']))
+        test1, test2 = self, make_test()
+        tagger.startTest(test1)
+        tagger.addSuccess(test1)
+        tagger.stopTest(test1)
+        tagger.startTest(test2)
+        tagger.addSuccess(test2)
+        tagger.stopTest(test2)
+        self.assertEqual(
+            [('startTest', test1),
+             ('tags', set(['foo']), set(['bar'])),
+             ('addSuccess', test1),
+             ('stopTest', test1),
+             ('startTest', test2),
+             ('tags', set(['foo']), set(['bar'])),
+             ('addSuccess', test2),
+             ('stopTest', test2),
+             ], result._events)
+
+
+class TestTimestampingStreamResult(TestCase):
+
+    def test_startTestRun(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.startTestRun()
+        self.assertEqual([('startTestRun',)], result.targets[0]._events)
+
+    def test_stopTestRun(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.stopTestRun()
+        self.assertEqual([('stopTestRun',)], result.targets[0]._events)
+
+    def test_status_no_timestamp(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.status(test_id="A", test_status="B", test_tags="C",
+            runnable="D", file_name="E", file_bytes=b"F", eof=True,
+            mime_type="G", route_code="H")
+        events = result.targets[0]._events
+        self.assertThat(events, HasLength(1))
+        self.assertThat(events[0], HasLength(11))
+        self.assertEqual(
+            ("status", "A", "B", "C", "D", "E", b"F", True, "G", "H"),
+            events[0][:10])
+        self.assertNotEqual(None, events[0][10])
+        self.assertIsInstance(events[0][10], datetime.datetime)
+
+    def test_status_timestamp(self):
+        result = TimestampingStreamResult(LoggingStreamResult())
+        result.status(timestamp="F")
+        self.assertEqual("F", result.targets[0]._events[0][10])
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_testsuite.py b/third_party/testtools/testtools/tests/test_testsuite.py
new file mode 100644
index 0000000..3bbe63d
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_testsuite.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test ConcurrentTestSuite and related things."""
+
+__metaclass__ = type
+
+import doctest
+from functools import partial
+import sys
+import unittest
+
+from extras import try_import
+
+from testtools import (
+    ConcurrentTestSuite,
+    ConcurrentStreamTestSuite,
+    iterate_tests,
+    PlaceHolder,
+    TestByTestResult,
+    TestCase,
+    )
+from testtools.compat import _b, _u
+from testtools.matchers import DocTestMatches
+from testtools.testsuite import FixtureSuite, sorted_tests
+from testtools.tests.helpers import LoggingResult
+from testtools.testresult.doubles import StreamResult as LoggingStream
+
+FunctionFixture = try_import('fixtures.FunctionFixture')
+
+
+class Sample(TestCase):
+    def __hash__(self):
+        return id(self)
+    def test_method1(self):
+        pass
+    def test_method2(self):
+        pass
+
+
+class TestConcurrentTestSuiteRun(TestCase):
+
+    def test_broken_test(self):
+        log = []
+        def on_test(test, status, start_time, stop_time, tags, details):
+            log.append((test.id(), status, set(details.keys())))
+        class BrokenTest(object):
+            # Simple break - no result parameter to run()
+            def __call__(self):
+                pass
+            run = __call__
+        original_suite = unittest.TestSuite([BrokenTest()])
+        suite = ConcurrentTestSuite(original_suite, self.split_suite)
+        suite.run(TestByTestResult(on_test))
+        self.assertEqual([('broken-runner', 'error', set(['traceback']))], log)
+
+    def test_trivial(self):
+        log = []
+        result = LoggingResult(log)
+        test1 = Sample('test_method1')
+        test2 = Sample('test_method2')
+        original_suite = unittest.TestSuite([test1, test2])
+        suite = ConcurrentTestSuite(original_suite, self.split_suite)
+        suite.run(result)
+        # log[0] is the timestamp for the first test starting.
+        test1 = log[1][1]
+        test2 = log[-1][1]
+        self.assertIsInstance(test1, Sample)
+        self.assertIsInstance(test2, Sample)
+        self.assertNotEqual(test1.id(), test2.id())
+
+    def test_wrap_result(self):
+        # ConcurrentTestSuite has a hook for wrapping the per-thread result.
+        wrap_log = []
+
+        def wrap_result(thread_safe_result, thread_number):
+            wrap_log.append(
+                (thread_safe_result.result.decorated, thread_number))
+            return thread_safe_result
+
+        result_log = []
+        result = LoggingResult(result_log)
+        test1 = Sample('test_method1')
+        test2 = Sample('test_method2')
+        original_suite = unittest.TestSuite([test1, test2])
+        suite = ConcurrentTestSuite(
+            original_suite, self.split_suite, wrap_result=wrap_result)
+        suite.run(result)
+        self.assertEqual(
+            [(result, 0),
+             (result, 1),
+             ], wrap_log)
+        # Smoke test to make sure everything ran OK.
+        self.assertNotEqual([], result_log)
+
+    def split_suite(self, suite):
+        return list(iterate_tests(suite))
+
+
+class TestConcurrentStreamTestSuiteRun(TestCase):
+
+    def test_trivial(self):
+        result = LoggingStream()
+        test1 = Sample('test_method1')
+        test2 = Sample('test_method2')
+        cases = lambda:[(test1, '0'), (test2, '1')]
+        suite = ConcurrentStreamTestSuite(cases)
+        suite.run(result)
+        def freeze(set_or_none):
+            if set_or_none is None:
+                return set_or_none
+            return frozenset(set_or_none)
+        # Ignore event order: we're testing the code is all glued together,
+        # which just means we can pump events through and they get route codes
+        # added appropriately.
+        self.assertEqual(set([
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method1',
+             'inprogress',
+             None,
+             True,
+             None,
+             None,
+             False,
+             None,
+             '0',
+             None,
+             ),
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method1',
+             'success',
+             frozenset(),
+             True,
+             None,
+             None,
+             False,
+             None,
+             '0',
+             None,
+             ),
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method2',
+             'inprogress',
+             None,
+             True,
+             None,
+             None,
+             False,
+             None,
+             '1',
+             None,
+             ),
+            ('status',
+             'testtools.tests.test_testsuite.Sample.test_method2',
+             'success',
+             frozenset(),
+             True,
+             None,
+             None,
+             False,
+             None,
+             '1',
+             None,
+             ),
+            ]), set(event[0:3] + (freeze(event[3]),) + event[4:10] + (None,)
+                for event in result._events))
+
+    def test_broken_runner(self):
+        # If the object called breaks, the stream is informed about it
+        # regardless.
+        class BrokenTest(object):
+            # broken - no result parameter!
+            def __call__(self):
+                pass
+            def run(self):
+                pass
+        result = LoggingStream()
+        cases = lambda:[(BrokenTest(), '0')]
+        suite = ConcurrentStreamTestSuite(cases)
+        suite.run(result)
+        events = result._events
+        # Check the traceback loosely.
+        self.assertThat(events[1][6].decode('utf8'), DocTestMatches("""\
+Traceback (most recent call last):
+  File "...testtools/testsuite.py", line ..., in _run_test
+    test.run(process_result)
+TypeError: run() takes ...1 ...argument...2...given...
+""", doctest.ELLIPSIS))
+        events = [event[0:10] + (None,) for event in events]
+        events[1] = events[1][:6] + (None,) + events[1][7:]
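+        # Timestamps (index 10) and the traceback bytes of the second event
+        # (index 6) vary between runs, so blank them before comparing.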
+        self.assertEqual([
+            ('status', "broken-runner-'0'", 'inprogress', None, True, None, None, False, None, _u('0'), None),
+            ('status', "broken-runner-'0'", None, None, True, 'traceback', None,
+             True,
+             'text/x-traceback; charset="utf8"; language="python"',
+             '0',
+             None),
+             ('status', "broken-runner-'0'", 'fail', set(), True, None, None, False, None, _u('0'), None)
+            ], events)
+
+    def split_suite(self, suite):
+        tests = list(enumerate(iterate_tests(suite)))
+        return [(test, _u(str(pos))) for pos, test in tests]
+
+
+class TestFixtureSuite(TestCase):
+
+    def setUp(self):
+        super(TestFixtureSuite, self).setUp()
+        if FunctionFixture is None:
+            self.skip("Need fixtures")
+
+    def test_fixture_suite(self):
+        log = []
+        class Sample(TestCase):
+            def test_one(self):
+                log.append(1)
+            def test_two(self):
+                log.append(2)
+        fixture = FunctionFixture(
+            lambda: log.append('setUp'),
+            lambda fixture: log.append('tearDown'))
+        suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_two')])
+        suite.run(LoggingResult([]))
+        self.assertEqual(['setUp', 1, 2, 'tearDown'], log)
+
+    def test_fixture_suite_sort(self):
+        log = []
+        class Sample(TestCase):
+            def test_one(self):
+                log.append(1)
+            def test_two(self):
+                log.append(2)
+        fixture = FunctionFixture(
+            lambda: log.append('setUp'),
+            lambda fixture: log.append('tearDown'))
+        suite = FixtureSuite(fixture, [Sample('test_one'), Sample('test_one')])
+        self.assertRaises(ValueError, suite.sort_tests)
+
+
+class TestSortedTests(TestCase):
+
+    def test_sorts_custom_suites(self):
+        a = PlaceHolder('a')
+        b = PlaceHolder('b')
+        class Subclass(unittest.TestSuite):
+            def sort_tests(self):
+                self._tests = sorted_tests(self, True)
+        input_suite = Subclass([b, a])
+        suite = sorted_tests(input_suite)
+        self.assertEqual([a, b], list(iterate_tests(suite)))
+        self.assertEqual([input_suite], list(iter(suite)))
+
+    def test_custom_suite_without_sort_tests_works(self):
+        a = PlaceHolder('a')
+        b = PlaceHolder('b')
+        class Subclass(unittest.TestSuite):
+            pass
+        input_suite = Subclass([b, a])
+        suite = sorted_tests(input_suite)
+        self.assertEqual([b, a], list(iterate_tests(suite)))
+        self.assertEqual([input_suite], list(iter(suite)))
+
+    def test_sorts_simple_suites(self):
+        a = PlaceHolder('a')
+        b = PlaceHolder('b')
+        suite = sorted_tests(unittest.TestSuite([b, a]))
+        self.assertEqual([a, b], list(iterate_tests(suite)))
+
+    def test_duplicate_simple_suites(self):
+        a = PlaceHolder('a')
+        b = PlaceHolder('b')
+        c = PlaceHolder('a')
+        self.assertRaises(
+            ValueError, sorted_tests, unittest.TestSuite([a, b, c]))
+
+
+def test_suite():
+    from unittest import TestLoader
+    return TestLoader().loadTestsFromName(__name__)
diff --git a/third_party/testtools/testtools/tests/test_with_with.py b/third_party/testtools/testtools/tests/test_with_with.py
new file mode 100644
index 0000000..f26f0f8
--- /dev/null
+++ b/third_party/testtools/testtools/tests/test_with_with.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2011 testtools developers. See LICENSE for details.
+
+from __future__ import with_statement
+
+import sys
+
+from testtools import (
+    ExpectedException,
+    TestCase,
+    )
+from testtools.matchers import (
+    AfterPreprocessing,
+    Equals,
+    EndsWith,
+    )
+
+
+class TestExpectedException(TestCase):
+    """Test the ExpectedException context manager."""
+
+    def test_pass_on_raise(self):
+        with ExpectedException(ValueError, 'tes.'):
+            raise ValueError('test')
+
+    def test_pass_on_raise_matcher(self):
+        with ExpectedException(
+            ValueError, AfterPreprocessing(str, Equals('test'))):
+            raise ValueError('test')
+
+    def test_raise_on_text_mismatch(self):
+        try:
+            with ExpectedException(ValueError, 'tes.'):
+                raise ValueError('mismatch')
+        except AssertionError:
+            e = sys.exc_info()[1]
+            self.assertEqual("'mismatch' does not match /tes./", str(e))
+        else:
+            self.fail('AssertionError not raised.')
+
+    def test_raise_on_general_mismatch(self):
+        matcher = AfterPreprocessing(str, Equals('test'))
+        value_error = ValueError('mismatch')
+        try:
+            with ExpectedException(ValueError, matcher):
+                raise value_error
+        except AssertionError:
+            e = sys.exc_info()[1]
+            self.assertEqual(matcher.match(value_error).describe(), str(e))
+        else:
+            self.fail('AssertionError not raised.')
+
+    def test_raise_on_error_mismatch(self):
+        try:
+            with ExpectedException(TypeError, 'tes.'):
+                raise ValueError('mismatch')
+        except ValueError:
+            e = sys.exc_info()[1]
+            self.assertEqual('mismatch', str(e))
+        else:
+            self.fail('ValueError not raised.')
+
+    def test_raise_if_no_exception(self):
+        try:
+            with ExpectedException(TypeError, 'tes.'):
+                pass
+        except AssertionError:
+            e = sys.exc_info()[1]
+            self.assertEqual('TypeError not raised.', str(e))
+        else:
+            self.fail('AssertionError not raised.')
+
+    def test_pass_on_raise_any_message(self):
+        with ExpectedException(ValueError):
+            raise ValueError('whatever')
+
+    def test_annotate(self):
+        def die():
+            with ExpectedException(ValueError, msg="foo"):
+                pass
+        exc = self.assertRaises(AssertionError, die)
+        self.assertThat(exc.args[0], EndsWith(': foo'))
+
+    def test_annotated_matcher(self):
+        def die():
+            with ExpectedException(ValueError, 'bar', msg="foo"):
+                pass
+        exc = self.assertRaises(AssertionError, die)
+        self.assertThat(exc.args[0], EndsWith(': foo'))
diff --git a/third_party/testtools/testtools/testsuite.py b/third_party/testtools/testtools/testsuite.py
new file mode 100644
index 0000000..e2945f3
--- /dev/null
+++ b/third_party/testtools/testtools/testsuite.py
@@ -0,0 +1,317 @@
+# Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
+
+"""Test suites and related things."""
+
+__metaclass__ = type
+__all__ = [
+  'ConcurrentTestSuite',
+  'ConcurrentStreamTestSuite',
+  'filter_by_ids',
+  'iterate_tests',
+  'sorted_tests',
+  ]
+
+import sys
+import threading
+import unittest
+
+from extras import safe_hasattr, try_imports
+
+Queue = try_imports(['Queue.Queue', 'queue.Queue'])
+
+import testtools
+
+
+def iterate_tests(test_suite_or_case):
+    """Iterate through all of the test cases in 'test_suite_or_case'."""
+    try:
+        suite = iter(test_suite_or_case)
+    except TypeError:
+        yield test_suite_or_case
+    else:
+        for test in suite:
+            for subtest in iterate_tests(test):
+                yield subtest
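+
+
+# A minimal usage sketch for iterate_tests (hypothetical tests; names are
+# illustrative only):
+#
+#   inner = unittest.TestSuite([test_a])
+#   outer = unittest.TestSuite([inner, test_b])
+#   list(iterate_tests(outer))  # -> [test_a, test_b]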
+
+
+class ConcurrentTestSuite(unittest.TestSuite):
+    """A TestSuite whose run() calls out to a concurrency strategy."""
+
+    def __init__(self, suite, make_tests, wrap_result=None):
+        """Create a ConcurrentTestSuite to execute suite.
+
+        :param suite: A suite to run concurrently.
+        :param make_tests: A helper function to split the tests in the
+            ConcurrentTestSuite into some number of concurrently executing
+            sub-suites. make_tests must take a suite, and return an iterable
+            of TestCase-like objects, each of which must have a run(result)
+            method.
+        :param wrap_result: An optional function that takes a thread-safe
+            result and a thread number and must return a ``TestResult``
+            object. If not provided, then ``ConcurrentTestSuite`` will just
+            use a ``ThreadsafeForwardingResult`` wrapped around the result
+            passed to ``run()``.
+        """
+        super(ConcurrentTestSuite, self).__init__([suite])
+        self.make_tests = make_tests
+        if wrap_result:
+            self._wrap_result = wrap_result
+
+    def _wrap_result(self, thread_safe_result, thread_number):
+        """Wrap a thread-safe result before sending it test results.
+
+        You can either override this in a subclass or pass your own
+        ``wrap_result`` into the constructor.  The latter is preferred.
+        """
+        return thread_safe_result
+
+    def run(self, result):
+        """Run the tests concurrently.
+
+        This calls out to the provided make_tests helper, and then serialises
+        the results so that result only sees activity from one TestCase at
+        a time.
+
+        ConcurrentTestSuite provides no special mechanism to stop the tests
+        returned by make_tests; it is up to the returned tests to honour the
+        shouldStop attribute on the result object they are run with, which
+        will be set if an exception is raised in the thread in which
+        ConcurrentTestSuite.run is called.
+        """
+        tests = self.make_tests(self)
+        try:
+            threads = {}
+            queue = Queue()
+            semaphore = threading.Semaphore(1)
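+            # Each worker forwards through its own ThreadsafeForwardingResult;
+            # the shared semaphore serialises forwarding so that `result`
+            # sees one test's events at a time.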
+            for i, test in enumerate(tests):
+                process_result = self._wrap_result(
+                    testtools.ThreadsafeForwardingResult(result, semaphore), i)
+                reader_thread = threading.Thread(
+                    target=self._run_test, args=(test, process_result, queue))
+                threads[test] = reader_thread, process_result
+                reader_thread.start()
+            while threads:
+                finished_test = queue.get()
+                threads[finished_test][0].join()
+                del threads[finished_test]
+        except:
+            for thread, process_result in threads.values():
+                process_result.stop()
+            raise
+
+    def _run_test(self, test, process_result, queue):
+        try:
+            try:
+                test.run(process_result)
+            except Exception:
+                # The run logic itself failed.
+                case = testtools.ErrorHolder(
+                    "broken-runner",
+                    error=sys.exc_info())
+                case.run(process_result)
+        finally:
+            queue.put(test)
+
+
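+# Editorial sketch (hypothetical helper, not part of the testtools API): a
+# typical make_tests callback that round-robins the tests of the wrapped
+# suite into a fixed number of concurrently executing sub-suites. Usage
+# would then be, roughly:
+#   ConcurrentTestSuite(suite, _example_split_suite).run(result)
+def _example_split_suite(suite, count=2):
+    partitions = [[] for _ in range(count)]
+    for i, test in enumerate(iterate_tests(suite)):
+        partitions[i % count].append(test)
+    return [unittest.TestSuite(tests) for tests in partitions]
+
+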
+class ConcurrentStreamTestSuite(object):
+    """A TestSuite whose run() parallelises."""
+
+    def __init__(self, make_tests):
+        """Create a ConcurrentTestSuite to execute tests returned by make_tests.
+
+        :param make_tests: A helper function that should return some number
+            of concurrently executable test suite / test case objects.
+            make_tests must take no parameters and return an iterable of
+            tuples. Each tuple must be of the form (case, route_code), where
+            case is a TestCase-like object with a run(result) method, and
+            route_code is either None or a unicode string.
+        """
+        super(ConcurrentStreamTestSuite, self).__init__()
+        self.make_tests = make_tests
+
+    def run(self, result):
+        """Run the tests concurrently.
+
+        This calls out to the provided make_tests helper to determine the
+        concurrency to use and to assign routing codes to each worker.
+
+        ConcurrentStreamTestSuite provides no special mechanism to stop the
+        tests returned by make_tests; it is up to those tests to honour the
+        shouldStop attribute on the result object they are run with, which
+        will be set if the test run is to be aborted.
+
+        The tests are run with an ExtendedToStreamDecorator wrapped around a
+        StreamToQueue instance. ConcurrentStreamTestSuite dequeues events from
+        the queue and forwards them to result. Tests can therefore be either
+        original unittest tests (or compatible tests), or new tests that emit
+        StreamResult events directly.
+
+        :param result: A StreamResult instance. The caller is responsible for
+            calling startTestRun on this instance before invoking suite.run,
+            and stopTestRun after the run method returns.
+        """
+        tests = self.make_tests()
+        try:
+            threads = {}
+            queue = Queue()
+            for test, route_code in tests:
+                to_queue = testtools.StreamToQueue(queue, route_code)
+                process_result = testtools.ExtendedToStreamDecorator(
+                    testtools.TimestampingStreamResult(to_queue))
+                runner_thread = threading.Thread(
+                    target=self._run_test,
+                    args=(test, process_result, route_code))
+                threads[to_queue] = runner_thread, process_result
+                runner_thread.start()
+            while threads:
+                event_dict = queue.get()
+                event = event_dict.pop('event')
+                if event == 'status':
+                    result.status(**event_dict)
+                elif event == 'stopTestRun':
+                    thread = threads.pop(event_dict['result'])[0]
+                    thread.join()
+                elif event == 'startTestRun':
+                    pass
+                else:
+                    raise ValueError('unknown event type %r' % (event,))
+        except:
+            for thread, process_result in threads.values():
+                # Signal to each TestControl in the ExtendedToStreamDecorator
+                # that the thread should stop running tests and clean up.
+                process_result.stop()
+            raise
+
+    def _run_test(self, test, process_result, route_code):
+        process_result.startTestRun()
+        try:
+            try:
+                test.run(process_result)
+            except Exception:
+                # The run logic itself failed.
+                case = testtools.ErrorHolder(
+                    "broken-runner-'%s'" % (route_code,),
+                    error=sys.exc_info())
+                case.run(process_result)
+        finally:
+            process_result.stopTestRun()
+
+
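+# Editorial sketch (hypothetical driver, not part of the testtools API):
+# run TestCase-like objects concurrently, giving each worker its own route
+# code and honouring the startTestRun/stopTestRun contract described above.
+def _example_run_stream_suite(cases, result):
+    # result should be a StreamResult; route codes are text strings.
+    suite = ConcurrentStreamTestSuite(
+        lambda: [(case, u'%d' % i) for i, case in enumerate(cases)])
+    result.startTestRun()
+    try:
+        suite.run(result)
+    finally:
+        result.stopTestRun()
+
+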
+class FixtureSuite(unittest.TestSuite):
+
+    def __init__(self, fixture, tests):
+        super(FixtureSuite, self).__init__(tests)
+        self._fixture = fixture
+
+    def run(self, result):
+        self._fixture.setUp()
+        try:
+            super(FixtureSuite, self).run(result)
+        finally:
+            self._fixture.cleanUp()
+
+    def sort_tests(self):
+        self._tests = sorted_tests(self, True)
+
+
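+# Editorial sketch (hypothetical, not part of the testtools API): FixtureSuite
+# accepts any object with setUp()/cleanUp() methods - typically a
+# fixtures.Fixture - and brackets the whole sub-suite run with it.
+def _example_fixture_suite(tests):
+    class _TraceFixture(object):
+        # Stand-in for a real fixture: set up once before the suite runs,
+        # clean up once afterwards.
+        def setUp(self):
+            print('fixture set up')
+        def cleanUp(self):
+            print('fixture cleaned up')
+    return FixtureSuite(_TraceFixture(), tests)
+
+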
+def _flatten_tests(suite_or_case, unpack_outer=False):
+    try:
+        tests = iter(suite_or_case)
+    except TypeError:
+        # Not iterable, assume it's a test case.
+        return [(suite_or_case.id(), suite_or_case)]
+    if (type(suite_or_case) in (unittest.TestSuite,) or
+        unpack_outer):
+        # Plain old test suite (or any others we may add).
+        result = []
+        for test in tests:
+            # Recurse to flatten.
+            result.extend(_flatten_tests(test))
+        return result
+    else:
+        # Find the first actual test and grab its id.
+        suite_id = None
+        tests = iterate_tests(suite_or_case)
+        for test in tests:
+            suite_id = test.id()
+            break
+        # If it has a sort_tests method, call that.
+        if safe_hasattr(suite_or_case, 'sort_tests'):
+            suite_or_case.sort_tests()
+        return [(suite_id, suite_or_case)]
+
+
+def filter_by_ids(suite_or_case, test_ids):
+    """Remove tests from suite_or_case where their id is not in test_ids.
+
+    :param suite_or_case: A test suite or test case.
+    :param test_ids: Something that supports the __contains__ protocol.
+    :return: suite_or_case, unless suite_or_case was a case that itself
+        fails the predicate, in which case a new empty unittest.TestSuite is
+        returned.
+
+    This helper exists to provide backwards compatibility with older versions
+    of Python (currently all versions :)) that don't have a native
+    filter_by_ids() method on Test(Case|Suite).
+
+    For subclasses of TestSuite, filtering is done by:
+        - attempting to call suite.filter_by_ids(test_ids)
+        - if there is no method, iterating the suite and identifying tests to
+          remove, then removing them from _tests, manually recursing into
+          each entry.
+
+    For objects with an id() method (TestCases), filtering is done by:
+        - attempting to return case.filter_by_ids(test_ids)
+        - if there is no such method, checking whether case.id() is in
+          test_ids and returning case if it is, or an empty TestSuite() if
+          it is not.
+
+    For anything else, it is not filtered - it is returned as-is.
+
+    To provide compatibility with this routine for a custom TestSuite, just
+    define a filter_by_ids() method that will return a TestSuite equivalent to
+    the original minus any tests not in test_ids.
+    Similarly, to provide compatibility for a custom TestCase that does
+    something unusual, define filter_by_ids to return a new TestCase object
+    that will only run test_ids that are in the provided container. If none
+    would run, return an empty TestSuite().
+
+    The contract for this function does not require mutation - each filtered
+    object can choose to return a new object with the filtered tests.
+    However, because existing custom TestSuite classes in the wild do not have
+    this method, we need a way to copy their state correctly, which is tricky:
+    thus the backwards-compatible code paths attempt to mutate in place rather
+    than guessing how to reconstruct a new suite.
+    """
+    # Compatible objects
+    if safe_hasattr(suite_or_case, 'filter_by_ids'):
+        return suite_or_case.filter_by_ids(test_ids)
+    # TestCase objects.
+    if safe_hasattr(suite_or_case, 'id'):
+        if suite_or_case.id() in test_ids:
+            return suite_or_case
+        else:
+            return unittest.TestSuite()
+    # Standard TestSuites or derived classes [assumed to be mutable].
+    if isinstance(suite_or_case, unittest.TestSuite):
+        filtered = []
+        for item in suite_or_case:
+            filtered.append(filter_by_ids(item, test_ids))
+        suite_or_case._tests[:] = filtered
+    # Everything else:
+    return suite_or_case
+
+
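+# Editorial sketch (hypothetical, not part of the testtools API) of the
+# compatibility protocol documented above: a custom suite that implements
+# filter_by_ids() itself, so the helper delegates to it instead of mutating
+# _tests.
+class _ExampleIdFilteringSuite(unittest.TestSuite):
+    def filter_by_ids(self, test_ids):
+        # Rebuild the suite from only those tests whose id is wanted.
+        return unittest.TestSuite(
+            test for test in iterate_tests(self) if test.id() in test_ids)
+
+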
+def sorted_tests(suite_or_case, unpack_outer=False):
+    """Sort suite_or_case while preserving non-vanilla TestSuites."""
+    # Duplicate test ids can induce a TypeError in Python 3.3.
+    # Detect duplicate test ids and raise an exception when one is found.
+    seen = set()
+    for test_case in iterate_tests(suite_or_case):
+        test_id = test_case.id()
+        if test_id not in seen:
+            seen.add(test_id)
+        else:
+            raise ValueError('Duplicate test id detected: %s' % (test_id,))
+    tests = _flatten_tests(suite_or_case, unpack_outer=unpack_outer)
+    tests.sort()
+    return unittest.TestSuite([test for (sort_key, test) in tests])
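+
+
+# Editorial sketch (hypothetical, not part of the testtools API): sorted_tests
+# returns a new suite ordered by test id and raises ValueError if two tests
+# share an id.
+def _example_sorted_ids(suite):
+    return [test.id() for test in iterate_tests(sorted_tests(suite))]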
diff --git a/third_party/testtools/testtools/utils.py b/third_party/testtools/testtools/utils.py
new file mode 100644
index 0000000..ddd841f
--- /dev/null
+++ b/third_party/testtools/testtools/utils.py
@@ -0,0 +1,12 @@
+# Copyright (c) 2008-2010 testtools developers. See LICENSE for details.
+
+"""Utilities for dealing with stuff in unittest.
+
+Legacy - deprecated - use testtools.testsuite.iterate_tests
+"""
+
+import warnings
+warnings.warn("Please import iterate_tests from testtools.testsuite - "
+    "testtools.utils is deprecated.", DeprecationWarning, stacklevel=2)
+
+from testtools.testsuite import iterate_tests
diff --git a/third_party/wscript_build b/third_party/wscript_build
index a86732e..63f616e 100644
--- a/third_party/wscript_build
+++ b/third_party/wscript_build
@@ -7,6 +7,7 @@ external_libs = {
     "dns.resolver": "dnspython/dns",
     "mimeparse": "mimeparse",
     "extras": "python-extras/extras",
+    "testtools": "testtools/testtools",
     }
 
 list = []
-- 
2.1.3


