[PATCH 07/13] Move subunit to third_party/.

Jelmer Vernooij jelmer at samba.org
Sat Nov 15 12:17:02 MST 2014


Change-Id: I9244f86fe3de17d5908d65e2d4f6c2ec7ef9b167
Signed-off-by: Jelmer Vernooij <jelmer at samba.org>
---
 buildtools/wafsamba/test_duplicate_symbol.sh       |    2 +-
 lib/subunit/.gitignore                             |   56 -
 lib/subunit/Apache-2.0                             |  202 ---
 lib/subunit/BSD                                    |   26 -
 lib/subunit/COPYING                                |   36 -
 lib/subunit/INSTALL                                |   35 -
 lib/subunit/MANIFEST.in                            |   20 -
 lib/subunit/Makefile.am                            |  147 ---
 lib/subunit/NEWS                                   |  547 --------
 lib/subunit/README                                 |  470 -------
 lib/subunit/all_tests.py                           |   36 -
 lib/subunit/c++/README                             |   50 -
 lib/subunit/c++/SubunitTestProgressListener.cpp    |   63 -
 lib/subunit/c++/SubunitTestProgressListener.h      |   56 -
 lib/subunit/c/README                               |   68 -
 lib/subunit/c/include/subunit/child.h              |   96 --
 lib/subunit/c/lib/child.c                          |  104 --
 lib/subunit/c/tests/test_child.c                   |  234 ----
 lib/subunit/c/wscript                              |   16 -
 lib/subunit/configure.ac                           |   76 --
 lib/subunit/filters/subunit-1to2                   |   42 -
 lib/subunit/filters/subunit-2to1                   |   47 -
 lib/subunit/filters/subunit-filter                 |  165 ---
 lib/subunit/filters/subunit-ls                     |   60 -
 lib/subunit/filters/subunit-notify                 |   48 -
 lib/subunit/filters/subunit-output                 |   23 -
 lib/subunit/filters/subunit-stats                  |   32 -
 lib/subunit/filters/subunit-tags                   |   27 -
 lib/subunit/filters/subunit2csv                    |   26 -
 lib/subunit/filters/subunit2gtk                    |  240 ----
 lib/subunit/filters/subunit2junitxml               |   36 -
 lib/subunit/filters/subunit2pyunit                 |   59 -
 lib/subunit/filters/tap2subunit                    |   26 -
 lib/subunit/libcppunit_subunit.pc.in               |   11 -
 lib/subunit/libsubunit.pc.in                       |   11 -
 lib/subunit/perl/Makefile.PL.in                    |   21 -
 lib/subunit/perl/lib/Subunit.pm                    |  183 ---
 lib/subunit/perl/lib/Subunit/Diff.pm               |   85 --
 lib/subunit/perl/subunit-diff                      |   31 -
 lib/subunit/python/iso8601/LICENSE                 |   20 -
 lib/subunit/python/iso8601/README                  |   26 -
 lib/subunit/python/iso8601/README.subunit          |    5 -
 lib/subunit/python/iso8601/setup.py                |   58 -
 lib/subunit/python/iso8601/test_iso8601.py         |  111 --
 lib/subunit/python/subunit/__init__.py             | 1320 -------------------
 lib/subunit/python/subunit/_output.py              |  203 ---
 lib/subunit/python/subunit/chunked.py              |  185 ---
 lib/subunit/python/subunit/details.py              |  119 --
 lib/subunit/python/subunit/filters.py              |  206 ---
 lib/subunit/python/subunit/iso8601.py              |  133 --
 lib/subunit/python/subunit/progress_model.py       |  106 --
 lib/subunit/python/subunit/run.py                  |  145 ---
 lib/subunit/python/subunit/test_results.py         |  728 -----------
 lib/subunit/python/subunit/tests/__init__.py       |   69 -
 lib/subunit/python/subunit/tests/sample-script.py  |   21 -
 .../python/subunit/tests/sample-two-script.py      |    7 -
 lib/subunit/python/subunit/tests/test_chunked.py   |  146 ---
 lib/subunit/python/subunit/tests/test_details.py   |  106 --
 lib/subunit/python/subunit/tests/test_filters.py   |   35 -
 .../python/subunit/tests/test_output_filter.py     |  596 ---------
 .../python/subunit/tests/test_progress_model.py    |  112 --
 lib/subunit/python/subunit/tests/test_run.py       |   88 --
 .../python/subunit/tests/test_subunit_filter.py    |  346 -----
 .../python/subunit/tests/test_subunit_stats.py     |   78 --
 .../python/subunit/tests/test_subunit_tags.py      |   85 --
 .../python/subunit/tests/test_tap2subunit.py       |  387 ------
 .../python/subunit/tests/test_test_protocol.py     | 1362 --------------------
 .../python/subunit/tests/test_test_protocol2.py    |  436 -------
 .../python/subunit/tests/test_test_results.py      |  566 --------
 lib/subunit/python/subunit/v2.py                   |  494 -------
 lib/subunit/setup.py                               |   75 --
 lib/subunit/shell/README                           |   62 -
 lib/subunit/shell/share/subunit.sh                 |   61 -
 lib/subunit/shell/tests/test_function_output.sh    |   97 --
 lib/subunit/shell/tests/test_source_library.sh     |  108 --
 lib/update-external.sh                             |    6 +-
 lib/wscript_build                                  |    1 -
 python/samba/tests/subunitrun.py                   |    2 +-
 selftest/selftesthelpers.py                        |    8 +-
 selftest/subunithelper.py                          |    2 +-
 source4/selftest/test_samba3dump.sh                |    2 +-
 third_party/subunit/.gitignore                     |   56 +
 third_party/subunit/Apache-2.0                     |  202 +++
 third_party/subunit/BSD                            |   26 +
 third_party/subunit/COPYING                        |   36 +
 third_party/subunit/INSTALL                        |   35 +
 third_party/subunit/MANIFEST.in                    |   20 +
 third_party/subunit/Makefile.am                    |  147 +++
 third_party/subunit/NEWS                           |  547 ++++++++
 third_party/subunit/README                         |  469 +++++++
 third_party/subunit/all_tests.py                   |   36 +
 third_party/subunit/c++/README                     |   50 +
 .../subunit/c++/SubunitTestProgressListener.cpp    |   63 +
 .../subunit/c++/SubunitTestProgressListener.h      |   55 +
 third_party/subunit/c/README                       |   68 +
 third_party/subunit/c/include/subunit/child.h      |   96 ++
 third_party/subunit/c/lib/child.c                  |  104 ++
 third_party/subunit/c/tests/test_child.c           |  234 ++++
 third_party/subunit/c/wscript                      |   16 +
 third_party/subunit/configure.ac                   |   76 ++
 third_party/subunit/filters/subunit-1to2           |   42 +
 third_party/subunit/filters/subunit-2to1           |   47 +
 third_party/subunit/filters/subunit-filter         |  165 +++
 third_party/subunit/filters/subunit-ls             |   60 +
 third_party/subunit/filters/subunit-notify         |   48 +
 third_party/subunit/filters/subunit-output         |   23 +
 third_party/subunit/filters/subunit-stats          |   32 +
 third_party/subunit/filters/subunit-tags           |   27 +
 third_party/subunit/filters/subunit2csv            |   26 +
 third_party/subunit/filters/subunit2gtk            |  240 ++++
 third_party/subunit/filters/subunit2junitxml       |   36 +
 third_party/subunit/filters/subunit2pyunit         |   59 +
 third_party/subunit/filters/tap2subunit            |   26 +
 third_party/subunit/libcppunit_subunit.pc.in       |   11 +
 third_party/subunit/libsubunit.pc.in               |   11 +
 third_party/subunit/perl/Makefile.PL.in            |   21 +
 third_party/subunit/perl/lib/Subunit.pm            |  183 +++
 third_party/subunit/perl/lib/Subunit/Diff.pm       |   85 ++
 third_party/subunit/perl/subunit-diff              |   31 +
 third_party/subunit/python/iso8601/LICENSE         |   20 +
 third_party/subunit/python/iso8601/README          |   26 +
 third_party/subunit/python/iso8601/README.subunit  |    5 +
 third_party/subunit/python/iso8601/setup.py        |   58 +
 third_party/subunit/python/iso8601/test_iso8601.py |  111 ++
 third_party/subunit/python/subunit/__init__.py     | 1320 +++++++++++++++++++
 third_party/subunit/python/subunit/_output.py      |  203 +++
 third_party/subunit/python/subunit/chunked.py      |  185 +++
 third_party/subunit/python/subunit/details.py      |  119 ++
 third_party/subunit/python/subunit/filters.py      |  206 +++
 third_party/subunit/python/subunit/iso8601.py      |  133 ++
 .../subunit/python/subunit/progress_model.py       |  105 ++
 third_party/subunit/python/subunit/run.py          |  145 +++
 third_party/subunit/python/subunit/test_results.py |  728 +++++++++++
 .../subunit/python/subunit/tests/__init__.py       |   69 +
 .../subunit/python/subunit/tests/sample-script.py  |   21 +
 .../python/subunit/tests/sample-two-script.py      |    7 +
 .../subunit/python/subunit/tests/test_chunked.py   |  146 +++
 .../subunit/python/subunit/tests/test_details.py   |  106 ++
 .../subunit/python/subunit/tests/test_filters.py   |   35 +
 .../python/subunit/tests/test_output_filter.py     |  596 +++++++++
 .../python/subunit/tests/test_progress_model.py    |  112 ++
 .../subunit/python/subunit/tests/test_run.py       |   88 ++
 .../python/subunit/tests/test_subunit_filter.py    |  346 +++++
 .../python/subunit/tests/test_subunit_stats.py     |   78 ++
 .../python/subunit/tests/test_subunit_tags.py      |   85 ++
 .../python/subunit/tests/test_tap2subunit.py       |  387 ++++++
 .../python/subunit/tests/test_test_protocol.py     | 1362 ++++++++++++++++++++
 .../python/subunit/tests/test_test_protocol2.py    |  436 +++++++
 .../python/subunit/tests/test_test_results.py      |  566 ++++++++
 third_party/subunit/python/subunit/v2.py           |  494 +++++++
 third_party/subunit/setup.py                       |   75 ++
 third_party/subunit/shell/README                   |   62 +
 third_party/subunit/shell/share/subunit.sh         |   59 +
 .../subunit/shell/tests/test_function_output.sh    |   97 ++
 .../subunit/shell/tests/test_source_library.sh     |  107 ++
 third_party/wscript_build                          |    2 +
 wscript                                            |    1 -
 wscript_build                                      |    1 -
 158 files changed, 12220 insertions(+), 12227 deletions(-)
 delete mode 100644 lib/subunit/.gitignore
 delete mode 100644 lib/subunit/Apache-2.0
 delete mode 100644 lib/subunit/BSD
 delete mode 100644 lib/subunit/COPYING
 delete mode 100644 lib/subunit/INSTALL
 delete mode 100644 lib/subunit/MANIFEST.in
 delete mode 100644 lib/subunit/Makefile.am
 delete mode 100644 lib/subunit/NEWS
 delete mode 100644 lib/subunit/README
 delete mode 100644 lib/subunit/all_tests.py
 delete mode 100644 lib/subunit/c++/README
 delete mode 100644 lib/subunit/c++/SubunitTestProgressListener.cpp
 delete mode 100644 lib/subunit/c++/SubunitTestProgressListener.h
 delete mode 100644 lib/subunit/c/README
 delete mode 100644 lib/subunit/c/include/subunit/child.h
 delete mode 100644 lib/subunit/c/lib/child.c
 delete mode 100644 lib/subunit/c/tests/test_child.c
 delete mode 100644 lib/subunit/c/wscript
 delete mode 100644 lib/subunit/configure.ac
 delete mode 100755 lib/subunit/filters/subunit-1to2
 delete mode 100755 lib/subunit/filters/subunit-2to1
 delete mode 100755 lib/subunit/filters/subunit-filter
 delete mode 100755 lib/subunit/filters/subunit-ls
 delete mode 100755 lib/subunit/filters/subunit-notify
 delete mode 100644 lib/subunit/filters/subunit-output
 delete mode 100755 lib/subunit/filters/subunit-stats
 delete mode 100755 lib/subunit/filters/subunit-tags
 delete mode 100755 lib/subunit/filters/subunit2csv
 delete mode 100755 lib/subunit/filters/subunit2gtk
 delete mode 100755 lib/subunit/filters/subunit2junitxml
 delete mode 100755 lib/subunit/filters/subunit2pyunit
 delete mode 100755 lib/subunit/filters/tap2subunit
 delete mode 100644 lib/subunit/libcppunit_subunit.pc.in
 delete mode 100644 lib/subunit/libsubunit.pc.in
 delete mode 100755 lib/subunit/perl/Makefile.PL.in
 delete mode 100644 lib/subunit/perl/lib/Subunit.pm
 delete mode 100644 lib/subunit/perl/lib/Subunit/Diff.pm
 delete mode 100755 lib/subunit/perl/subunit-diff
 delete mode 100644 lib/subunit/python/iso8601/LICENSE
 delete mode 100644 lib/subunit/python/iso8601/README
 delete mode 100644 lib/subunit/python/iso8601/README.subunit
 delete mode 100644 lib/subunit/python/iso8601/setup.py
 delete mode 100644 lib/subunit/python/iso8601/test_iso8601.py
 delete mode 100644 lib/subunit/python/subunit/__init__.py
 delete mode 100644 lib/subunit/python/subunit/_output.py
 delete mode 100644 lib/subunit/python/subunit/chunked.py
 delete mode 100644 lib/subunit/python/subunit/details.py
 delete mode 100644 lib/subunit/python/subunit/filters.py
 delete mode 100644 lib/subunit/python/subunit/iso8601.py
 delete mode 100644 lib/subunit/python/subunit/progress_model.py
 delete mode 100755 lib/subunit/python/subunit/run.py
 delete mode 100644 lib/subunit/python/subunit/test_results.py
 delete mode 100644 lib/subunit/python/subunit/tests/__init__.py
 delete mode 100755 lib/subunit/python/subunit/tests/sample-script.py
 delete mode 100755 lib/subunit/python/subunit/tests/sample-two-script.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_chunked.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_details.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_filters.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_output_filter.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_progress_model.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_run.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_subunit_filter.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_subunit_stats.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_subunit_tags.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_tap2subunit.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_test_protocol.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_test_protocol2.py
 delete mode 100644 lib/subunit/python/subunit/tests/test_test_results.py
 delete mode 100644 lib/subunit/python/subunit/v2.py
 delete mode 100755 lib/subunit/setup.py
 delete mode 100644 lib/subunit/shell/README
 delete mode 100644 lib/subunit/shell/share/subunit.sh
 delete mode 100755 lib/subunit/shell/tests/test_function_output.sh
 delete mode 100755 lib/subunit/shell/tests/test_source_library.sh
 create mode 100644 third_party/subunit/.gitignore
 create mode 100644 third_party/subunit/Apache-2.0
 create mode 100644 third_party/subunit/BSD
 create mode 100644 third_party/subunit/COPYING
 create mode 100644 third_party/subunit/INSTALL
 create mode 100644 third_party/subunit/MANIFEST.in
 create mode 100644 third_party/subunit/Makefile.am
 create mode 100644 third_party/subunit/NEWS
 create mode 100644 third_party/subunit/README
 create mode 100644 third_party/subunit/all_tests.py
 create mode 100644 third_party/subunit/c++/README
 create mode 100644 third_party/subunit/c++/SubunitTestProgressListener.cpp
 create mode 100644 third_party/subunit/c++/SubunitTestProgressListener.h
 create mode 100644 third_party/subunit/c/README
 create mode 100644 third_party/subunit/c/include/subunit/child.h
 create mode 100644 third_party/subunit/c/lib/child.c
 create mode 100644 third_party/subunit/c/tests/test_child.c
 create mode 100644 third_party/subunit/c/wscript
 create mode 100644 third_party/subunit/configure.ac
 create mode 100755 third_party/subunit/filters/subunit-1to2
 create mode 100755 third_party/subunit/filters/subunit-2to1
 create mode 100755 third_party/subunit/filters/subunit-filter
 create mode 100755 third_party/subunit/filters/subunit-ls
 create mode 100755 third_party/subunit/filters/subunit-notify
 create mode 100644 third_party/subunit/filters/subunit-output
 create mode 100755 third_party/subunit/filters/subunit-stats
 create mode 100755 third_party/subunit/filters/subunit-tags
 create mode 100755 third_party/subunit/filters/subunit2csv
 create mode 100755 third_party/subunit/filters/subunit2gtk
 create mode 100755 third_party/subunit/filters/subunit2junitxml
 create mode 100755 third_party/subunit/filters/subunit2pyunit
 create mode 100755 third_party/subunit/filters/tap2subunit
 create mode 100644 third_party/subunit/libcppunit_subunit.pc.in
 create mode 100644 third_party/subunit/libsubunit.pc.in
 create mode 100755 third_party/subunit/perl/Makefile.PL.in
 create mode 100644 third_party/subunit/perl/lib/Subunit.pm
 create mode 100644 third_party/subunit/perl/lib/Subunit/Diff.pm
 create mode 100755 third_party/subunit/perl/subunit-diff
 create mode 100644 third_party/subunit/python/iso8601/LICENSE
 create mode 100644 third_party/subunit/python/iso8601/README
 create mode 100644 third_party/subunit/python/iso8601/README.subunit
 create mode 100644 third_party/subunit/python/iso8601/setup.py
 create mode 100644 third_party/subunit/python/iso8601/test_iso8601.py
 create mode 100644 third_party/subunit/python/subunit/__init__.py
 create mode 100644 third_party/subunit/python/subunit/_output.py
 create mode 100644 third_party/subunit/python/subunit/chunked.py
 create mode 100644 third_party/subunit/python/subunit/details.py
 create mode 100644 third_party/subunit/python/subunit/filters.py
 create mode 100644 third_party/subunit/python/subunit/iso8601.py
 create mode 100644 third_party/subunit/python/subunit/progress_model.py
 create mode 100755 third_party/subunit/python/subunit/run.py
 create mode 100644 third_party/subunit/python/subunit/test_results.py
 create mode 100644 third_party/subunit/python/subunit/tests/__init__.py
 create mode 100755 third_party/subunit/python/subunit/tests/sample-script.py
 create mode 100755 third_party/subunit/python/subunit/tests/sample-two-script.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_chunked.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_details.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_filters.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_output_filter.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_progress_model.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_run.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_subunit_filter.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_subunit_stats.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_subunit_tags.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_tap2subunit.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_test_protocol.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_test_protocol2.py
 create mode 100644 third_party/subunit/python/subunit/tests/test_test_results.py
 create mode 100644 third_party/subunit/python/subunit/v2.py
 create mode 100755 third_party/subunit/setup.py
 create mode 100644 third_party/subunit/shell/README
 create mode 100644 third_party/subunit/shell/share/subunit.sh
 create mode 100755 third_party/subunit/shell/tests/test_function_output.sh
 create mode 100755 third_party/subunit/shell/tests/test_source_library.sh

diff --git a/buildtools/wafsamba/test_duplicate_symbol.sh b/buildtools/wafsamba/test_duplicate_symbol.sh
index 712c7e6..a6df0d3 100755
--- a/buildtools/wafsamba/test_duplicate_symbol.sh
+++ b/buildtools/wafsamba/test_duplicate_symbol.sh
@@ -1,7 +1,7 @@
 #!/bin/sh
 # Run the waf duplicate symbol check, wrapped in subunit.
 
-. lib/subunit/shell/share/subunit.sh
+. third_party/subunit/shell/share/subunit.sh
 
 subunit_start_test duplicate_symbols
 
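For context, the shell helpers sourced above emit a subunit v1 stream on
stdout. A minimal sketch of consuming such a stream with the Python bindings
that this patch moves under third_party/subunit/python (the captured stream
contents below are illustrative, not taken from this patch):

    import io
    import unittest

    import subunit  # the Python bindings moved by this patch

    # A captured v1 stream, e.g. what test_duplicate_symbol.sh prints.
    stream = io.BytesIO(b"test: duplicate_symbols\n"
                        b"success: duplicate_symbols\n")

    result = unittest.TestResult()
    suite = subunit.ProtocolTestCase(stream)  # parses the v1 protocol
    suite.run(result)
    print("failures: %d" % len(result.failures))
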
diff --git a/lib/subunit/.gitignore b/lib/subunit/.gitignore
deleted file mode 100644
index 30f733a..0000000
--- a/lib/subunit/.gitignore
+++ /dev/null
@@ -1,56 +0,0 @@
-/c/lib/child.os
-/c/tests/test_child
-.sconsign
-.sconsign.dblite
-/m4/**
-aclocal.m4
-compile
-config.guess
-config.h.in
-config.sub
-configure
-depcomp
-install-sh
-ltmain.sh
-missing
-autom4te.cache
-Makefile.in
-py-compile
-.deps
-.dirstamp
-.libs
-*.lo
-libsubunit.la
-libcppunit_subunit.la
-libtool
-stamp-h1
-libsubunit.pc
-libcppunit_subunit.pc
-config.log
-config.status
-Makefile
-config.h
-debian/files
-debian/libsubunit0
-debian/libsubunit-dev
-debian/subunit
-debian/python-subunit
-debian/*.log
-debian/*.debhelper
-debian/tmp
-debian/*.substvars
-/perl/blib
-/perl/pm_to_blib
-subunit-*.tar.gz
-subunit-*.tar.gz.asc
-perl/Makefile.PL
-/.testrepository
-__pycache__
-perl/MYMETA.yml
-/build/
-/dist/
-/*.egg-info/
-*.pyc
-*~
-.*.swp
-.*.swo
diff --git a/lib/subunit/Apache-2.0 b/lib/subunit/Apache-2.0
deleted file mode 100644
index d645695..0000000
--- a/lib/subunit/Apache-2.0
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/lib/subunit/BSD b/lib/subunit/BSD
deleted file mode 100644
index fa130cd..0000000
--- a/lib/subunit/BSD
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) Robert Collins and Subunit contributors
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
-   notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
-   notice, this list of conditions and the following disclaimer in the
-   documentation and/or other materials provided with the distribution.
-3. Neither the name of Robert Collins nor the names of Subunit contributors
-   may be used to endorse or promote products derived from this software
-   without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS''
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/lib/subunit/COPYING b/lib/subunit/COPYING
deleted file mode 100644
index 3ba50f8..0000000
--- a/lib/subunit/COPYING
+++ /dev/null
@@ -1,36 +0,0 @@
-Subunit is licensed under two licenses, the Apache License, Version 2.0 or the
-3-clause BSD License. You may use this project under either of these licenses
-- choose the one that works best for you.
-
-We require contributions to be licensed under both licenses. The primary
-difference between them is that the Apache license takes care of potential
-issues with Patents and other intellectual property concerns. This is
-important to Subunit as Subunit wants to be license compatible in a very 
-broad manner to allow reuse and incorporation into other projects.
-
-Generally every source file in Subunit needs a license grant under both these
-licenses.  As the code is shipped as a single unit, a brief form is used:
-----
-Copyright (c) [yyyy][,yyyy]* [name or 'Subunit Contributors']
-
-Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-license at the users choice. A copy of both licenses are available in the
-project source as Apache-2.0 and BSD. You may not use this file except in
-compliance with one of these two licences.
-
-Unless required by applicable law or agreed to in writing, software
-distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-license you chose for the specific language governing permissions and
-limitations under that license.
-----
-
-Code that has been incorporated into Subunit from other projects will
-naturally be under its own license, and will retain that license.
-
-A known list of such code is maintained here:
-* The python/iso8601 module by Michael Twomey, distributed under an MIT style
-  licence - see python/iso8601/LICENSE for details.
-* The runtests.py and python/subunit/tests/TestUtil.py modules are GPL test
-  support modules. They are not installed by Subunit - they are only ever
-  used on the build machine.  Copyright 2004 Canonical Limited.
diff --git a/lib/subunit/INSTALL b/lib/subunit/INSTALL
deleted file mode 100644
index 29052eb..0000000
--- a/lib/subunit/INSTALL
+++ /dev/null
@@ -1,35 +0,0 @@
-To install subunit
-------------------
-
-Bootstrap::
-  autoreconf -vi
-Configure::
-  ./configure
-Install::
-  make install
-
-Dependencies
-------------
-
-* Python for the filters
-* 'testtools' (On Debian and Ubuntu systems the 'python-testtools' package,
-  the testtools package on pypi, or https://launchpad.net/testtools) for
-  the extended test API which permits attachments. Version 0.9.30 or newer is
-  required. Of particular note, http://testtools.python-hosting.com/ is not
-  the testtools you want.
-* 'testscenarios' (On Debian and Ubuntu systems the 'python-testscenarios'
-  package, the 'testscenarios' package on pypi, or
-  https://launchpad.net/testscenarios) for running some of the python unit tests.
-* A C compiler for the C bindings
-* Perl for the Perl tools (including subunit-diff)
-* Check to run the subunit test suite.
-* python-gtk2 if you wish to use subunit2gtk
-* python-junitxml if you wish to use subunit2junitxml
-* pkg-config for configure detection of supporting libraries.
-
-Binary packages
----------------
-
-A number of distributions now include subunit, you can try via your package
-manager. The authors maintain a personal package archive on Launchpad::
-  https://launchpad.net/~testing-cabal/+archive/archive
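As a quick aside to the dependency list above, the testtools requirement
(0.9.30 or newer) can be checked from Python with the standard pkg_resources
API; this is only an illustrative sketch, not something subunit itself ships:

    import pkg_resources

    # Raises DistributionNotFound or VersionConflict if the requirement
    # noted in INSTALL is not satisfied.
    pkg_resources.require("testtools>=0.9.30")
    print("testtools dependency satisfied")
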
diff --git a/lib/subunit/MANIFEST.in b/lib/subunit/MANIFEST.in
deleted file mode 100644
index 4f521dc..0000000
--- a/lib/subunit/MANIFEST.in
+++ /dev/null
@@ -1,20 +0,0 @@
-exclude .gitignore
-exclude aclocal.m4
-prune autom4te.cache
-prune c
-prune c++
-prune compile
-exclude configure*
-exclude depcomp
-exclude INSTALL
-exclude install-sh
-exclude lib*
-exclude ltmain.sh
-prune m4
-exclude Makefile*
-exclude missing
-prune perl
-exclude py-compile
-prune shell
-exclude stamp-h1
-include NEWS
diff --git a/lib/subunit/Makefile.am b/lib/subunit/Makefile.am
deleted file mode 100644
index e8f018e..0000000
--- a/lib/subunit/Makefile.am
+++ /dev/null
@@ -1,147 +0,0 @@
-EXTRA_DIST =  \
-	.bzrignore \
-	Apache-2.0 \
-	BSD \
-	INSTALL \
-	Makefile.am \
-	NEWS \
-	README \
-	all_tests.py \
-	c++/README \
-	c/README \
-	c/check-subunit-0.9.3.patch \
-	c/check-subunit-0.9.5.patch \
-	c/check-subunit-0.9.6.patch \
-	perl/Makefile.PL.in \
-	perl/lib/Subunit.pm \
-	perl/lib/Subunit/Diff.pm \
-	perl/subunit-diff \
-	python/iso8601/LICENSE \
-	python/iso8601/README \
-	python/iso8601/README.subunit \
-	python/iso8601/setup.py \
-	python/iso8601/test_iso8601.py \
-	python/subunit/tests/__init__.py \
-	python/subunit/tests/sample-script.py \
-	python/subunit/tests/sample-two-script.py \
-	python/subunit/tests/test_chunked.py \
-	python/subunit/tests/test_details.py \
-	python/subunit/tests/test_filters.py \
-	python/subunit/tests/test_output_filter.py \
-	python/subunit/tests/test_progress_model.py \
-	python/subunit/tests/test_run.py \
-	python/subunit/tests/test_subunit_filter.py \
-	python/subunit/tests/test_subunit_stats.py \
-	python/subunit/tests/test_subunit_tags.py \
-	python/subunit/tests/test_tap2subunit.py \
-	python/subunit/tests/test_test_protocol.py \
-	python/subunit/tests/test_test_protocol2.py \
-	python/subunit/tests/test_test_results.py \
-	setup.py \
-	shell/README \
-	shell/share/subunit.sh \
-	shell/subunit-ui.patch \
-	shell/tests/test_function_output.sh \
-	shell/tests/test_source_library.sh
-
-ACLOCAL_AMFLAGS = -I m4
-
-include_subunitdir = $(includedir)/subunit
-
-dist_bin_SCRIPTS = \
-	filters/subunit-1to2 \
-	filters/subunit-2to1 \
-	filters/subunit-filter \
-	filters/subunit-ls \
-	filters/subunit-notify \
-	filters/subunit-output \
-	filters/subunit-stats \
-	filters/subunit-tags \
-	filters/subunit2csv \
-	filters/subunit2gtk \
-	filters/subunit2junitxml \
-	filters/subunit2pyunit \
-	filters/tap2subunit
-
-TESTS = $(check_PROGRAMS)
-
-## install libsubunit.pc
-pcdatadir = $(libdir)/pkgconfig
-pcdata_DATA = \
-	libsubunit.pc \
-	libcppunit_subunit.pc
-
-pkgpython_PYTHON = \
-	python/subunit/__init__.py \
-	python/subunit/chunked.py \
-	python/subunit/details.py \
-	python/subunit/filters.py \
-	python/subunit/iso8601.py \
-	python/subunit/progress_model.py \
-	python/subunit/run.py \
-	python/subunit/v2.py \
-	python/subunit/test_results.py \
-	python/subunit/_output.py
-
-lib_LTLIBRARIES = libsubunit.la
-lib_LTLIBRARIES +=  libcppunit_subunit.la
-
-include_subunit_HEADERS = \
-	c/include/subunit/child.h \
-	c++/SubunitTestProgressListener.h
-
-check_PROGRAMS = \
-	c/tests/test_child
-
-libsubunit_la_SOURCES = \
-	c/lib/child.c \
-	c/include/subunit/child.h
-
-libcppunit_subunit_la_SOURCES = \
-	c++/SubunitTestProgressListener.cpp \
-	c++/SubunitTestProgressListener.h
-
-tests_LDADD = @CHECK_LIBS@ $(top_builddir)/libsubunit.la
-c_tests_test_child_CFLAGS = -I$(top_srcdir)/c/include $(SUBUNIT_CFLAGS) @CHECK_CFLAGS@
-c_tests_test_child_LDADD = $(tests_LDADD)
-
-
-all-local: perl/Makefile
-	$(MAKE) -C perl all
-
-check-local: perl/Makefile
-	$(MAKE) -C perl check
-	SHELL_SHARE='$(top_srcdir)/shell/share/' \
-	PYTHONPATH='$(abs_top_srcdir)/python':'$(abs_top_srcdir)':${PYTHONPATH} \
-	$(PYTHON) -m testtools.run all_tests.test_suite
-
-clean-local:
-	find . -type f -name "*.pyc" -exec rm {} ';'
-	rm -f perl/Makefile
-
-# Remove perl dir for VPATH builds.
-distclean-local:
-	-rmdir perl > /dev/null
-	-rm perl/Makefile.PL > /dev/null
-
-install-exec-local: perl/Makefile
-	$(MAKE) -C perl install
-
-mostlyclean-local:
-	rm -rf perl/blib
-	rm -rf perl/pm_to_blib
-
-# 'uninstall' perl files during distcheck
-uninstall-local:
-	if [ "_inst" = `basename ${prefix}` ]; then \
-	  $(MAKE) -C perl uninstall_distcheck; \
-	    rm -f "$(DESTDIR)$(bindir)"/subunit-diff; \
-	fi
-
-# The default for MakeMaker; can be overridden by exporting
-INSTALLDIRS ?= site
-
-perl/Makefile: perl/Makefile.PL
-	mkdir -p perl
-	cd perl && perl Makefile.PL INSTALLDIRS=${INSTALLDIRS}
-	-rm perl/Makefile.old > /dev/null
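The check-local target above runs the Python tests via
"$(PYTHON) -m testtools.run all_tests.test_suite". A minimal sketch of that
kind of aggregating entry point is shown below; the real all_tests.py may be
organised differently, and this assumes subunit.tests exposes a test_suite()
callable in the usual testtools style:

    import unittest


    def test_suite():
        # Aggregate the Python protocol tests shipped under
        # python/subunit/tests (assumed to expose test_suite()).
        loader = unittest.TestLoader()
        return loader.loadTestsFromNames(["subunit.tests.test_suite"])
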
diff --git a/lib/subunit/NEWS b/lib/subunit/NEWS
deleted file mode 100644
index fe8b495..0000000
--- a/lib/subunit/NEWS
+++ /dev/null
@@ -1,547 +0,0 @@
----------------------
-subunit release notes
----------------------
-
-NEXT (In development)
----------------------
-
-0.0.21
-------
-
-BUGFIXES
-~~~~~~~~
-
-* Brown bag bugfix - 0.0.20's setup.py referenced cvs not csv.
-  (Robert Collins, #1361924)
-
-0.0.20
-------
-
-BUGFIXES
-~~~~~~~~
-
-* subunit2csv is now installed when using pip.
-  (Robert Collins, #1279669)
-
-* testscenarios is now a test dependency, not an install dependency.
-  (Arfrever Frehtes Taifersar Arahesis, #1292757)
-
-* The python-subunit tarball can now have setup run from the current
-  directory. (Robert Collins, #1361857)
-
-0.0.19
-------
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* ``subunit.run`` in Python will now exit 0 as long as the test stream has
-  been generated correctly - this has always been the intent but API friction
-  with testtools had prevented it working.
-  (Robert Collins)
-
-0.0.18
-------
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Fix compatibility with testtools 0.9.35 which dropped the 'all' compat
-  symbol. This breaks support for Python versions lower than 2.6.
-  (Robert Collins, #1274056)
-
-0.0.17
-------
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Add ``subunit-output`` tool that can generate a Subunit v2 bytestream from
-  arguments passed on the command line. (Thomi Richards, #1252084)
-
-0.0.16
-------
-
-BUG FIXES
-~~~~~~~~~
-
-* Perl files should now honour perl system config.
-  (Benedikt Morbach, #1233198)
-
-* Python 3.1 and 3.2 have an inconsistent memoryview implementation which
-  required a workaround for NUL byte detection. (Robert Collins, #1216246)
-
-* The test suite was failing 6 tests due to testtools changing its output
-  formatting of exceptions. (Robert Collins)
-
-* V2 parser errors now set appropriate mime types for the encapsulated packet
-  data and the error message. (Robert Collins)
-
-* When tests fail to import ``python subunit.run -l ...`` will now write a
-  subunit file attachment listing the failed imports and exit 2, rather than
-  listing the stub objects from the importer and exiting 0.
-  (Robert Collins, #1245672)
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Most filters will now accept a file path argument instead of only reading
-  from stdin. (Robert Collins, #409206)
-
-0.0.15
-------
-
-BUG FIXES
-~~~~~~~~~
-
-* Clients of subunit did not expect memoryview objects in StreamResult events.
-  (Robert Collins)
-
-* Memoryview and struct were mutually incompatible in 2.7.3 and 3.2.
-  (Robert Collins, #1216163)
-
-0.0.14
-------
-
-BUG FIXES
-~~~~~~~~~
-
-* Memoryview detection was broken and thus its use was never really tested.
-  (Robert Collins, 1216101)
-
-* TestProtocol2's tag tests were set sort order dependent.
-  (Robert Collins, #1025392)
-
-* TestTestProtocols' test_tags_both was set sort order dependent.
-  (Robert Collins, #1025392)
-
-* TestTestProtocols' test_*_details were dictionary sort order dependent.
-  (Robert Collins, #1025392)
-
-* TestSubUnitTags's test_add_tag was also set sort order dependent.
-  (Robert Collins, #1025392)
-
-0.0.13
-------
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* subunit should now build with automake 1.11 again. (Robert Collins)
-
-* `subunit-stats` no longer outputs encapsulated stdout as subunit.
-  (Robert Collins, #1171987)
-
-* The logic for `subunit.run` is now importable via python -
-  `subunit.run.main`. (Robert Collins, #606770)
-
-BUG FIXES
-~~~~~~~~~
-
-* Removed GPL files that were (C) non Subunit Developers - they are
-  incompatible for binary distribution, which affects redistributors.
-  (Robert Collins, #1185591)
-
-0.0.12
-------
-
-BUG FIXES
-~~~~~~~~~
-
-* Subunit v2 packets with both file content and route code were not being
-  parsed correctly - they would incorrectly emit a parser error, due to trying
-  to parse the route code length from the first byes of the file content.
-  (Robert Collins, 1172815)
-
-0.0.11
-------
-
-v2 protocol draft included in this release. The v2 protocol trades off human
-readability for a massive improvement in robustness, the ability to represent
-concurrent tests in a single stream, cheaper parsing, significantly better
-in-line debugging support, and structured forwarding of non-test data
-(such as stdout or stdin data).
-
-This change includes two new filters (subunit-1to2 and subunit-2to1). Use
-these filters to convert old streams to v2 and convert v2 streams to v1.
-
-All the other filters now only parse and emit v2 streams. V2 is still in
-draft format, so if you want to delay and wait for v2 to be finalised, you
-should use subunit-2to1 before any serialisation steps take place.
-With the ability to encapsulate multiple non-test streams, another significant
-change is that filters which emit subunit now encapsulate any non-subunit they
-encounter, labelling it 'stdout'. This permits multiplexing such streams and
-detangling the stdout streams from each input.
-
-The subunit libraries (Python etc) have not changed their behaviour: they
-still emit v1 from their existing API calls. New API's are being added
-and applications should migrate once their language has those API's available.
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* ``subunit.run`` now replaces sys.stdout to ensure that stdout is unbuffered
-  - without this pdb output is not reliably visible when stdout is a pipe
-  as it usually is. (Robert Collins)
-
-* v2 protocol draft included in this release. (Python implementation only so
-  far). (Robert Collins)
-
-* Two new Python classes -- ``StreamResultToBytes`` and
-  ``ByteStreamToStreamResult`` handle v2 generation and parsing.
-  (Robert Collins)
-
-0.0.10
-------
-
-BUG FIXES
-~~~~~~~~~
-
-* make_stream_binary is now public for reuse. (Robert Collins)
-
-* NAME was not defined in the protocol BNF. (Robert Collins)
-
-* UnsupportedOperation is available in the Python2.6 io library, so ask
-  forgiveness rather than permission for obtaining it. (Robert Collins)
-
-* Streams with no fileno() attribute are now supported, but they are not
-  checked for being in binary mode: be sure to take care of that if using
-  the library yourself. (Robert Collins)
-
-0.0.9
------
-
-BUG FIXES
-~~~~~~~~~
-
-* All the source files are now included in the distribution tarball.
-  (Arfrever Frehtes Taifersar Arahesis, Robert Collins, #996275)
-
-* ``python/subunit/tests/test_run.py`` and ``python/subunit/filters.py`` were
-  not included in the 0.0.8 tarball. (Robert Collins)
-
-* Test ids which include non-ascii unicode characters are now supported.
-  (Robert Collins, #1029866)
-
-* The ``failfast`` option to ``subunit.run`` will now work. The dependency on
-  testtools has been raised to 0.9.23 to permit this.
-  (Robert Collins, #1090582)
-
-0.0.8
------
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Perl module now correctly outputs "failure" instead of "fail".  (Stewart Smith)
-
-* Shell functions now output timestamps. (Stewart Smith, Robert Collins)
-
-* 'subunit2csv' script that converts subunit output to CSV format.
-  (Jonathan Lange)
-
-* ``TagCollapsingDecorator`` now correctly distinguishes between local and
-  global tags.  (Jonathan Lange)
-
-* ``TestResultFilter`` always forwards ``time:`` events.
-  (Benji York, Brad Crittenden)
-
-BUG FIXES
-~~~~~~~~~
-
-* Add 'subunit --no-xfail', which will omit expected failures from the subunit
-  stream. (John Arbash Meinel, #623642)
-
-* Add 'subunit -F/--only-genuine-failures' which sets all of '--no-skips',
-  '--no-xfail', '--no-passthrough, '--no-success', and gives you just the
-  failure stream. (John Arbash Meinel)
-
-* Python2.6 support was broken by the fixup feature.
-  (Arfrever Frehtes Taifersar Arahesis, #987490)
-
-* Python3 support regressed in trunk.
-  (Arfrever Frehtes Taifersar Arahesis, #987514)
-
-* Python3 support was insufficiently robust in detecting unicode streams.
-  (Robert Collins, Arfrever Frehtes Taifersar Arahesis)
-
-* Tag support has been implemented for TestProtocolClient.
-  (Robert Collins, #518016)
-
-* Tags can now be filtered. (Jonathan Lange, #664171)
-
-* Test suite works with latest testtools (but not older ones - formatting
-  changes only). (Robert Collins)
-
-0.0.7
------
-
-The Subunit Python test runner ``python -m subunit.run`` can now report the
-test ids and also filter via a test id list file thanks to improvements in
-``testtools.run``. See the testtools manual, or testrepository - a major
-user of such functionality.
-
-Additionally the protocol now has a keyword uxsuccess for Unexpected Success
-reporting. Older parsers will report tests with this status code as 'lost
-connection'.
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Add ``TimeCollapsingDecorator`` which collapses multiple sequential time()
-  calls into just the first and last. (Jonathan Lange)
-
-* Add ``TagCollapsingDecorator`` which collapses many tags() calls into one
-  where possible. (Jonathan Lange, Robert Collins)
-
-* Force flush of writes to stdout in c/tests/test_child.
-  (Jelmer Vernooij, #687611)
-
-* Provisional Python 3.x support.
-  (Robert Collins, Tres Seaver, Martin[gz], #666819)
-
-* ``subunit.chunked.Decoder`` Python class takes a new ``strict`` option,
-  which defaults to ``True``. When ``False``, the ``Decoder`` will accept
-  incorrect input that is still unambiguous. i.e. subunit will not barf if
-  a \r is missing from the input. (Martin Pool)
-
-* ``subunit-filter`` preserves the relative ordering of ``time:`` statements,
-  so you can now use filtered streams to gather data about how long it takes
-  to run a test. (Jonathan Lange, #716554)
-
-* ``subunit-ls`` now handles a stream with time: instructions that start
-  partway through the stream (which may lead to strange times) more gracefully.
-  (Robert Collins, #785954)
-
-* ``subunit-ls`` should handle the new test outcomes in Python2.7 better.
-  (Robert Collins, #785953)
-
-* ``TestResultFilter`` now collapses sequential calls to time().
-  (Jonathan Lange, #567150)
-
-* ``TestResultDecorator.tags()`` now actually works, and is no longer a buggy
-  copy/paste of ``TestResultDecorator.time()``. (Jonathan Lange, #681828)
-
-* ``TestResultFilter`` now supports a ``fixup_expected_failures``
-  argument. (Jelmer Vernooij, #755241)
-
-* The ``subunit.run`` Python module supports ``-l`` and ``--load-list`` as
-  per ``testtools.run``. This required a dependency bump due to a small
-  API change in ``testtools``. (Robert Collins)
-
-* The help for subunit-filter was confusing about the behaviour of ``-f`` /
-  ``--no-failure``. (Robert Collins, #703392)
-
-* The Python2.7 / testtools addUnexpectedSuccess API is now supported. This
-  required adding a new status code to the protocol. (Robert Collins, #654474)
-
-CHANGES
-~~~~~~~
-
-* testtools 0.9.11 or newer is now needed (due to the Python 3 support).
-  (Robert Collins)
-
-0.0.6
------
-
-This release of subunit fixes a number of unicode related bugs. This depends on
-testtools 0.9.4 and will not function without it. Thanks to Tres Seaver there
-is also an optional native setup.py file for use with easy_install and the
-like.
-
-BUG FIXES
-~~~~~~~~~
-
-* Be consistent about delivering unicode content to testtools StringException
-  class which has become (appropriately) conservative. (Robert Collins)
-
-* Fix incorrect reference to subunit_test_failf in c/README.
-  (Brad Hards, #524341)
-
-* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
-  is purely cosmetic as the parameters are passed down with no interpretation.
-  (Robert Collins, #537611)
-
-* Old style tracebacks with no encoding info are now treated as UTF8 rather
-  than some-random-codec-like-ascii. (Robert Collins)
-
-* On windows, ProtocolTestCase and TestProtocolClient will set their streams to
-  binary mode by calling into msvcrt; this avoids having their input or output
-  mangled by the default line ending translation on that platform.
-  (Robert Collins, Martin [gz], #579296)
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Subunit now has a setup.py for python deployments that are not using
-  distribution packages. (Tres Seaver, #538181)
-
-* Subunit now supports test discovery by building on the testtools support for
-  it. You can take advantage of it with "python -m subunit.run discover [path]"
-  and see "python -m subunit.run discover --help" for more options.
-
-* Subunit now uses the improved unicode support in testtools when outputting
-  non-details based test information; this should consistently UTF8 encode such
-  strings.
-
-* The Python TestProtocolClient now flushes output on startTest and stopTest.
-  (Martin [gz]).
-
-
-0.0.5
------
-
-BUG FIXES
-~~~~~~~~~
-
-* make check was failing if subunit wasn't installed due to a missing include
-  path for the test program test_child.
-
-* make distcheck was failing due to a missing $(top_srcdir) rune.
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* New filter `subunit-notify` that will show a notification window with test 
-  statistics when the test run finishes.
-
-* subunit.run will now pipe its output to the command in the 
-  SUBUNIT_FORMATTER environment variable, if set.
-
-0.0.4
------
-
-BUG FIXES
-~~~~~~~~~
-
-* subunit2junitxml -f required a value, this is now fixed and -f acts as a
-  boolean switch with no parameter.
-
-* Building with autoconf 2.65 is now supported.
-
-
-0.0.3
------
-
-  CHANGES:
-  
-    * License change, by unanimous agreement of contributors to BSD/Apache
-      License Version 2.0. This makes Subunit compatible with more testing
-      frameworks.
-
-  IMPROVEMENTS:
-
-    * CPPUnit is now directly supported: subunit builds a cppunit listener
-      ``libcppunit-subunit``. 
-
-    * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
-      from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
-      serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
-      The ``ProtocolTestCase`` parser now calls outcomes using an extended
-      API that permits attaching arbitrary MIME resources such as text files,
-      log entries and so on. This extended API is being developed with the
-      Python testing community, and is in flux. ``TestResult`` objects that
-      do not support the API will be detected and transparently downgraded
-      back to the regular Python unittest API.
-
-    * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
-      variable when installing.
-
-    * Multipart test outcomes are tentatively supported; the exact protocol
-      for them, both serialiser and object is not yet finalised. Testers and
-      early adopters are sought. As part of this and also in an attempt to
-      provide a more precise focus on the wire protocol and toolchain,
-      Subunit now depends on testtools (http://launchpad.net/testtools)
-      release 0.9.0 or newer.
-
-    * subunit2junitxml supports a new option, --forward which causes it
-      to forward the raw subunit stream in a similar manner to tee. This
-      is used with the -o option to both write a xml report and get some
-      other subunit filter to process the stream.
-
-    * The C library now has ``subunit_test_skip``.
-
-  BUG FIXES:
-
-    * Install progress_model.py correctly.
-
-    * Non-gcc builds will no longer try to use gcc specific flags.
-      (Thanks trondn-norbye)
-
-  API CHANGES:
-
-  INTERNALS:
-
-0.0.2
------
-
-  CHANGES:
-
-  IMPROVEMENTS:
-
-    * A number of filters now support ``--no-passthrough`` to cause all
-      non-subunit content to be discarded. This is useful when precise control
-      over what is output is required - such as with subunit2junitxml.
-
-    * A small perl parser is now included, and a new ``subunit-diff`` tool
-      using that is included. (Jelmer Vernooij)
-
-    * Subunit streams can now include optional, incremental lookahead
-      information about progress. This allows reporters to make estimates
-      about completion, when such information is available. See the README
-      under ``progress`` for more details.
-
-    * ``subunit-filter`` now supports regex filtering via ``--with`` and
-      ``--without`` options. (Martin Pool)
-
-    * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
-      test stream.
-
-    * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
-      test reporter to be used, which has a textual progress bar. This requires
-      a recent bzrlib as a minor bugfix was required in bzrlib to support this.
-
-    * ``subunit2junitxml`` has been added. This filter converts a subunit
-      stream to a single JUnit style XML stream using the pyjunitxml
-      python library.
-
-    * The shell functions support skipping via ``subunit_skip_test`` now.
-
-  BUG FIXES:
-
-    * ``xfail`` outcomes are now passed to Python ``TestResult`` objects via
-      ``addExpectedFailure`` if it is present on the TestResult. Python 2.6 and
-      earlier which do not have this function will have ``xfail`` outcomes
-      passed through as success outcomes as earlier versions of subunit did.
-
-  API CHANGES:
-
-    * tags are no longer passed around in python via the ``TestCase.tags``
-      attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called,
-      and like in the protocol, if called while a test is active only applies
-      to that test. (Robert Collins)
-
-    * ``TestResultFilter`` takes a new optional constructor parameter 
-      ``filter_predicate``.  (Martin Pool)
-
-    * When a progress: directive is encountered in a subunit stream, the
-      python bindings now call the ``progress(offset, whence)`` method on
-      ``TestResult``.
-
-    * When a time: directive is encountered in a subunit stream, the python
-      bindings now call the ``time(seconds)`` method on ``TestResult``.
-
-  INTERNALS:
-
-    * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
-      users of subunit will want to wrap their ``TestProtocolClient`` objects
-      in this decorator to get test timing data for performance analysis.
-
-    * (python) ExecTestCase supports passing arguments to test scripts.
-
-    * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
-      which can be used to call some code on every event, without having to
-      implement all the event methods.
-
-    * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
-      causes a timestamp to be output to the stream.
diff --git a/lib/subunit/README b/lib/subunit/README
deleted file mode 100644
index dab8be7..0000000
--- a/lib/subunit/README
+++ /dev/null
@@ -1,470 +0,0 @@
-
-  subunit: A streaming protocol for test results
-  Copyright (C) 2005-2013 Robert Collins <robertc at robertcollins.net>
-
-  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-  license at the users choice. A copy of both licenses are available in the
-  project source as Apache-2.0 and BSD. You may not use this file except in
-  compliance with one of these two licences.
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-  license you chose for the specific language governing permissions and
-  limitations under that license.
-
-  See the COPYING file for full details on the licensing of Subunit.
-
-  subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
-  licence - see python/iso8601/LICENSE for details.
-
-Subunit
--------
-
-Subunit is a streaming protocol for test results.
-
-There are two major revisions of the protocol. Version 1 was trivially human
-readable but had significant defects as far as highly parallel testing was
-concerned - it had no room for doing discovery and execution in parallel,
-required substantial buffering when multiplexing and was fragile - a corrupt
-byte could cause an entire stream to be misparsed. Version 1.1 added
-encapsulation of binary streams which mitigated some of the issues but the
-core remained.
-
-Version 2 shares many of the good characteristics of Version 1 - it can be
-embedded into a regular text stream (e.g. from a build system) and it still
-models xUnit style test execution. It also fixes many of the issues with
-Version 1 - Version 2 can be multiplexed without excessive buffering (in
-time or space), it has a well defined recovery mechanism for dealing with
-corrupted streams (e.g. where two processes write to the same stream
-concurrently, or where the stream generator suffers a bug).
-
-More details on both protocol versions can be found in the 'Protocol' section
-of this document.
-
-Subunit comes with command line filters to process a subunit stream and
-language bindings for python, C, C++ and shell. Bindings are easy to write
-for other languages.
-
-A number of useful things can be done easily with subunit:
- * Test aggregation: Tests run separately can be combined and then
-   reported/displayed together. For instance, tests from different languages
-   can be shown as a seamless whole, and tests running on multiple machines
-   can be aggregated into a single stream through a multiplexer.
- * Test archiving: A test run may be recorded and replayed later.
- * Test isolation: Tests that may crash or otherwise interact badly with each
-   other can be run separately and then aggregated, rather than interfering
-   with each other or requiring an ad hoc test->runner reporting protocol.
- * Grid testing: subunit can act as the necessary serialisation and
-   deserialiation to get test runs on distributed machines to be reported in
-   real time.
-
-Subunit supplies the following filters:
- * tap2subunit - convert perl's TestAnythingProtocol to subunit.
- * subunit2csv - convert a subunit stream to csv.
- * subunit2pyunit - convert a subunit stream to pyunit test results.
- * subunit2gtk - show a subunit stream in GTK.
- * subunit2junitxml - convert a subunit stream to JUnit's XML format.
- * subunit-diff - compare two subunit streams.
- * subunit-filter - filter out tests from a subunit stream.
- * subunit-ls - list info about tests present in a subunit stream.
- * subunit-stats - generate a summary of a subunit stream.
- * subunit-tags - add or remove tags from a stream.
-
-Integration with other tools
-----------------------------
-
-Subunit's language bindings provide integration with various test runners such
-as 'check', 'cppunit' and Python's 'unittest'. Beyond that a small amount of
-glue (typically a few lines) will allow Subunit to be used in more
-sophisticated ways.
-
-Python
-======
-
-Subunit has excellent Python support: most of the filters and tools are written
-in python and there are facilities for using Subunit to increase test isolation
-seamlessly within a test suite.
-
-The most common way is to run an existing python test suite and have it output
-subunit via the ``subunit.run`` module::
-
-  $ python -m subunit.run mypackage.tests.test_suite
-
-For more information on the Python support Subunit offers, please see
-``pydoc subunit``, or the source in ``python/subunit/``.
-
-C
-=
-
-Subunit has C bindings to emit the protocol. The 'check' C unit testing project
-has included subunit support in their project for some years now. See
-'c/README' for more details.
-
-C++
-===
-
-The C library is includable and usable directly from C++. A TestListener for
-CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
-
-shell
-=====
-
-There are two sets of shell tools. There are filters, which accept a subunit
-stream on stdin and output processed data (or a transformed stream) on stdout.
-
-Then there are unittest facilities similar to those for C: shell bindings
-consisting of simple functions to output protocol elements, and a patch for
-adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
-details.
-
-Filter recipes
---------------
-
-To ignore some failing tests whose root cause is already known::
-
-  subunit-filter --without 'AttributeError.*flavor'
-
-
-The xUnit test model
---------------------
-
-Subunit implements a slightly modified xUnit test model. The stock standard
-model is that there are tests, which have an id(), can be run, and when run
-start, emit an outcome (like success or failure) and then finish.
-
-Subunit extends this with the idea of test enumeration (find out about tests
-a runner has without running them), tags (allow users to describe tests in
-ways the test framework doesn't apply any semantic value to), file attachments
-(allow arbitrary data to make analysing a failure easy) and timestamps.
-
-The protocol
-------------
-
-Version 2, or v2, is new and still under development, but is intended to
-supersede version 1 in the very near future. Subunit's bundled tools accept
-only version 2 and only emit version 2, but the new filters subunit-1to2 and
-subunit-2to1 can be used to interoperate with older third party libraries.
-
-Version 2
-=========
-
-Version 2 is a binary protocol consisting of independent packets that can be
-embedded in the output from tools like make - as long as each packet has no
-other bytes mixed in with it (which 'make -j N>1' has a tendency to do).
-Version 2 is currently in draft form, and early adopters should be willing
-to either discard stored results (if protocol changes are made), or bulk
-convert them back to v1 and then to a newer edition of v2.
-
-The protocol synchronises at the start of the stream, after a packet, or
-after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
-directly after the end of the prior packet.
-
-Subunit is intended to be transported over a reliable streaming protocol such
-as TCP. As such it does not concern itself with out of order delivery of
-packets. However, because of the possibility of corruption due to either
-bugs in the sender, or due to mixed up data from concurrent writes to the same
-fd when being embedded, subunit strives to recover reasonably gracefully from
-damaged data.
-
-A key design goal for Subunit version 2 is to allow processing and multiplexing
-without forcing buffering for semantic correctness, as buffering tends to hide
-hung or otherwise misbehaving tests. That said, limited time based buffering
-for network efficiency is a good idea - this is ultimately the implementor's
-choice. Line buffering is also discouraged for subunit streams, as dropping
-into a debugger or other tool may require interactive traffic even if line
-buffering would not otherwise be a problem.
-
-In version two there are two conceptual events - a test status event and a file
-attachment event. Events may have timestamps, and the path of multiplexers that
-an event is routed through is recorded to permit sending actions back to the
-source (such as new tests to run or stdin for driving debuggers and other
-interactive input). Test status events are used to enumerate tests and to
-report tests and test helpers as they run. Tests may have tags, used to allow
-tunnelling extra meanings through subunit without requiring parsing of
-arbitrary file attachments. Things that are not standalone tests get marked
-as such by setting the 'Runnable' flag to false. (For instance, individual
-assertions in TAP are not runnable tests, only the top level TAP test script
-is runnable).
-
-File attachments are used to provide rich detail about the nature of a failure.
-File attachments can also be used to encapsulate stdout and stderr both during
-and outside tests.
-
-Most numbers are stored in network byte order - Most Significant Byte first -
-encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
-byte's top 2 high order bits encode the total number of octets in the number.
-This encoding can encode values from 0 to 2**30-1, enough to encode a
-nanosecond. Numbers that are not variable length encoded are still stored in
-MSB order.
-
- prefix   octets   max       max
-+-------+--------+---------+------------+
-| 00    |      1 |  2**6-1 |         63 |
-| 01    |      2 | 2**14-1 |      16383 |
-| 10    |      3 | 2**22-1 |    4194303 |
-| 11    |      4 | 2**30-1 | 1073741823 |
-+-------+--------+---------+------------+
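-
-For illustration only (this sketch is not part of the protocol text, and the
-helper names are invented rather than taken from any subunit API), the
-encoding above could be written in Python as::
-
-  def write_number(value):
-      # emit the 2-bit octet-count prefix plus the value, MSB first
-      if value < 2 ** 6:
-          return bytes([value])
-      if value < 2 ** 14:
-          return bytes([0x40 | (value >> 8), value & 0xff])
-      if value < 2 ** 22:
-          return bytes([0x80 | (value >> 16), (value >> 8) & 0xff,
-                        value & 0xff])
-      if value < 2 ** 30:
-          return bytes([0xc0 | (value >> 24), (value >> 16) & 0xff,
-                        (value >> 8) & 0xff, value & 0xff])
-      raise ValueError("%d does not fit in a subunit v2 number" % value)
-
-  def read_number(data):
-      # returns (value, octets consumed); data starts at the number
-      octets = (data[0] >> 6) + 1
-      value = data[0] & 0x3f
-      for byte in data[1:octets]:
-          value = (value << 8) | byte
-      return value, octets
-
-  assert write_number(12) == b'\x0c'
-  assert read_number(write_number(16383)) == (16383, 2)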
-
-All variable length elements of the packet are stored with a length prefix
-number allowing them to be skipped over for consumers that don't need to
-interpret them.
-
-UTF-8 strings are stored with no terminating NUL and should not have any
-embedded NULs (implementations SHOULD validate any such strings that they
-process and take some remedial action, such as discarding the packet as
-corrupt).
-
-In short the structure of a packet is:
-PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
-          FILECONTENT? ROUTING_CODE? CRC32
-
-In more detail...
-
-Packets are identified by a single byte signature - 0xB3, which is never legal
-in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
-bit set and the second not, which is the UTF-8 signature for a continuation
-byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
-the 1 and 0 for a continuation byte.
-
-If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
-a legal character, consider either recoding the text to UTF-8, or using
-subunit's 'file' packets to embed the text stream in subunit, rather than the
-other way around.
-
-Following the signature byte comes a 16-bit flags field, which includes a
-4-bit version field - if the version is not 0x2 then the packet cannot be
-read. It is recommended to signal an error at this point (e.g. by emitting
-a synthetic error packet and returning to the top level loop to look for
-new packets, or exiting with an error). If recovery is desired, treat the
-packet signature as an opaque byte and scan for a new synchronisation point.
-NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
-as they are an 8-bit safe container format, so recovery from this situation
-may involve an arbitrary number of false positives until an actual packet
-is encountered; and even then it may still be false, failing after passing
-the version check due to coincidence.
-
-Flags are stored in network byte order too.
-+-------------------------+------------------------+
-| High byte               | Low byte               |
-| 15 14 13 12 11 10  9  8 | 7  6  5  4  3  2  1  0 |
-| VERSION    |feature bits|                        |
-+------------+------------+------------------------+
-
-Valid version values are:
-0x2 - version 2
-
-Feature bits:
-Bit 11 - mask 0x0800 - Test id present.
-Bit 10 - mask 0x0400 - Routing code present.
-Bit  9 - mask 0x0200 - Timestamp present.
-Bit  8 - mask 0x0100 - Test is 'runnable'.
-Bit  7 - mask 0x0080 - Tags are present.
-Bit  6 - mask 0x0040 - File content is present.
-Bit  5 - mask 0x0020 - File MIME type is present.
-Bit  4 - mask 0x0010 - EOF marker.
-Bit  3 - mask 0x0008 - Must be zero in version 2.
-
-Test status gets three bits:
-Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
-000 - undefined / no test
-001 - Enumeration / existence
-002 - In progress
-003 - Success
-004 - Unexpected Success
-005 - Skipped
-006 - Failed
-007 - Expected failure
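-
-To make the layout above concrete, here is a small, non-normative Python
-sketch that decodes a flags word (the helper is illustrative only, not part
-of the subunit library)::
-
-  FEATURES = {0x0800: 'test id', 0x0400: 'route code', 0x0200: 'timestamp',
-              0x0100: 'runnable', 0x0080: 'tags', 0x0040: 'file content',
-              0x0020: 'mime type', 0x0010: 'eof'}
-  STATUS = ['undefined', 'enumeration', 'inprogress', 'success',
-            'uxsuccess', 'skip', 'fail', 'xfail']
-
-  def describe_flags(flags):
-      # flags is the 16-bit big-endian word following the 0xB3 signature
-      version = flags >> 12
-      if version != 0x2:
-          raise ValueError("unknown subunit version %d" % version)
-      present = [name for mask, name in sorted(FEATURES.items())
-                 if flags & mask]
-      return version, present, STATUS[flags & 0x0007]
-
-  # 0x2901, as used in the example packet below: version 2, test id
-  # present, runnable, status 'enumeration'.
-  assert describe_flags(0x2901) == (2, ['runnable', 'test id'], 'enumeration')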
-
-After the flags field is a number field giving the length in bytes for the
-entire packet including the signature and the checksum. This length must
-be less than 4MiB - 4194303 bytes. The encoding can obviously record a larger
-number but one of the goals is to avoid requiring large buffers, or causing
-large latency in the packet forward/processing pipeline. Larger file
-attachments can be communicated in multiple packets, and the overhead in such a
-4MiB packet is approximately 0.2%.
-
-The rest of the packet is a series of optional features as specified by the set
-feature bits in the flags field. When absent they are entirely absent.
-
-Forwarding and multiplexing of packets can be done without interpreting the
-remainder of the packet until the routing code and checksum (which are both at
-the end of the packet). Additionally, routers can often avoid copying or moving
-the bulk of the packet, as long as the routing code size increase doesn't force
-the length encoding to take up a new byte (which will only happen to packets
-less than or equal to 16KiB in length) - large packets are very efficient to
-route.
-
-Timestamp when present is a 32 bit unsigned integer for seconds, and a variable
-length number for nanoseconds, representing UTC time since Unix Epoch in
-seconds and nanoseconds.
-
-Test id when present is a UTF-8 string. The test id should uniquely identify
-runnable tests such that they can be selected individually. For tests and other
-actions which cannot be individually run (such as test
-fixtures/layers/subtests) uniqueness is not required (though being human
-meaningful is highly recommended).
-
-Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
-There are no restrictions on tag content (other than the restrictions on UTF-8
-strings in subunit in general). Tags have no ordering.
-
-When a MIME type is present, it defines the MIME type for the file across all
-packets of the same file (routing code + testid + name uniquely identifies a
-file, reset when EOF is flagged). If a file never has a MIME type set, it
-should be treated as application/octet-stream.
-
-File content when present is a UTF-8 string for the name followed by the length
-in bytes of the content, and then the content octets.
-
-If present, the routing code is a UTF-8 string. The routing code is used to
-determine which test backend a test was running on when doing data analysis,
-and to route stdin to the test process if interaction is required.
-
-Multiplexers SHOULD add a routing code if none is present, and prefix any
-existing routing code with a routing code ('/' separated) if one is already
-present. For example, a multiplexer might label each stream it is multiplexing
-with a simple ordinal ('0', '1' etc), and given an incoming packet with route
-code '3' from stream '0' would adjust the route code when forwarding the packet
-to be '0/3'.
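-
-A hypothetical helper for that composition rule (the function name is
-invented for this example and is not part of any subunit API)::
-
-  def prefix_route_code(stream_label, existing_code):
-      # stream_label identifies the source stream at this multiplexer,
-      # e.g. '0'; existing_code is the packet's current route code or None.
-      if existing_code is None:
-          return stream_label
-      return stream_label + '/' + existing_code
-
-  assert prefix_route_code('0', '3') == '0/3'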
-
-The last field of the packet is a CRC-32 checksum of the preceding contents of
-the packet, including the signature.
-
-Example packets
-~~~~~~~~~~~~~~~
-
-Trivial test "foo" enumeration packet, with test id, runnable set,
-status=enumeration. Spaces below are to visually break up signature / flags /
-length / testid / crc32
-
-b3 2901 0c 03666f6f 08555f1b
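-
-A short, non-normative Python check of that example packet (assuming the
-CRC-32 in use is the common zlib/IEEE one)::
-
-  import zlib
-
-  packet = bytes.fromhex('b3' '2901' '0c' '03666f6f' '08555f1b')
-  body, crc = packet[:-4], packet[-4:]
-  assert packet[3] == len(packet) == 12          # length field covers it all
-  assert int.from_bytes(crc, 'big') == zlib.crc32(body) & 0xffffffff
-  flags = int.from_bytes(body[1:3], 'big')       # 0x2901
-  test_id = body[5:5 + body[4]].decode('utf-8')  # 'foo'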
-
-
-Version 1 (and 1.1)
-===================
-
-Version 1 (and 1.1) are mostly human readable protocols.
-
-Sample subunit wire contents
-----------------------------
-
-The following::
-  test: test foo works
-  success: test foo works.
-  test: tar a file.
-  failure: tar a file. [
-  ..
-   ]..  space is eaten.
-  foo.c:34 WARNING foo is not defined.
-  ]
-  a writeln to stdout
-
-When run through subunit2pyunit::
-  .F
-  a writeln to stdout
-
-  ========================
-  FAILURE: tar a file.
-  -------------------
-  ..
-  ]..  space is eaten.
-  foo.c:34 WARNING foo is not defined.
-
-
-Subunit protocol description
-============================
-
-This description is being ported to an EBNF style. Currently it is only partly
-in that style, but should be fairly clear all the same. When in doubt, refer to
-the source (and ideally help fix up the description!). Generally the protocol
-is line oriented and consists of either directives and their parameters, or,
-when outside a DETAILS region, unexpected lines which are not interpreted by
-the parser - they should be forwarded unaltered.
-
-test|testing|test:|testing: test LABEL
-success|success:|successful|successful: test LABEL
-success|success:|successful|successful: test LABEL DETAILS
-failure: test LABEL
-failure: test LABEL DETAILS
-error: test LABEL
-error: test LABEL DETAILS
-skip[:] test LABEL
-skip[:] test LABEL DETAILS
-xfail[:] test LABEL
-xfail[:] test LABEL DETAILS
-uxsuccess[:] test LABEL
-uxsuccess[:] test LABEL DETAILS
-progress: [+|-]X
-progress: push
-progress: pop
-tags: [-]TAG ...
-time: YYYY-MM-DD HH:MM:SSZ
-
-LABEL: UTF8*
-NAME: UTF8*
-DETAILS ::= BRACKETED | MULTIPART
-BRACKETED ::= '[' CR UTF8-lines ']' CR
-MULTIPART ::= '[ multipart' CR PART* ']' CR
-PART ::= PART_TYPE CR NAME CR PART_BYTES CR
-PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
-PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
-
-unexpected output on stdout -> stdout.
-exit w/0 or last test completing -> error
-
-Tags given outside a test are applied to all following tests
-Tags given after a test: line and before the result line for the same test
-apply only to that test, and inherit the current global tags.
-A '-' before a tag is used to remove tags - e.g. to prevent a global tag
-applying to a single test, or to cancel a global tag.
-
-The progress directive is used to provide progress information about a stream
-so that stream consumers can provide completion estimates, progress bars and so
-on. Stream generators that know how many tests will be present in the stream
-should output "progress: COUNT". Stream filters that add tests should output
-"progress: +COUNT", and those that remove tests should output
-"progress: -COUNT". An absolute count should reset the progress indicators in
-use - it indicates that two separate streams from different generators have
-been trivially concatenated together, and there is no knowledge of how many
-more complete streams are incoming. Smart concatenation could scan each stream
-for their count and sum them, or alternatively translate absolute counts into
-relative counts inline. It is recommended that outputters avoid absolute counts
-unless necessary. The push and pop directives are used to provide local regions
-for progress reporting. This fits with hierarchically operating test
-environments - such as those that organise tests into suites - the top-most
-runner can report on the number of suites, and each suite surrounds its output
-with a (push, pop) pair. Interpreters should interpret a pop as also advancing
-the progress of the restored level by one step. Encountering progress
-directives between the start and end of a test pair indicates that a previous
-test was interrupted and did not cleanly terminate: it should be implicitly
-closed with an error (the same as when a stream ends with no closing test
-directive for the most recently started test).
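-
-For example, a hypothetical stream from a runner that contains two suites of
-three tests each might carry the following directives (test lines elided)::
-
-  progress: 2
-  progress: push
-  progress: 3
-  test: suite_one.test_a
-  success: suite_one.test_a
-  ...
-  progress: pop
-  progress: push
-  progress: 3
-  ...
-  progress: pop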
-
-The time directive acts as a clock event - it sets the time for all future
-events. The value should be a valid ISO8601 time.
-
-The skip, xfail and uxsuccess outcomes are not supported by all testing
-environments. In Python the testtools (https://launchpad.net/testtools)
-library is used to translate these automatically if an older Python version
-that does not support them is in use. See the testtools documentation for the
-translation policy.
-
-skip is used to indicate a test was discovered but not executed. xfail is used
-to indicate a test that errored in some expected fashion (also known as "TODO"
-tests in some frameworks). uxsuccess is used to indicate an unexpected success
-where a test thought to be failing actually passes. It is complementary to
-xfail.
-
-Hacking on subunit
-------------------
-
-Releases
-========
-
-* Update versions in configure.ac and python/subunit/__init__.py.
-* Update NEWS.
-* Do a make distcheck, which will update Makefile etc.
-* Do a PyPI release: PYTHONPATH=../../python python ../../setup.py sdist upload -s
-* Upload the regular one to LP.
-* Push a tagged commit.
-
diff --git a/lib/subunit/all_tests.py b/lib/subunit/all_tests.py
deleted file mode 100644
index 23fd65d..0000000
--- a/lib/subunit/all_tests.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import unittest
-
-import subunit
-
-
-class ShellTests(subunit.ExecTestCase):
-
-    def test_sourcing(self):
-        """./shell/tests/test_source_library.sh"""
-
-    def test_functions(self):
-        """./shell/tests/test_function_output.sh"""
-
-
-def test_suite():
-    result = unittest.TestSuite()
-    result.addTest(subunit.test_suite())
-    result.addTest(ShellTests('test_sourcing'))
-    result.addTest(ShellTests('test_functions'))
-    return result
diff --git a/lib/subunit/c++/README b/lib/subunit/c++/README
deleted file mode 100644
index 7b81844..0000000
--- a/lib/subunit/c++/README
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-#  subunit C++ bindings.
-#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
-#
-#  This program is free software; you can redistribute it and/or modify
-#  it under the terms of the GNU General Public License as published by
-#  the Free Software Foundation; either version 2 of the License, or
-#  (at your option) any later version.
-#
-#  This program is distributed in the hope that it will be useful,
-#  but WITHOUT ANY WARRANTY; without even the implied warranty of
-#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#  GNU General Public License for more details.
-#
-#  You should have received a copy of the GNU General Public License
-#  along with this program; if not, write to the Free Software
-#  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-#
-
-Currently there are no native C++ bindings for subunit. However the C library
-can be used from C++ safely. A CPPUnit listener is built as part of Subunit to
-allow CPPUnit users to simply get Subunit output.
-
-To use the listener, use pkg-config (or your preferred replacement) to get the
-cflags and link settings from libcppunit_subunit.pc.
-
-In your test driver main, use SubunitTestProgressListener, as shown in this
-example main::
-
-  {
-    // Create the event manager and test controller
-    CPPUNIT_NS::TestResult controller;
-  
-    // Add a listener that collects test result
-    // so we can get the overall status.
-    // note this isn't needed for subunit...
-    CPPUNIT_NS::TestResultCollector result;
-    controller.addListener( &result );
-  
-    // Add a listener that print test activity in subunit format.
-    CPPUNIT_NS::SubunitTestProgressListener progress;
-    controller.addListener( &progress );
-  
-    // Add the top suite to the test runner
-    CPPUNIT_NS::TestRunner runner;
-    runner.addTest( CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest() );
-    runner.run( controller );
-  
-    return result.wasSuccessful() ? 0 : 1;
-  }
diff --git a/lib/subunit/c++/SubunitTestProgressListener.cpp b/lib/subunit/c++/SubunitTestProgressListener.cpp
deleted file mode 100644
index 76cd9e1..0000000
--- a/lib/subunit/c++/SubunitTestProgressListener.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*  Subunit test listener for cppunit (http://cppunit.sourceforge.net).
- *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
- *
- *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- *  license at the users choice. A copy of both licenses are available in the
- *  project source as Apache-2.0 and BSD. You may not use this file except in
- *  compliance with one of these two licences.
- *  
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under these licenses is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the license you chose for the specific language governing permissions
- *  and limitations under that license.
- */
-
-#include <cppunit/Exception.h>
-#include <cppunit/Test.h>
-#include <cppunit/TestFailure.h>
-#include <cppunit/TextOutputter.h>
-#include <iostream>
-
-// Have to be able to import the public interface without config.h.
-#include "SubunitTestProgressListener.h"
-#include "config.h"
-#include "subunit/child.h"
-
-
-CPPUNIT_NS_BEGIN
-
-
-void 
-SubunitTestProgressListener::startTest( Test *test )
-{
-  subunit_test_start(test->getName().c_str());
-  last_test_failed = false;
-}
-
-void 
-SubunitTestProgressListener::addFailure( const TestFailure &failure )
-{
-  std::ostringstream capture_stream;
-  TextOutputter outputter(NULL, capture_stream);
-  outputter.printFailureLocation(failure.sourceLine());
-  outputter.printFailureDetail(failure.thrownException());
-
-  if (failure.isError())
-      subunit_test_error(failure.failedTestName().c_str(),
-        		 capture_stream.str().c_str());
-  else
-      subunit_test_fail(failure.failedTestName().c_str(),
-                        capture_stream.str().c_str());
-  last_test_failed = true;
-}
-
-void 
-SubunitTestProgressListener::endTest( Test *test)
-{
-  if (!last_test_failed)
-      subunit_test_pass(test->getName().c_str());
-}
-
-
-CPPUNIT_NS_END
diff --git a/lib/subunit/c++/SubunitTestProgressListener.h b/lib/subunit/c++/SubunitTestProgressListener.h
deleted file mode 100644
index 5206d83..0000000
--- a/lib/subunit/c++/SubunitTestProgressListener.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*  Subunit test listener for cppunit (http://cppunit.sourceforge.net).
- *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
- *
- *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- *  license at the users choice. A copy of both licenses are available in the
- *  project source as Apache-2.0 and BSD. You may not use this file except in
- *  compliance with one of these two licences.
- *  
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under these licenses is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the license you chose for the specific language governing permissions
- *  and limitations under that license.
- */
-#ifndef CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
-#define CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
-
-#include <cppunit/TestListener.h>
-
-
-CPPUNIT_NS_BEGIN
-
-
-/*! 
- * \brief TestListener that outputs subunit
- * (http://www.robertcollins.net/unittest/subunit) compatible output.
- * \ingroup TrackingTestExecution
- */
-class CPPUNIT_API SubunitTestProgressListener : public TestListener
-{
-public:
- 
-  SubunitTestProgressListener() {}
-  
-  void startTest( Test *test );
-
-  void addFailure( const TestFailure &failure );
-
-  void endTest( Test *test );
-
-private:
-  /// Prevents the use of the copy constructor.
-  SubunitTestProgressListener( const SubunitTestProgressListener &copy );
-
-  /// Prevents the use of the copy operator.
-  void operator =( const SubunitTestProgressListener &copy );
-
-private:
-  int last_test_failed;
-};
-
-
-CPPUNIT_NS_END
-
-#endif  // CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
-
diff --git a/lib/subunit/c/README b/lib/subunit/c/README
deleted file mode 100644
index b62fd45..0000000
--- a/lib/subunit/c/README
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-#  subunit C bindings.
-#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-
-This subtree contains an implementation of the subunit child protocol.
-Currently I have no plans to write a test runner in C, so I have not written
-an implementation of the parent protocol. [but will happily accept patches].
-This implementation is built using SCons and tested via 'check'.
-See the tests/ directory for the test programs.
-You can use `make check` or `scons check` to run the tests. 
-
-The C protocol consists of four functions which you can use to output test
-metadata trivially. See lib/subunit_child.[ch] for details.
-
-However, this is not a test runner - subunit provides no support for [for
-instance] managing assertions, cleaning up on errors etc. You can look at
-'check' (http://check.sourceforge.net/) or
-'gunit' (https://garage.maemo.org/projects/gunit) for C unit test
-frameworks. 
-There is a patch for 'check' (check-subunit-*.patch) in this source tree.
-It's also available as request ID #1470750 in the sourceforge request tracker
-http://sourceforge.net/tracker/index.php. The 'check' developers have indicated
-they will merge this during the current release cycle.
-
-If you are a test environment maintainer - either homegrown, or 'check' or
-'gunit' or some other - you will want to know how the subunit calls should be
-used. Here is what a manually written test using the bindings might look like:
-
-
-void
-a_test(void) {
-  int result;
-  char message[128];
-  subunit_test_start("test name");
-  /* determine if test passes or fails */
-  result = SOME_VALUE;
-  if (!result) {
-    subunit_test_pass("test name");
-  } else {
-    /* subunit_test_fail() takes a single error string, so format it first */
-    snprintf(message, sizeof(message),
-      "Something went wrong running something:\n"
-      "exited with result: '%d'", result);
-    subunit_test_fail("test name", message);
-  }
-}
-
-Which when run with a subunit test runner will generate something like:
-test name ... ok
-
-on success, and:
-
-test name ... FAIL
-
-======================================================================
-FAIL: test name
-----------------------------------------------------------------------
-RemoteError:
-Something went wrong running something:
-exited with result: '1'
diff --git a/lib/subunit/c/include/subunit/child.h b/lib/subunit/c/include/subunit/child.h
deleted file mode 100644
index 896d2df..0000000
--- a/lib/subunit/c/include/subunit/child.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- *
- *  subunit C bindings.
- *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
- *
- *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- *  license at the users choice. A copy of both licenses are available in the
- *  project source as Apache-2.0 and BSD. You may not use this file except in
- *  compliance with one of these two licences.
- *  
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under these licenses is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the license you chose for the specific language governing permissions
- *  and limitations under that license.
- **/
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- * subunit_test_start:
- *
- * Report that a test is starting.
- * @name: test case name
- */
-extern void subunit_test_start(char const * const name);
-
-
-/**
- * subunit_test_pass:
- *
- * Report that a test has passed.
- *
- * @name: test case name
- */
-extern void subunit_test_pass(char const * const name);
-
-
-/**
- * subunit_test_fail:
- *
- * Report that a test has failed.
- * @name: test case name
- * @error: a string describing the error.
- */
-extern void subunit_test_fail(char const * const name, char const * const error);
-
-
-/**
- * subunit_test_error:
- *
- * Report that a test has errored. An error is an unintentional failure - i.e.
- * a segfault rather than a failed assertion.
- * @name: test case name
- * @error: a string describing the error.
- */
-extern void subunit_test_error(char const * const name,
-                               char const * const error);
-
-
-/**
- * subunit_test_skip:
- *
- * Report that a test has been skipped. A skip is a test that has not run to
- * conclusion but hasn't given an error either - its result is unknown.
- * @name: test case name
- * @reason: a string describing the reason for the skip.
- */
-extern void subunit_test_skip(char const * const name, 
-			      char const * const reason);
-
-
-enum subunit_progress_whence {
-	SUBUNIT_PROGRESS_SET,
-	SUBUNIT_PROGRESS_CUR,
-	SUBUNIT_PROGRESS_POP,
-	SUBUNIT_PROGRESS_PUSH,
-};
-
-/**
- * subunit_progress:
- *
- * Report the progress of a test run.
- * @whence: The type of progress update to report.
- * @offset: Offset of the progress (only for SUBUNIT_PROGRESS_SET
- * 			and SUBUNIT_PROGRESS_CUR).
- */
-extern void subunit_progress(enum subunit_progress_whence whence, int offset);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/lib/subunit/c/lib/child.c b/lib/subunit/c/lib/child.c
deleted file mode 100644
index 20f38da..0000000
--- a/lib/subunit/c/lib/child.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- *
- *  subunit C child-side bindings: report on tests being run.
- *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
- *
- *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- *  license at the users choice. A copy of both licenses are available in the
- *  project source as Apache-2.0 and BSD. You may not use this file except in
- *  compliance with one of these two licences.
- *  
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under these licenses is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the license you chose for the specific language governing permissions
- *  and limitations under that license.
- **/
-
-#include <stdio.h>
-#include <string.h>
-#include "subunit/child.h"
-
-/* Write details about a test event. It is the caller's responsibility to ensure
- * that details are only provided for events the protocol expects details on.
- * @event: The event - e.g. 'skip'
- * @name: The test name/id.
- * @details: The details of the event, may be NULL if no details are present.
- */
-static void
-subunit_send_event(char const * const event, char const * const name,
-		   char const * const details)
-{
-  if (NULL == details) {
-    fprintf(stdout, "%s: %s\n", event, name);
-  } else {
-    fprintf(stdout, "%s: %s [\n", event, name);
-    fprintf(stdout, "%s", details);
-    if (details[strlen(details) - 1] != '\n')
-      fprintf(stdout, "\n");
-    fprintf(stdout, "]\n");
-  }
-  fflush(stdout);
-}
-
-/* these functions all flush to ensure that the test runner knows the action
- * that has been taken even if the subsequent test etc takes a long time or
- * never completes (i.e. a segfault).
- */
-
-void
-subunit_test_start(char const * const name)
-{
-  subunit_send_event("test", name, NULL);
-}
-
-
-void
-subunit_test_pass(char const * const name)
-{
-  /* TODO: add success details as an option */
-  subunit_send_event("success", name, NULL);
-}
-
-
-void
-subunit_test_fail(char const * const name, char const * const error)
-{
-  subunit_send_event("failure", name, error);
-}
-
-
-void
-subunit_test_error(char const * const name, char const * const error)
-{
-  subunit_send_event("error", name, error);
-}
-
-
-void
-subunit_test_skip(char const * const name, char const * const reason)
-{
-  subunit_send_event("skip", name, reason);
-}
-
-void
-subunit_progress(enum subunit_progress_whence whence, int offset)
-{
-	switch (whence) {
-	case SUBUNIT_PROGRESS_SET:
-		printf("progress: %d\n", offset);
-		break;
-	case SUBUNIT_PROGRESS_CUR:
-		printf("progress: %+-d\n", offset);
-		break;
-	case SUBUNIT_PROGRESS_POP:
-		printf("progress: pop\n");
-		break;
-	case SUBUNIT_PROGRESS_PUSH:
-		printf("progress: push\n");
-		break;
-	default:
-		fprintf(stderr, "Invalid whence %d in subunit_progress()\n", whence);
-		break;
-	}
-}
diff --git a/lib/subunit/c/tests/test_child.c b/lib/subunit/c/tests/test_child.c
deleted file mode 100644
index 1318322..0000000
--- a/lib/subunit/c/tests/test_child.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- *
- *  subunit C bindings.
- *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
- *
- *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- *  license at the users choice. A copy of both licenses are available in the
- *  project source as Apache-2.0 and BSD. You may not use this file except in
- *  compliance with one of these two licences.
- *  
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under these licenses is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the license you chose for the specific language governing permissions
- *  and limitations under that license.
- **/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <string.h>
-#include <check.h>
-
-#include "subunit/child.h"
-
-/**
- * Helper function to capture stdout, run some call, and check what
- * was written.
- * @expected the expected stdout content
- * @function the function to call.
- **/
-static void
-test_stdout_function(char const * expected,
-                     void (*function)(void))
-{
-    /* test that the start function emits a correct test: line. */
-    int bytecount;
-    int old_stdout;
-    int new_stdout[2];
-    char buffer[100];
-    /* we need a socketpair to capture stdout in */
-    fail_if(pipe(new_stdout), "Failed to create a socketpair.");
-    /* backup stdout so we can replace it */
-    old_stdout = dup(1);
-    if (old_stdout == -1) {
-      close(new_stdout[0]);
-      close(new_stdout[1]);
-      fail("Failed to backup stdout before replacing.");
-    }
-    /* redirect stdout so we can analyse it */
-    if (dup2(new_stdout[1], 1) != 1) {
-      close(old_stdout);
-      close(new_stdout[0]);
-      close(new_stdout[1]);
-      fail("Failed to redirect stdout");
-    }
-    /* yes this can block. It's a test case with < 100 bytes of output.
-     * DEAL.
-     */
-    function();
-    /* flush writes on FILE object to file descriptor */
-    fflush(stdout);
-    /* restore stdout now */
-    if (dup2(old_stdout, 1) != 1) {
-      close(old_stdout);
-      close(new_stdout[0]);
-      close(new_stdout[1]);
-      fail("Failed to restore stdout");
-    }
-    /* and we dont need the write side any more */
-    if (close(new_stdout[1])) {
-      close(new_stdout[0]);
-      fail("Failed to close write side of socketpair.");
-    }
-    /* get the output */
-    bytecount = read(new_stdout[0], buffer, 100);
-    if (0 > bytecount) {
-      close(new_stdout[0]);
-      fail("Failed to read captured output.");
-    }
-    buffer[bytecount]='\0';
-    /* and we dont need the read side any more */
-    fail_if(close(new_stdout[0]), "Failed to close read side of socketpair.");
-    /* compare with expected outcome */
-    fail_if(strcmp(expected, buffer), "Did not get expected output [%s], got [%s]", expected, buffer);
-}
-
-
-static void
-call_test_start(void)
-{
-    subunit_test_start("test case");
-}
-
-
-START_TEST (test_start)
-{
-    test_stdout_function("test: test case\n", call_test_start);
-}
-END_TEST
-
-
-static void
-call_test_pass(void)
-{
-    subunit_test_pass("test case");
-}
-
-
-START_TEST (test_pass)
-{
-    test_stdout_function("success: test case\n", call_test_pass);
-}
-END_TEST
-
-
-static void
-call_test_fail(void)
-{
-    subunit_test_fail("test case", "Multiple lines\n of error\n");
-}
-
-
-START_TEST (test_fail)
-{
-    test_stdout_function("failure: test case [\n"
-                         "Multiple lines\n"
-        		 " of error\n"
-			 "]\n",
-			 call_test_fail);
-}
-END_TEST
-
-
-static void
-call_test_error(void)
-{
-    subunit_test_error("test case", "Multiple lines\n of output\n");
-}
-
-
-START_TEST (test_error)
-{
-    test_stdout_function("error: test case [\n"
-                         "Multiple lines\n"
-        		 " of output\n"
-			 "]\n",
-			 call_test_error);
-}
-END_TEST
-
-
-static void
-call_test_skip(void)
-{
-    subunit_test_skip("test case", "Multiple lines\n of output\n");
-}
-
-
-START_TEST (test_skip)
-{
-    test_stdout_function("skip: test case [\n"
-                         "Multiple lines\n"
-        		 " of output\n"
-			 "]\n",
-			 call_test_skip);
-}
-END_TEST
-
-
-static void
-call_test_progress_pop(void)
-{
-	subunit_progress(SUBUNIT_PROGRESS_POP, 0);
-}
-
-static void
-call_test_progress_set(void)
-{
-	subunit_progress(SUBUNIT_PROGRESS_SET, 5);
-}
-
-static void
-call_test_progress_push(void)
-{
-	subunit_progress(SUBUNIT_PROGRESS_PUSH, 0);
-}
-
-static void
-call_test_progress_cur(void)
-{
-	subunit_progress(SUBUNIT_PROGRESS_CUR, -6);
-}
-
-START_TEST (test_progress)
-{
-	test_stdout_function("progress: pop\n",
-			 call_test_progress_pop);
-	test_stdout_function("progress: push\n",
-			 call_test_progress_push);
-	test_stdout_function("progress: 5\n",
-			 call_test_progress_set);
-	test_stdout_function("progress: -6\n",
-			 call_test_progress_cur);
-}
-END_TEST
-
-static Suite *
-child_suite(void)
-{
-    Suite *s = suite_create("subunit_child");
-    TCase *tc_core = tcase_create("Core");
-    suite_add_tcase (s, tc_core);
-    tcase_add_test (tc_core, test_start);
-    tcase_add_test (tc_core, test_pass);
-    tcase_add_test (tc_core, test_fail);
-    tcase_add_test (tc_core, test_error);
-    tcase_add_test (tc_core, test_skip);
-    tcase_add_test (tc_core, test_progress);
-    return s;
-}
-
-
-int
-main(void)
-{
-  int nf;
-  Suite *s = child_suite();
-  SRunner *sr = srunner_create(s);
-  srunner_run_all(sr, CK_NORMAL);
-  nf = srunner_ntests_failed(sr);
-  srunner_free(sr);
-  return (nf == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
-}
diff --git a/lib/subunit/c/wscript b/lib/subunit/c/wscript
deleted file mode 100644
index f308b33..0000000
--- a/lib/subunit/c/wscript
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-
-import Options
-
-def configure(conf):
-	if conf.CHECK_BUNDLED_SYSTEM_PKG('subunit', pkg='libsubunit'):
-		conf.define('USING_SYSTEM_SUBUNIT', 1)
-
-def build(bld):
-	if bld.CONFIG_SET('USING_SYSTEM_SUBUNIT'):
-		return
-
-	bld.SAMBA_LIBRARY('subunit',
-			  source='lib/child.c',
-			  private_library=True,
-			  includes='include')
diff --git a/lib/subunit/configure.ac b/lib/subunit/configure.ac
deleted file mode 100644
index ef1a048..0000000
--- a/lib/subunit/configure.ac
+++ /dev/null
@@ -1,76 +0,0 @@
-m4_define([SUBUNIT_MAJOR_VERSION], [0])
-m4_define([SUBUNIT_MINOR_VERSION], [0])
-m4_define([SUBUNIT_MICRO_VERSION], [21])
-m4_define([SUBUNIT_VERSION],
-m4_defn([SUBUNIT_MAJOR_VERSION]).m4_defn([SUBUNIT_MINOR_VERSION]).m4_defn([SUBUNIT_MICRO_VERSION]))
-AC_PREREQ([2.59])
-AC_INIT([subunit], [SUBUNIT_VERSION], [subunit-dev at lists.launchpad.net])
-AC_CONFIG_SRCDIR([c/lib/child.c])
-AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects])
-AC_CONFIG_MACRO_DIR([m4])
-[SUBUNIT_MAJOR_VERSION]=SUBUNIT_MAJOR_VERSION
-[SUBUNIT_MINOR_VERSION]=SUBUNIT_MINOR_VERSION
-[SUBUNIT_MICRO_VERSION]=SUBUNIT_MICRO_VERSION
-[SUBUNIT_VERSION]=SUBUNIT_VERSION
-AC_SUBST([SUBUNIT_MAJOR_VERSION])
-AC_SUBST([SUBUNIT_MINOR_VERSION])
-AC_SUBST([SUBUNIT_MICRO_VERSION])
-AC_SUBST([SUBUNIT_VERSION])
-AC_USE_SYSTEM_EXTENSIONS
-AC_PROG_CC
-AC_PROG_CXX
-m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
-AM_PROG_CC_C_O
-AC_PROG_INSTALL
-AC_PROG_LN_S
-AC_PROG_LIBTOOL
-AM_PATH_PYTHON
-
-AS_IF([test "$GCC" = "yes"],
-      [
-  SUBUNIT_CFLAGS="-Wall -Werror -Wextra -Wstrict-prototypes "
-  SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wmissing-prototypes -Wwrite-strings "
-  SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wno-variadic-macros "
-  SUBUNIT_CXXFLAGS="-Wall -Werror -Wextra -Wwrite-strings -Wno-variadic-macros"
-      ])
-
-AM_CFLAGS="$SUBUNIT_CFLAGS -I\$(top_srcdir)/c/include"
-AM_CXXFLAGS="$SUBUNIT_CXXFLAGS -I\$(top_srcdir)/c/include"
-AC_SUBST(AM_CFLAGS)
-AC_SUBST(AM_CXXFLAGS)
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_CHECK_HEADERS([stdlib.h])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-AC_TYPE_PID_T
-AC_TYPE_SIZE_T
-AC_HEADER_TIME
-AC_STRUCT_TM
-
-AC_CHECK_SIZEOF(int, 4)
-AC_CHECK_SIZEOF(short, 2)
-AC_CHECK_SIZEOF(long, 4)
-
-# Checks for library functions.
-AC_FUNC_MALLOC
-AC_FUNC_REALLOC
-
-# Easier memory management.
-# C unit testing.
-PKG_CHECK_MODULES([CHECK], [check >= 0.9.4])
-# C++ unit testing.
-PKG_CHECK_MODULES([CPPUNIT], [cppunit])
-
-# Output files
-AC_CONFIG_HEADERS([config.h])
-
-AC_CONFIG_FILES([libsubunit.pc
-		 libcppunit_subunit.pc
-                 Makefile
-		 perl/Makefile.PL
-                 ])
-AC_OUTPUT
diff --git a/lib/subunit/filters/subunit-1to2 b/lib/subunit/filters/subunit-1to2
deleted file mode 100755
index d59447b..0000000
--- a/lib/subunit/filters/subunit-1to2
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Convert a version 1 subunit stream to version 2 stream."""
-
-from optparse import OptionParser
-import sys
-
-from testtools import ExtendedToStreamDecorator
-
-from subunit import StreamResultToBytes
-from subunit.filters import find_stream, run_tests_from_stream
-
-
-def make_options(description):
-    parser = OptionParser(description=__doc__)
-    return parser
-
-
-def main():
-    parser = make_options(__doc__)
-    (options, args) = parser.parse_args()
-    run_tests_from_stream(find_stream(sys.stdin, args),
-        ExtendedToStreamDecorator(StreamResultToBytes(sys.stdout)))
-    sys.exit(0)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/lib/subunit/filters/subunit-2to1 b/lib/subunit/filters/subunit-2to1
deleted file mode 100755
index 4dc36b9..0000000
--- a/lib/subunit/filters/subunit-2to1
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Convert a version 2 subunit stream to a version 1 stream."""
-
-from optparse import OptionParser
-import sys
-
-from testtools import StreamToExtendedDecorator
-
-from subunit import ByteStreamToStreamResult, TestProtocolClient
-from subunit.filters import find_stream, run_tests_from_stream
-
-
-def make_options(description):
-    parser = OptionParser(description=__doc__)
-    return parser
-
-
-def main():
-    parser = make_options(__doc__)
-    (options, args) = parser.parse_args()
-    case = ByteStreamToStreamResult(
-        find_stream(sys.stdin, args), non_subunit_name='stdout')
-    result = StreamToExtendedDecorator(TestProtocolClient(sys.stdout))
-    # What about stdout chunks?
-    result.startTestRun()
-    case.run(result)
-    result.stopTestRun()
-    sys.exit(0)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/lib/subunit/filters/subunit-filter b/lib/subunit/filters/subunit-filter
deleted file mode 100755
index e9e2bb0..0000000
--- a/lib/subunit/filters/subunit-filter
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 200-2013  Robert Collins <robertc at robertcollins.net>
-#            (C) 2009  Martin Pool
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Filter a subunit stream to include/exclude tests.
-
-The default is to strip successful tests.
-
-Tests can be filtered by Python regular expressions with --with and --without,
-which match both the test name and the error text (if any).  The result
-contains tests which match any of the --with expressions and none of the
---without expressions.  For case-insensitive matching prepend '(?i)'.
-Remember to quote shell metacharacters.
-"""
-
-from optparse import OptionParser
-import sys
-import re
-
-from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
-
-from subunit import (
-    DiscardStream,
-    ProtocolTestCase,
-    StreamResultToBytes,
-    read_test_list,
-    )
-from subunit.filters import filter_by_result, find_stream
-from subunit.test_results import (
-    and_predicates,
-    make_tag_filter,
-    TestResultFilter,
-    )
-
-
-def make_options(description):
-    parser = OptionParser(description=__doc__)
-    parser.add_option("--error", action="store_false",
-        help="include errors", default=False, dest="error")
-    parser.add_option("-e", "--no-error", action="store_true",
-        help="exclude errors", dest="error")
-    parser.add_option("--failure", action="store_false",
-        help="include failures", default=False, dest="failure")
-    parser.add_option("-f", "--no-failure", action="store_true",
-        help="exclude failures", dest="failure")
-    parser.add_option("--passthrough", action="store_false",
-        help="Forward non-subunit input as 'stdout'.", default=False,
-        dest="no_passthrough")
-    parser.add_option("--no-passthrough", action="store_true",
-        help="Discard all non subunit input.", default=False,
-        dest="no_passthrough")
-    parser.add_option("-s", "--success", action="store_false",
-        help="include successes", dest="success")
-    parser.add_option("--no-success", action="store_true",
-        help="exclude successes", default=True, dest="success")
-    parser.add_option("--no-skip", action="store_true",
-        help="exclude skips", dest="skip")
-    parser.add_option("--xfail", action="store_false",
-        help="include expected falures", default=True, dest="xfail")
-    parser.add_option("--no-xfail", action="store_true",
-        help="exclude expected falures", default=True, dest="xfail")
-    parser.add_option(
-        "--with-tag", type=str,
-        help="include tests with these tags", action="append", dest="with_tags")
-    parser.add_option(
-        "--without-tag", type=str,
-        help="exclude tests with these tags", action="append", dest="without_tags")
-    parser.add_option("-m", "--with", type=str,
-        help="regexp to include (case-sensitive by default)",
-        action="append", dest="with_regexps")
-    parser.add_option("--fixup-expected-failures", type=str,
-        help="File with list of test ids that are expected to fail; on failure "
-             "their result will be changed to xfail; on success they will be "
-             "changed to error.", dest="fixup_expected_failures", action="append")
-    parser.add_option("--without", type=str,
-        help="regexp to exclude (case-sensitive by default)",
-        action="append", dest="without_regexps")
-    parser.add_option("-F", "--only-genuine-failures", action="callback",
-        callback=only_genuine_failures_callback,
-        help="Only pass through failures and exceptions.")
-    return parser
-
-
-def only_genuine_failures_callback(option, opt, value, parser):
-    parser.rargs.insert(0, '--no-passthrough')
-    parser.rargs.insert(0, '--no-xfail')
-    parser.rargs.insert(0, '--no-skip')
-    parser.rargs.insert(0, '--no-success')
-
-
-def _compile_re_from_list(l):
-    return re.compile("|".join(l), re.MULTILINE)
-
-
-def _make_regexp_filter(with_regexps, without_regexps):
-    """Make a callback that checks tests against regexps.
-
-    with_regexps and without_regexps are each either a list of regexp strings,
-    or None.
-    """
-    with_re = with_regexps and _compile_re_from_list(with_regexps)
-    without_re = without_regexps and _compile_re_from_list(without_regexps)
-
-    def check_regexps(test, outcome, err, details, tags):
-        """Check if this test and error match the regexp filters."""
-        test_str = str(test) + outcome + str(err) + str(details)
-        if with_re and not with_re.search(test_str):
-            return False
-        if without_re and without_re.search(test_str):
-            return False
-        return True
-    return check_regexps
-
-
-def _make_result(output, options, predicate):
-    """Make the result that we'll send the test outcomes to."""
-    fixup_expected_failures = set()
-    for path in options.fixup_expected_failures or ():
-        fixup_expected_failures.update(read_test_list(path))
-    return StreamToExtendedDecorator(TestResultFilter(
-        ExtendedToStreamDecorator(
-        StreamResultToBytes(output)),
-        filter_error=options.error,
-        filter_failure=options.failure,
-        filter_success=options.success,
-        filter_skip=options.skip,
-        filter_xfail=options.xfail,
-        filter_predicate=predicate,
-        fixup_expected_failures=fixup_expected_failures))
-
-
-def main():
-    parser = make_options(__doc__)
-    (options, args) = parser.parse_args()
-
-    regexp_filter = _make_regexp_filter(
-        options.with_regexps, options.without_regexps)
-    tag_filter = make_tag_filter(options.with_tags, options.without_tags)
-    filter_predicate = and_predicates([regexp_filter, tag_filter])
-
-    filter_by_result(
-        lambda output_to: _make_result(sys.stdout, options, filter_predicate),
-        output_path=None,
-        passthrough=(not options.no_passthrough),
-        forward=False,
-        protocol_version=2,
-        input_stream=find_stream(sys.stdin, args))
-    sys.exit(0)
-
-
-if __name__ == '__main__':
-    main()
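
For reference, the include/exclude behaviour described in the docstring above
boils down to a small predicate over the combined test name and error text; a
minimal standalone sketch (plain re only; function and test names here are
illustrative, not part of the filter itself):

    import re

    def make_regexp_predicate(with_regexps=None, without_regexps=None):
        # Join each list into one alternation, as _make_regexp_filter does above.
        with_re = with_regexps and re.compile("|".join(with_regexps), re.MULTILINE)
        without_re = without_regexps and re.compile("|".join(without_regexps), re.MULTILINE)

        def keep(text):
            # Keep a test only if it matches some --with pattern (when given)
            # and none of the --without patterns.
            if with_re and not with_re.search(text):
                return False
            if without_re and without_re.search(text):
                return False
            return True

        return keep

    keep = make_regexp_predicate(["(?i)smbclient"], ["known_flaky"])
    print(keep("samba.tests.SmbClientTests.test_connect"))     # True
    print(keep("samba.tests.SmbClientTests.known_flaky_one"))  # False
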
diff --git a/lib/subunit/filters/subunit-ls b/lib/subunit/filters/subunit-ls
deleted file mode 100755
index 8c6a1e7..0000000
--- a/lib/subunit/filters/subunit-ls
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2008  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""List tests in a subunit stream."""
-
-from optparse import OptionParser
-import sys
-
-from testtools import (
-    CopyStreamResult, StreamToExtendedDecorator, StreamResultRouter,
-    StreamSummary)
-
-from subunit import ByteStreamToStreamResult
-from subunit.filters import find_stream, run_tests_from_stream
-from subunit.test_results import (
-    CatFiles,
-    TestIdPrintingResult,
-    )
-
-
-parser = OptionParser(description=__doc__)
-parser.add_option("--times", action="store_true",
-    help="list the time each test took (requires a timestamped stream)",
-        default=False)
-parser.add_option("--exists", action="store_true",
-    help="list tests that are reported as existing (as well as ran)",
-        default=False)
-parser.add_option("--no-passthrough", action="store_true",
-    help="Hide all non subunit input.", default=False, dest="no_passthrough")
-(options, args) = parser.parse_args()
-test = ByteStreamToStreamResult(
-    find_stream(sys.stdin, args), non_subunit_name="stdout")
-result = TestIdPrintingResult(sys.stdout, options.times, options.exists)
-if not options.no_passthrough:
-    result = StreamResultRouter(result)
-    cat = CatFiles(sys.stdout)
-    result.add_rule(cat, 'test_id', test_id=None)
-summary = StreamSummary()
-result = CopyStreamResult([result, summary])
-result.startTestRun()
-test.run(result)
-result.stopTestRun()
-if summary.wasSuccessful():
-    exit_code = 0
-else:
-    exit_code = 1
-sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-notify b/lib/subunit/filters/subunit-notify
deleted file mode 100755
index bc833da..0000000
--- a/lib/subunit/filters/subunit-notify
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2010 Jelmer Vernooij <jelmer at samba.org>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Notify the user of a finished test run."""
-
-import sys
-
-import pygtk
-pygtk.require('2.0')
-import pynotify
-from testtools import StreamToExtendedDecorator
-
-from subunit import TestResultStats
-from subunit.filters import run_filter_script
-
-if not pynotify.init("Subunit-notify"):
-    sys.exit(1)
-
-
-def notify_of_result(result):
-    result = result.decorated
-    if result.failed_tests > 0:
-        summary = "Test run failed"
-    else:
-        summary = "Test run successful"
-    body = "Total tests: %d; Passed: %d; Failed: %d" % (
-        result.total_tests,
-        result.passed_tests,
-        result.failed_tests,
-    )
-    nw = pynotify.Notification(summary, body)
-    nw.show()
-
-
-run_filter_script(
-    lambda output:StreamToExtendedDecorator(TestResultStats(output)),
-    __doc__, notify_of_result, protocol_version=2)
diff --git a/lib/subunit/filters/subunit-output b/lib/subunit/filters/subunit-output
deleted file mode 100644
index 61e5d11..0000000
--- a/lib/subunit/filters/subunit-output
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2013 Subunit Contributors
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-
-
-"""A command-line tool to generate a subunit result byte-stream."""
-
-from subunit._output import output_main
-
-
-if __name__ == '__main__':
-    exit(output_main())
diff --git a/lib/subunit/filters/subunit-stats b/lib/subunit/filters/subunit-stats
deleted file mode 100755
index 79733b0..0000000
--- a/lib/subunit/filters/subunit-stats
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Filter a subunit stream to get aggregate statistics."""
-
-import sys
-
-from testtools import StreamToExtendedDecorator
-
-from subunit import TestResultStats
-from subunit.filters import run_filter_script
-
-
-result = TestResultStats(sys.stdout)
-def show_stats(r):
-    r.decorated.formatStats()
-run_filter_script(
-    lambda output:StreamToExtendedDecorator(result),
-    __doc__, show_stats, protocol_version=2, passthrough_subunit=False)
diff --git a/lib/subunit/filters/subunit-tags b/lib/subunit/filters/subunit-tags
deleted file mode 100755
index 1022492..0000000
--- a/lib/subunit/filters/subunit-tags
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""A filter to change tags on a subunit stream.
-
-subunit-tags foo -> adds foo
-subunit-tags foo -bar -> adds foo and removes bar
-"""
-
-import sys
-
-from subunit import tag_stream
-
-sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
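
The add/remove syntax in the docstring above ("foo" adds a tag, "-bar" removes
one) reduces to a simple split; a standalone sketch of that rule (illustrative
only, mirroring the tags_to_new_gone helper removed later in this patch):

    def split_tags(tags):
        # A leading '-' requests removal; anything else requests addition.
        new_tags, gone_tags = set(), set()
        for tag in tags:
            if tag.startswith('-'):
                gone_tags.add(tag[1:])
            else:
                new_tags.add(tag)
        return new_tags, gone_tags

    print(split_tags(['foo', '-bar']))  # ({'foo'}, {'bar'})
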
diff --git a/lib/subunit/filters/subunit2csv b/lib/subunit/filters/subunit2csv
deleted file mode 100755
index 4adf5cd..0000000
--- a/lib/subunit/filters/subunit2csv
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Turn a subunit stream into a CSV"""
-
-from testtools import StreamToExtendedDecorator
-
-from subunit.filters import run_filter_script
-from subunit.test_results import CsvResult
-
-
-run_filter_script(lambda output:StreamToExtendedDecorator(CsvResult(output)),
-    __doc__, protocol_version=2)
diff --git a/lib/subunit/filters/subunit2gtk b/lib/subunit/filters/subunit2gtk
deleted file mode 100755
index 78b4309..0000000
--- a/lib/subunit/filters/subunit2gtk
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-### The GTK progress bar __init__ function is derived from the pygtk tutorial:
-# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
-# 
-# The GTK Tutorial is Copyright (C) 1997 Ian Main.
-# 
-# Copyright (C) 1998-1999 Tony Gale.
-# 
-# Permission is granted to make and distribute verbatim copies of this manual
-# provided the copyright notice and this permission notice are preserved on all
-# copies.
-# 
-# Permission is granted to copy and distribute modified versions of this
-# document under the conditions for verbatim copying, provided that this
-# copyright notice is included exactly as in the original, and that the entire
-# resulting derived work is distributed under the terms of a permission notice
-# identical to this one.
-# 
-# Permission is granted to copy and distribute translations of this document
-# into another language, under the above conditions for modified versions.
-# 
-# If you are intending to incorporate this document into a published work,
-# please contact the maintainer, and we will make an effort to ensure that you
-# have the most up to date information available.
-# 
-# There is no guarantee that this document lives up to its intended purpose.
-# This is simply provided as a free resource. As such, the authors and
-# maintainers of the information provided within can not make any guarantee
-# that the information is even accurate.
-
-"""Display a subunit stream in a gtk progress window."""
-
-import sys
-import threading
-import unittest
-
-import pygtk
-pygtk.require('2.0')
-import gtk, gtk.gdk, gobject
-
-from testtools import StreamToExtendedDecorator
-
-from subunit import (
-    PROGRESS_POP,
-    PROGRESS_PUSH,
-    PROGRESS_SET,
-    ByteStreamToStreamResult,
-    )
-from subunit.progress_model import  ProgressModel
-
-
-class GTKTestResult(unittest.TestResult):
-
-    def __init__(self):
-        super(GTKTestResult, self).__init__()
-        # Instance variables (in addition to TestResult)
-        self.window = None
-        self.run_label = None
-        self.ok_label = None
-        self.not_ok_label = None
-        self.total_tests = None
-
-        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
-        self.window.set_resizable(True)
-
-        self.window.connect("destroy", gtk.main_quit)
-        self.window.set_title("Tests...")
-        self.window.set_border_width(0)
-
-        vbox = gtk.VBox(False, 5)
-        vbox.set_border_width(10)
-        self.window.add(vbox)
-        vbox.show()
-
-        # Create a centering alignment object
-        align = gtk.Alignment(0.5, 0.5, 0, 0)
-        vbox.pack_start(align, False, False, 5)
-        align.show()
-
-        # Create the ProgressBar
-        self.pbar = gtk.ProgressBar()
-        align.add(self.pbar)
-        self.pbar.set_text("Running")
-        self.pbar.show()
-        self.progress_model = ProgressModel()
-
-        separator = gtk.HSeparator()
-        vbox.pack_start(separator, False, False, 0)
-        separator.show()
-
-        # rows, columns, homogeneous
-        table = gtk.Table(2, 3, False)
-        vbox.pack_start(table, False, True, 0)
-        table.show()
-        # Show summary details about the run. Could use an expander.
-        label = gtk.Label("Run:")
-        table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
-            gtk.EXPAND | gtk.FILL, 5, 5)
-        label.show()
-        self.run_label = gtk.Label("N/A")
-        table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
-            gtk.EXPAND | gtk.FILL, 5, 5)
-        self.run_label.show()
-
-        label = gtk.Label("OK:")
-        table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
-            gtk.EXPAND | gtk.FILL, 5, 5)
-        label.show()
-        self.ok_label = gtk.Label("N/A")
-        table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
-            gtk.EXPAND | gtk.FILL, 5, 5)
-        self.ok_label.show()
-
-        label = gtk.Label("Not OK:")
-        table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
-            gtk.EXPAND | gtk.FILL, 5, 5)
-        label.show()
-        self.not_ok_label = gtk.Label("N/A")
-        table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
-            gtk.EXPAND | gtk.FILL, 5, 5)
-        self.not_ok_label.show()
-
-        self.window.show()
-        # For the demo.
-        self.window.set_keep_above(True)
-        self.window.present()
-
-    def stopTest(self, test):
-        super(GTKTestResult, self).stopTest(test)
-        gobject.idle_add(self._stopTest)
-
-    def _stopTest(self):
-        self.progress_model.advance()
-        if self.progress_model.width() == 0:
-            self.pbar.pulse()
-        else:
-            pos = self.progress_model.pos()
-            width = self.progress_model.width()
-            percentage = (pos / float(width))
-            self.pbar.set_fraction(percentage)
-
-    def stopTestRun(self):
-        try:
-            super(GTKTestResult, self).stopTestRun()
-        except AttributeError:
-            pass
-        gobject.idle_add(self.pbar.set_text, 'Finished')
-
-    def addError(self, test, err):
-        super(GTKTestResult, self).addError(test, err)
-        gobject.idle_add(self.update_counts)
-
-    def addFailure(self, test, err):
-        super(GTKTestResult, self).addFailure(test, err)
-        gobject.idle_add(self.update_counts)
-
-    def addSuccess(self, test):
-        super(GTKTestResult, self).addSuccess(test)
-        gobject.idle_add(self.update_counts)
-
-    def addSkip(self, test, reason):
-        # addSkip is new in Python 2.7/3.1
-        addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
-        if callable(addSkip):
-            addSkip(test, reason)
-        gobject.idle_add(self.update_counts)
-
-    def addExpectedFailure(self, test, err):
-        # addExpectedFailure is new in Python 2.7/3.1
-        addExpectedFailure = getattr(super(GTKTestResult, self),
-            'addExpectedFailure', None)
-        if callable(addExpectedFailure):
-            addExpectedFailure(test, err)
-        gobject.idle_add(self.update_counts)
-
-    def addUnexpectedSuccess(self, test):
-        # addUnexpectedSuccess is new in Python 2.7/3.1
-        addUnexpectedSuccess = getattr(super(GTKTestResult, self),
-            'addUnexpectedSuccess', None)
-        if callable(addUnexpectedSuccess):
-            addUnexpectedSuccess(test)
-        gobject.idle_add(self.update_counts)
-
-    def progress(self, offset, whence):
-        if whence == PROGRESS_PUSH:
-            self.progress_model.push()
-        elif whence == PROGRESS_POP:
-            self.progress_model.pop()
-        elif whence == PROGRESS_SET:
-            self.total_tests = offset
-            self.progress_model.set_width(offset)
-        else:
-            self.total_tests += offset
-            self.progress_model.adjust_width(offset)
-
-    def time(self, a_datetime):
-        # We don't try to estimate completion yet.
-        pass
-
-    def update_counts(self):
-        self.run_label.set_text(str(self.testsRun))
-        bad = len(self.failures + self.errors)
-        self.ok_label.set_text(str(self.testsRun - bad))
-        self.not_ok_label.set_text(str(bad))
-
-gobject.threads_init()
-result = StreamToExtendedDecorator(GTKTestResult())
-test = ByteStreamToStreamResult(sys.stdin, non_subunit_name='stdout')
-# Get setup
-while gtk.events_pending():
-  gtk.main_iteration()
-# Start IO
-def run_and_finish():
-    test.run(result)
-    result.stopTestRun()
-t = threading.Thread(target=run_and_finish)
-t.daemon = True
-result.startTestRun()
-t.start()
-gtk.main()
-if result.decorated.wasSuccessful():
-    exit_code = 0
-else:
-    exit_code = 1
-sys.exit(exit_code)
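
The progress handling in GTKTestResult above amounts to a position/width ratio
kept by ProgressModel; a minimal sketch of the same bookkeeping without GTK
(class and method names are illustrative; the real ProgressModel also supports
nested push/pop scopes):

    class FlatProgress(object):
        def __init__(self):
            self.pos = 0
            self._width = 0

        def set_width(self, width):
            self._width = width

        def adjust_width(self, offset):
            self._width += offset

        def advance(self):
            self.pos += 1

        def width(self):
            return self._width

        def fraction(self):
            # When the width is unknown (0), callers pulse the bar instead.
            return self.pos / float(self._width) if self._width else None

    p = FlatProgress()
    p.set_width(4)
    p.advance()
    print(p.fraction())  # 0.25
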
diff --git a/lib/subunit/filters/subunit2junitxml b/lib/subunit/filters/subunit2junitxml
deleted file mode 100755
index 8e827d5..0000000
--- a/lib/subunit/filters/subunit2junitxml
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Filter a subunit stream to get aggregate statistics."""
-
-
-import sys
-
-from testtools import StreamToExtendedDecorator
-
-from subunit.filters import run_filter_script
-
-try:
-    from junitxml import JUnitXmlResult
-except ImportError:
-    sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
-        "http://pypi.python.org/pypi/junitxml) is required for this filter.")
-    raise
-
-
-run_filter_script(
-    lambda output:StreamToExtendedDecorator(JUnitXmlResult(output)), __doc__,
-    protocol_version=2)
diff --git a/lib/subunit/filters/subunit2pyunit b/lib/subunit/filters/subunit2pyunit
deleted file mode 100755
index d10ceea..0000000
--- a/lib/subunit/filters/subunit2pyunit
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Display a subunit stream through python's unittest test runner."""
-
-from operator import methodcaller
-from optparse import OptionParser
-import sys
-import unittest
-
-from testtools import StreamToExtendedDecorator, DecorateTestCaseResult, StreamResultRouter
-
-from subunit import ByteStreamToStreamResult
-from subunit.filters import find_stream
-from subunit.test_results import CatFiles
-
-parser = OptionParser(description=__doc__)
-parser.add_option("--no-passthrough", action="store_true",
-    help="Hide all non subunit input.", default=False, dest="no_passthrough")
-parser.add_option("--progress", action="store_true",
-    help="Use bzrlib's test reporter (requires bzrlib)",
-        default=False)
-(options, args) = parser.parse_args()
-test = ByteStreamToStreamResult(
-    find_stream(sys.stdin, args), non_subunit_name='stdout')
-def wrap_result(result):
-    result = StreamToExtendedDecorator(result)
-    if not options.no_passthrough:
-        result = StreamResultRouter(result)
-        result.add_rule(CatFiles(sys.stdout), 'test_id', test_id=None)
-    return result
-test = DecorateTestCaseResult(test, wrap_result,
-    before_run=methodcaller('startTestRun'),
-    after_run=methodcaller('stopTestRun'))
-if options.progress:
-    from bzrlib.tests import TextTestRunner
-    from bzrlib import ui
-    ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
-    runner = TextTestRunner()
-else:
-    runner = unittest.TextTestRunner(verbosity=2)
-if runner.run(test).wasSuccessful():
-    exit_code = 0
-else:
-    exit_code = 1
-sys.exit(exit_code)
diff --git a/lib/subunit/filters/tap2subunit b/lib/subunit/filters/tap2subunit
deleted file mode 100755
index c571972..0000000
--- a/lib/subunit/filters/tap2subunit
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""A filter that reads a TAP stream and outputs a subunit stream.
-
-More information on TAP is available at
-http://testanything.org/wiki/index.php/Main_Page.
-"""
-
-import sys
-
-from subunit import TAP2SubUnit
-sys.exit(TAP2SubUnit(sys.stdin, sys.stdout))
diff --git a/lib/subunit/libcppunit_subunit.pc.in b/lib/subunit/libcppunit_subunit.pc.in
deleted file mode 100644
index 98982c7..0000000
--- a/lib/subunit/libcppunit_subunit.pc.in
+++ /dev/null
@@ -1,11 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: cppunit subunit listener
-Description: Subunit output listener for the CPPUnit test library.
-URL: http://launchpad.net/subunit
-Version: @VERSION@
-Libs: -L${libdir} -lsubunit
-Cflags: -I${includedir}
diff --git a/lib/subunit/libsubunit.pc.in b/lib/subunit/libsubunit.pc.in
deleted file mode 100644
index 6756414..0000000
--- a/lib/subunit/libsubunit.pc.in
+++ /dev/null
@@ -1,11 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: subunit
-Description: Subunit test protocol library.
-URL: http://launchpad.net/subunit
-Version: @VERSION@
-Libs: -L${libdir} -lsubunit
-Cflags: -I${includedir}
diff --git a/lib/subunit/perl/Makefile.PL.in b/lib/subunit/perl/Makefile.PL.in
deleted file mode 100755
index 749d468..0000000
--- a/lib/subunit/perl/Makefile.PL.in
+++ /dev/null
@@ -1,21 +0,0 @@
-use ExtUtils::MakeMaker;
-WriteMakefile(
-    'PREFIX' => '@prefix@',
-    'NAME'	=> 'Subunit',
-    'VERSION' => '@SUBUNIT_VERSION@',
-    'test' => { 'TESTS' => 'tests/*.pl' },
-    'PMLIBDIRS' => [ 'lib' ],
-    'EXE_FILES' => [ '@abs_srcdir@/subunit-diff' ],
-);
-sub MY::postamble {
-<<'EOT';
-check: # test
-
-uninstall_distcheck:
-	find $(DESTDIR)$(INSTALLSITEARCH) -type f -exec rm {} \;
-	rm MYMETA.yml
-
-VPATH = @srcdir@
-.PHONY: uninstall_distcheck
-EOT
-}
diff --git a/lib/subunit/perl/lib/Subunit.pm b/lib/subunit/perl/lib/Subunit.pm
deleted file mode 100644
index 72aa1eb..0000000
--- a/lib/subunit/perl/lib/Subunit.pm
+++ /dev/null
@@ -1,183 +0,0 @@
-# Perl module for parsing and generating the Subunit protocol
-# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer at samba.org>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-
-package Subunit;
-use POSIX;
-
-require Exporter;
- at ISA = qw(Exporter);
- at EXPORT_OK = qw(parse_results $VERSION);
-
-use vars qw ( $VERSION );
-
-$VERSION = '0.0.2';
-
-use strict;
-
-sub parse_results($$$)
-{
-	my ($msg_ops, $statistics, $fh) = @_;
-	my $expected_fail = 0;
-	my $unexpected_fail = 0;
-	my $unexpected_err = 0;
-	my $open_tests = [];
-
-	while(<$fh>) {
-		if (/^test: (.+)\n/) {
-			$msg_ops->control_msg($_);
-			$msg_ops->start_test($1);
-			push (@$open_tests, $1);
-		} elsif (/^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z\n/) {
-			$msg_ops->report_time(mktime($6, $5, $4, $3, $2-1, $1-1900));
-		} elsif (/^(success|successful|failure|fail|skip|knownfail|error|xfail): (.*?)( \[)?([ \t]*)\n/) {
-			$msg_ops->control_msg($_);
-			my $result = $1;
-			my $testname = $2;
-			my $reason = undef;
-			if ($3) {
-				$reason = "";
-				# reason may be specified in next lines
-				my $terminated = 0;
-				while(<$fh>) {
-					$msg_ops->control_msg($_);
-					if ($_ eq "]\n") { $terminated = 1; last; } else { $reason .= $_; }
-				}
-				
-				unless ($terminated) {
-					$statistics->{TESTS_ERROR}++;
-					$msg_ops->end_test($testname, "error", 1, "reason ($result) interrupted");
-					return 1;
-				}
-			}
-			if ($result eq "success" or $result eq "successful") {
-				pop(@$open_tests); #FIXME: Check that popped value == $testname 
-				$statistics->{TESTS_EXPECTED_OK}++;
-				$msg_ops->end_test($testname, $result, 0, $reason);
-			} elsif ($result eq "xfail" or $result eq "knownfail") {
-				pop(@$open_tests); #FIXME: Check that popped value == $testname
-				$statistics->{TESTS_EXPECTED_FAIL}++;
-				$msg_ops->end_test($testname, $result, 0, $reason);
-				$expected_fail++;
-			} elsif ($result eq "failure" or $result eq "fail") {
-				pop(@$open_tests); #FIXME: Check that popped value == $testname
-				$statistics->{TESTS_UNEXPECTED_FAIL}++;
-				$msg_ops->end_test($testname, $result, 1, $reason);
-				$unexpected_fail++;
-			} elsif ($result eq "skip") {
-				$statistics->{TESTS_SKIP}++;
-				my $last = pop(@$open_tests);
-				if (defined($last) and $last ne $testname) {
-					push (@$open_tests, $testname);
-				}
-				$msg_ops->end_test($testname, $result, 0, $reason);
-			} elsif ($result eq "error") {
-				$statistics->{TESTS_ERROR}++;
-				pop(@$open_tests); #FIXME: Check that popped value == $testname
-				$msg_ops->end_test($testname, $result, 1, $reason);
-				$unexpected_err++;
-			} 
-		} else {
-			$msg_ops->output_msg($_);
-		}
-	}
-
-	while ($#$open_tests+1 > 0) {
-		$msg_ops->end_test(pop(@$open_tests), "error", 1,
-				   "was started but never finished!");
-		$statistics->{TESTS_ERROR}++;
-		$unexpected_err++;
-	}
-
-	return 1 if $unexpected_err > 0;
-	return 1 if $unexpected_fail > 0;
-	return 0;
-}
-
-sub start_test($)
-{
-	my ($testname) = @_;
-	print "test: $testname\n";
-}
-
-sub end_test($$;$)
-{
-	my $name = shift;
-	my $result = shift;
-	my $reason = shift;
-	if ($reason) {
-		print "$result: $name [\n";
-		print "$reason";
-		print "]\n";
-	} else {
-		print "$result: $name\n";
-	}
-}
-
-sub skip_test($;$)
-{
-	my $name = shift;
-	my $reason = shift;
-	end_test($name, "skip", $reason);
-}
-
-sub fail_test($;$)
-{
-	my $name = shift;
-	my $reason = shift;
-	end_test($name, "failure", $reason);
-}
-
-sub success_test($;$)
-{
-	my $name = shift;
-	my $reason = shift;
-	end_test($name, "success", $reason);
-}
-
-sub xfail_test($;$)
-{
-	my $name = shift;
-	my $reason = shift;
-	end_test($name, "xfail", $reason);
-}
-
-sub report_time($)
-{
-	my ($time) = @_;
-	my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime($time);
-	printf "time: %04d-%02d-%02d %02d:%02d:%02dZ\n", $year+1900, $mon+1, $mday, $hour, $min, $sec;
-}
-
-sub progress_pop()
-{
-	print "progress: pop\n";
-}
-
-sub progress_push()
-{
-	print "progress: push\n";
-}
-
-sub progress($;$)
-{
-	my ($count, $whence) = @_;
-
-	unless(defined($whence)) {
-		$whence = "";
-	}
-
-	print "progress: $whence$count\n";
-}
-
-1;
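
The writer half of Subunit.pm emits the subunit v1 text protocol directly; the
line format that start_test/end_test print above can be sketched in a few
lines of Python (function and test names are illustrative):

    import sys

    def start_test(name, out=sys.stdout):
        out.write("test: %s\n" % name)

    def end_test(name, result, reason=None, out=sys.stdout):
        if reason:
            # A reason is bracketed on following lines, terminated by "]".
            out.write("%s: %s [\n" % (result, name))
            out.write(reason if reason.endswith("\n") else reason + "\n")
            out.write("]\n")
        else:
            out.write("%s: %s\n" % (result, name))

    start_test("samba.example.test_ok")
    end_test("samba.example.test_ok", "success")
    start_test("samba.example.test_bad")
    end_test("samba.example.test_bad", "failure", reason="assertion failed")
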
diff --git a/lib/subunit/perl/lib/Subunit/Diff.pm b/lib/subunit/perl/lib/Subunit/Diff.pm
deleted file mode 100644
index e7841c3..0000000
--- a/lib/subunit/perl/lib/Subunit/Diff.pm
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/perl
-# Diff two subunit streams
-# Copyright (C) Jelmer Vernooij <jelmer at samba.org>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-
-package Subunit::Diff;
-
-use strict;
-
-use Subunit qw(parse_results);
-
-sub control_msg() { }
-sub report_time($$) { }
-
-sub output_msg($$)
-{
-	my ($self, $msg) = @_;
-
-	# No output for now, perhaps later diff this as well ?
-}
-
-sub start_test($$)
-{
-	my ($self, $testname) = @_;
-}
-
-sub end_test($$$$$)
-{
-	my ($self, $testname, $result, $unexpected, $reason) = @_;
-
-	$self->{$testname} = $result;
-}
-
-sub new {
-	my ($class) = @_;
-
-	my $self = { 
-	};
-	bless($self, $class);
-}
-
-sub from_file($)
-{
-	my ($path) = @_;
-	my $statistics = {
-		TESTS_UNEXPECTED_OK => 0,
-		TESTS_EXPECTED_OK => 0,
-		TESTS_UNEXPECTED_FAIL => 0,
-		TESTS_EXPECTED_FAIL => 0,
-		TESTS_ERROR => 0,
-		TESTS_SKIP => 0,
-	};
-
-	my $ret = new Subunit::Diff();
-	open(IN, $path) or return;
-	parse_results($ret, $statistics, *IN);
-	close(IN);
-	return $ret;
-}
-
-sub diff($$)
-{
-	my ($old, $new) = @_;
-	my $ret = {};
-
-	foreach my $testname (keys %$old) {
-		if ($new->{$testname} ne $old->{$testname}) {
-			$ret->{$testname} = [$old->{$testname}, $new->{$testname}];
-		}
-	}
-
-	return $ret;
-}
-
-1;
diff --git a/lib/subunit/perl/subunit-diff b/lib/subunit/perl/subunit-diff
deleted file mode 100755
index 581e832..0000000
--- a/lib/subunit/perl/subunit-diff
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/perl
-# Diff two subunit streams
-# Copyright (C) Jelmer Vernooij <jelmer at samba.org>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-
-use Getopt::Long;
-use strict;
-use FindBin qw($RealBin $Script);
-use lib "$RealBin/lib";
-use Subunit::Diff;
-
-my $old = Subunit::Diff::from_file($ARGV[0]);
-my $new = Subunit::Diff::from_file($ARGV[1]);
-
-my $ret = Subunit::Diff::diff($old, $new);
-
-foreach my $e (sort(keys %$ret)) {
-	printf "%s: %s -> %s\n", $e, $ret->{$e}[0], $ret->{$e}[1];
-}
-
-0;
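
The comparison subunit-diff performs is a keyed diff of two
{test name => result} maps; an equivalent sketch in Python (names illustrative,
mirroring Subunit::Diff::diff above):

    def diff_results(old, new):
        # Report every test from the old run whose outcome differs in the new run.
        changed = {}
        for name, old_result in old.items():
            new_result = new.get(name)
            if new_result != old_result:
                changed[name] = (old_result, new_result)
        return changed

    print(diff_results({'t1': 'success', 't2': 'failure'},
                       {'t1': 'success', 't2': 'success'}))
    # {'t2': ('failure', 'success')}
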
diff --git a/lib/subunit/python/iso8601/LICENSE b/lib/subunit/python/iso8601/LICENSE
deleted file mode 100644
index 5ca93da..0000000
--- a/lib/subunit/python/iso8601/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2007 Michael Twomey
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/subunit/python/iso8601/README b/lib/subunit/python/iso8601/README
deleted file mode 100644
index 5ec9d45..0000000
--- a/lib/subunit/python/iso8601/README
+++ /dev/null
@@ -1,26 +0,0 @@
-A simple package to deal with ISO 8601 date time formats.
-
-ISO 8601 defines a neutral, unambiguous date string format, which also
-has the property of sorting naturally.
-
-e.g. YYYY-MM-DDTHH:MM:SSZ or 2007-01-25T12:00:00Z
-
-Currently this covers only the most common date formats encountered; not
-all of ISO 8601 is handled.
-
-Currently the following formats are handled:
-
-* 2006-01-01T00:00:00Z
-* 2006-01-01T00:00:00[+-]00:00
-
-I'll add more as I encounter them in my day to day life. Patches with 
-new formats and tests will be gratefully accepted of course :)
-
-References:
-
-* http://www.cl.cam.ac.uk/~mgk25/iso-time.html - simple overview
-
-* http://hydracen.com/dx/iso8601.htm - more detailed enumeration of
-  valid formats.
-
-See the LICENSE file for the license this package is released under.
diff --git a/lib/subunit/python/iso8601/README.subunit b/lib/subunit/python/iso8601/README.subunit
deleted file mode 100644
index d1ed8a1..0000000
--- a/lib/subunit/python/iso8601/README.subunit
+++ /dev/null
@@ -1,5 +0,0 @@
-This is a [slightly rearranged] import of http://pypi.python.org/pypi/iso8601/
-version 0.1.4. The OS X hidden files have been stripped, and the package
-turned into a single module, to simplify installation. The remainder of the
-source distribution is included in the subunit source tree at python/iso8601
-for reference.
diff --git a/lib/subunit/python/iso8601/setup.py b/lib/subunit/python/iso8601/setup.py
deleted file mode 100644
index cdb61ec..0000000
--- a/lib/subunit/python/iso8601/setup.py
+++ /dev/null
@@ -1,58 +0,0 @@
-try:
-    from setuptools import setup
-except ImportError:
-    from distutils.core import setup
-
-long_description="""Simple module to parse ISO 8601 dates
-
-This module parses the most common forms of ISO 8601 date strings (e.g.
-2007-01-14T20:34:22+00:00) into datetime objects.
-
->>> import iso8601
->>> iso8601.parse_date("2007-01-25T12:00:00Z")
-datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
->>>
-
-Changes
-=======
-
-0.1.4
------
-
-* The default_timezone argument wasn't being passed through correctly,
-  UTC was being used in every case. Fixes issue 10.
-
-0.1.3
------
-
-* Fixed the microsecond handling, the generated microsecond values were 
-  way too small. Fixes issue 9.
-
-0.1.2
------
-
-* Adding ParseError to __all__ in iso8601 module, allows people to import it.
-  Addresses issue 7.
-* Be a little more flexible when dealing with dates without leading zeroes.
-  This violates the spec a little, but handles more dates as seen in the 
-  field. Addresses issue 6.
-* Allow date/time separators other than T.
-
-0.1.1
------
-
-* When parsing dates without a timezone the specified default is used. If no
-  default is specified then UTC is used. Addresses issue 4.
-"""
-
-setup(
-    name="iso8601",
-    version="0.1.4",
-    description=long_description.split("\n")[0],
-    long_description=long_description,
-    author="Michael Twomey",
-    author_email="micktwomey+iso8601 at gmail.com",
-    url="http://code.google.com/p/pyiso8601/",
-    packages=["iso8601"],
-    license="MIT",
-)
diff --git a/lib/subunit/python/iso8601/test_iso8601.py b/lib/subunit/python/iso8601/test_iso8601.py
deleted file mode 100644
index ff9e273..0000000
--- a/lib/subunit/python/iso8601/test_iso8601.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import iso8601
-
-def test_iso8601_regex():
-    assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
-
-def test_timezone_regex():
-    assert iso8601.TIMEZONE_REGEX.match("+01:00")
-    assert iso8601.TIMEZONE_REGEX.match("+00:00")
-    assert iso8601.TIMEZONE_REGEX.match("+01:20")
-    assert iso8601.TIMEZONE_REGEX.match("-01:00")
-
-def test_parse_date():
-    d = iso8601.parse_date("2006-10-20T15:34:56Z")
-    assert d.year == 2006
-    assert d.month == 10
-    assert d.day == 20
-    assert d.hour == 15
-    assert d.minute == 34
-    assert d.second == 56
-    assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_fraction():
-    d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
-    assert d.year == 2006
-    assert d.month == 10
-    assert d.day == 20
-    assert d.hour == 15
-    assert d.minute == 34
-    assert d.second == 56
-    assert d.microsecond == 123000
-    assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_fraction_2():
-    """From bug 6
-    
-    """
-    d = iso8601.parse_date("2007-5-7T11:43:55.328Z'")
-    assert d.year == 2007
-    assert d.month == 5
-    assert d.day == 7
-    assert d.hour == 11
-    assert d.minute == 43
-    assert d.second == 55
-    assert d.microsecond == 328000
-    assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_tz():
-    d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
-    assert d.year == 2006
-    assert d.month == 10
-    assert d.day == 20
-    assert d.hour == 15
-    assert d.minute == 34
-    assert d.second == 56
-    assert d.microsecond == 123000
-    assert d.tzinfo.tzname(None) == "+02:30"
-    offset = d.tzinfo.utcoffset(None)
-    assert offset.days == 0
-    assert offset.seconds == 60 * 60 * 2.5
-
-def test_parse_invalid_date():
-    try:
-        iso8601.parse_date(None)
-    except iso8601.ParseError:
-        pass
-    else:
-        assert 1 == 2
-
-def test_parse_invalid_date2():
-    try:
-        iso8601.parse_date("23")
-    except iso8601.ParseError:
-        pass
-    else:
-        assert 1 == 2
-
-def test_parse_no_timezone():
-    """issue 4 - Handle datetime string without timezone
-    
-    This tests what happens when you parse a date with no timezone. While not
-    strictly correct this is quite common. I'll assume UTC for the time zone
-    in this case.
-    """
-    d = iso8601.parse_date("2007-01-01T08:00:00")
-    assert d.year == 2007
-    assert d.month == 1
-    assert d.day == 1
-    assert d.hour == 8
-    assert d.minute == 0
-    assert d.second == 0
-    assert d.microsecond == 0
-    assert d.tzinfo == iso8601.UTC
-
-def test_parse_no_timezone_different_default():
-    tz = iso8601.FixedOffset(2, 0, "test offset")
-    d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
-    assert d.tzinfo == tz
-
-def test_space_separator():
-    """Handle a separator other than T
-    
-    """
-    d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
-    assert d.year == 2007
-    assert d.month == 6
-    assert d.day == 23
-    assert d.hour == 6
-    assert d.minute == 40
-    assert d.second == 34
-    assert d.microsecond == 0
-    assert d.tzinfo == iso8601.UTC
diff --git a/lib/subunit/python/subunit/__init__.py b/lib/subunit/python/subunit/__init__.py
deleted file mode 100644
index 2f95f53..0000000
--- a/lib/subunit/python/subunit/__init__.py
+++ /dev/null
@@ -1,1320 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Subunit - a streaming test protocol
-
-Overview
-++++++++
-
-The ``subunit`` Python package provides a number of ``unittest`` extensions
-which can be used to cause tests to output Subunit, to parse Subunit streams
-into test activity, perform seamless test isolation within a regular test
-case and variously sort, filter and report on test runs.
-
-
-Key Classes
------------
-
-The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
-extension which will translate a test run into a Subunit stream.
-
-The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
-protocol and the ``unittest.TestCase`` object protocol. It is used to translate
-a stream into a test run, which regular ``unittest.TestResult`` objects can
-process and report/inspect.
-
-Subunit has support for non-blocking usage too, for use with asyncore or
-Twisted. See the ``TestProtocolServer`` parser class for more details.
-
-Subunit includes extensions to the Python ``TestResult`` protocol. These are
-all done in a compatible manner: ``TestResult`` objects that do not implement
-the extension methods will not cause errors to be raised; instead the extension
-will either lose fidelity (for instance, folding expected failures to success
-in Python versions < 2.7 or 3.1), or discard the extended data (for extra
-details, tags, timestamping and progress markers).
-
-The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
-``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
-which can be used instead of the usual python unittest parameter.
-When used the value of details should be a dict from ``string`` to
-``testtools.content.Content`` objects. This is a draft API being worked on with
-the Python Testing In Python mail list, with the goal of permitting a common
-way to provide additional data beyond a traceback, such as captured data from
-disk, logging messages etc. The reference for this API is in testtools (0.9.0
-and newer).
-
-The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
-remove tags in the test run that is currently executing. If called when no
-test is in progress (that is, if called outside of the ``startTest``,
-``stopTest`` pair), the tags apply to all subsequent tests. If called
-when a test is in progress, then the tags only apply to that test.
-
-The ``time(a_datetime)`` method is called (if present) when a ``time:``
-directive is encountered in a Subunit stream. This is used to tell a TestResult
-about the time that events in the stream occurred at, to allow reconstructing
-test timing from a stream.
-
-The ``progress(offset, whence)`` method controls progress data for a stream.
-The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
-subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
-ignore the offset parameter.
-
-
-Python test support
--------------------
-
-``subunit.run`` is a convenience wrapper to run a Python test suite via
-the command line, reporting via Subunit::
-
-  $ python -m subunit.run mylib.tests.test_suite
-
-The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
-tests, allowing isolation between the test runner and some tests.
-
-Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
-tests that will fork() before that individual test is run.
-
-``ExecTestCase`` is a convenience wrapper for running an external
-program to get a Subunit stream and then report that back to an arbitrary
-result object::
-
- class AggregateTests(subunit.ExecTestCase):
-
-     def test_script_one(self):
-         './bin/script_one'
-
-     def test_script_two(self):
-         './bin/script_two'
-
- # Normally your test loading would take care of this automatically;
- # it is only spelt out in detail here for clarity.
- suite = unittest.TestSuite([AggregateTests("test_script_one"),
-     AggregateTests("test_script_two")])
- # Create any TestResult class you like.
- result = unittest._TextTestResult(sys.stdout)
- # And run your suite as normal, Subunit will exec each external script as
- # needed and report to your result object.
- suite.run(result)
-
-Utility modules
----------------
-
-* subunit.chunked contains HTTP chunked encoding/decoding logic.
-* subunit.test_results contains TestResult helper classes.
-"""
-
-import os
-import re
-import subprocess
-import sys
-import unittest
-try:
-    from io import UnsupportedOperation as _UnsupportedOperation
-except ImportError:
-    _UnsupportedOperation = AttributeError
-
-from extras import safe_hasattr
-from testtools import content, content_type, ExtendedToOriginalDecorator
-from testtools.content import TracebackContent
-from testtools.compat import _b, _u, BytesIO, StringIO
-try:
-    from testtools.testresult.real import _StringException
-    RemoteException = _StringException
-except ImportError:
-    raise ImportError ("testtools.testresult.real does not contain "
-        "_StringException, check your version.")
-from testtools import testresult, CopyStreamResult
-
-from subunit import chunked, details, iso8601, test_results
-from subunit.v2 import ByteStreamToStreamResult, StreamResultToBytes
-
-# same format as sys.version_info: "A tuple containing the five components of
-# the version number: major, minor, micro, releaselevel, and serial. All
-# values except releaselevel are integers; the release level is 'alpha',
-# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
-# Python version 2.0 is (2, 0, 0, 'final', 0)."  Additionally we use a
-# releaselevel of 'dev' for unreleased under-development code.
-#
-# If the releaselevel is 'alpha' then the major/minor/micro components are not
-# established at this point, and setup.py will use a version of next-$(revno).
-# If the releaselevel is 'final', then the tarball will be major.minor.micro.
-# Otherwise it is major.minor.micro~$(revno).
-
-__version__ = (0, 0, 21, 'final', 0)
-
-PROGRESS_SET = 0
-PROGRESS_CUR = 1
-PROGRESS_PUSH = 2
-PROGRESS_POP = 3
-
-
-def test_suite():
-    import subunit.tests
-    return subunit.tests.test_suite()
-
-
-def join_dir(base_path, path):
-    """
-    Returns an absolute path to C{path}, calculated relative to the parent
-    of C{base_path}.
-
-    @param base_path: A path to a file or directory.
-    @param path: An absolute path, or a path relative to the containing
-    directory of C{base_path}.
-
-    @return: An absolute path to C{path}.
-    """
-    return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
-
-
-def tags_to_new_gone(tags):
-    """Split a list of tags into a new_set and a gone_set."""
-    new_tags = set()
-    gone_tags = set()
-    for tag in tags:
-        if tag[0] == '-':
-            gone_tags.add(tag[1:])
-        else:
-            new_tags.add(tag)
-    return new_tags, gone_tags
-
-
-class DiscardStream(object):
-    """A filelike object which discards what is written to it."""
-
-    def fileno(self):
-        raise _UnsupportedOperation()
-
-    def write(self, bytes):
-        pass
-
-    def read(self, len=0):
-        return _b('')
-
-
-class _ParserState(object):
-    """State for the subunit parser."""
-
-    def __init__(self, parser):
-        self.parser = parser
-        self._test_sym = (_b('test'), _b('testing'))
-        self._colon_sym = _b(':')
-        self._error_sym = (_b('error'),)
-        self._failure_sym = (_b('failure'),)
-        self._progress_sym = (_b('progress'),)
-        self._skip_sym = _b('skip')
-        self._success_sym = (_b('success'), _b('successful'))
-        self._tags_sym = (_b('tags'),)
-        self._time_sym = (_b('time'),)
-        self._xfail_sym = (_b('xfail'),)
-        self._uxsuccess_sym = (_b('uxsuccess'),)
-        self._start_simple = _u(" [")
-        self._start_multipart = _u(" [ multipart")
-
-    def addError(self, offset, line):
-        """An 'error:' directive has been read."""
-        self.parser.stdOutLineReceived(line)
-
-    def addExpectedFail(self, offset, line):
-        """An 'xfail:' directive has been read."""
-        self.parser.stdOutLineReceived(line)
-
-    def addFailure(self, offset, line):
-        """A 'failure:' directive has been read."""
-        self.parser.stdOutLineReceived(line)
-
-    def addSkip(self, offset, line):
-        """A 'skip:' directive has been read."""
-        self.parser.stdOutLineReceived(line)
-
-    def addSuccess(self, offset, line):
-        """A 'success:' directive has been read."""
-        self.parser.stdOutLineReceived(line)
-
-    def lineReceived(self, line):
-        """a line has been received."""
-        parts = line.split(None, 1)
-        if len(parts) == 2 and line.startswith(parts[0]):
-            cmd, rest = parts
-            offset = len(cmd) + 1
-            cmd = cmd.rstrip(self._colon_sym)
-            if cmd in self._test_sym:
-                self.startTest(offset, line)
-            elif cmd in self._error_sym:
-                self.addError(offset, line)
-            elif cmd in self._failure_sym:
-                self.addFailure(offset, line)
-            elif cmd in self._progress_sym:
-                self.parser._handleProgress(offset, line)
-            elif cmd in self._skip_sym:
-                self.addSkip(offset, line)
-            elif cmd in self._success_sym:
-                self.addSuccess(offset, line)
-            elif cmd in self._tags_sym:
-                self.parser._handleTags(offset, line)
-                self.parser.subunitLineReceived(line)
-            elif cmd in self._time_sym:
-                self.parser._handleTime(offset, line)
-                self.parser.subunitLineReceived(line)
-            elif cmd in self._xfail_sym:
-                self.addExpectedFail(offset, line)
-            elif cmd in self._uxsuccess_sym:
-                self.addUnexpectedSuccess(offset, line)
-            else:
-                self.parser.stdOutLineReceived(line)
-        else:
-            self.parser.stdOutLineReceived(line)
-
-    def lostConnection(self):
-        """Connection lost."""
-        self.parser._lostConnectionInTest(_u('unknown state of '))
-
-    def startTest(self, offset, line):
-        """A test start command received."""
-        self.parser.stdOutLineReceived(line)
-
-
-class _InTest(_ParserState):
-    """State for the subunit parser after reading a test: directive."""
-
-    def _outcome(self, offset, line, no_details, details_state):
-        """An outcome directive has been read.
-
-        :param no_details: Callable to call when no details are presented.
-        :param details_state: The state to switch to for details
-            processing of this outcome.
-        """
-        test_name = line[offset:-1].decode('utf8')
-        if self.parser.current_test_description == test_name:
-            self.parser._state = self.parser._outside_test
-            self.parser.current_test_description = None
-            no_details()
-            self.parser.client.stopTest(self.parser._current_test)
-            self.parser._current_test = None
-            self.parser.subunitLineReceived(line)
-        elif self.parser.current_test_description + self._start_simple == \
-            test_name:
-            self.parser._state = details_state
-            details_state.set_simple()
-            self.parser.subunitLineReceived(line)
-        elif self.parser.current_test_description + self._start_multipart == \
-            test_name:
-            self.parser._state = details_state
-            details_state.set_multipart()
-            self.parser.subunitLineReceived(line)
-        else:
-            self.parser.stdOutLineReceived(line)
-
-    def _error(self):
-        self.parser.client.addError(self.parser._current_test,
-            details={})
-
-    def addError(self, offset, line):
-        """An 'error:' directive has been read."""
-        self._outcome(offset, line, self._error,
-            self.parser._reading_error_details)
-
-    def _xfail(self):
-        self.parser.client.addExpectedFailure(self.parser._current_test,
-            details={})
-
-    def addExpectedFail(self, offset, line):
-        """An 'xfail:' directive has been read."""
-        self._outcome(offset, line, self._xfail,
-            self.parser._reading_xfail_details)
-
-    def _uxsuccess(self):
-        self.parser.client.addUnexpectedSuccess(self.parser._current_test)
-
-    def addUnexpectedSuccess(self, offset, line):
-        """A 'uxsuccess:' directive has been read."""
-        self._outcome(offset, line, self._uxsuccess,
-            self.parser._reading_uxsuccess_details)
-
-    def _failure(self):
-        self.parser.client.addFailure(self.parser._current_test, details={})
-
-    def addFailure(self, offset, line):
-        """A 'failure:' directive has been read."""
-        self._outcome(offset, line, self._failure,
-            self.parser._reading_failure_details)
-
-    def _skip(self):
-        self.parser.client.addSkip(self.parser._current_test, details={})
-
-    def addSkip(self, offset, line):
-        """A 'skip:' directive has been read."""
-        self._outcome(offset, line, self._skip,
-            self.parser._reading_skip_details)
-
-    def _succeed(self):
-        self.parser.client.addSuccess(self.parser._current_test, details={})
-
-    def addSuccess(self, offset, line):
-        """A 'success:' directive has been read."""
-        self._outcome(offset, line, self._succeed,
-            self.parser._reading_success_details)
-
-    def lostConnection(self):
-        """Connection lost."""
-        self.parser._lostConnectionInTest(_u(''))
-
-
-class _OutSideTest(_ParserState):
-    """State for the subunit parser outside of a test context."""
-
-    def lostConnection(self):
-        """Connection lost."""
-
-    def startTest(self, offset, line):
-        """A test start command received."""
-        self.parser._state = self.parser._in_test
-        test_name = line[offset:-1].decode('utf8')
-        self.parser._current_test = RemotedTestCase(test_name)
-        self.parser.current_test_description = test_name
-        self.parser.client.startTest(self.parser._current_test)
-        self.parser.subunitLineReceived(line)
-
-
-class _ReadingDetails(_ParserState):
-    """Common logic for readin state details."""
-
-    def endDetails(self):
-        """The end of a details section has been reached."""
-        self.parser._state = self.parser._outside_test
-        self.parser.current_test_description = None
-        self._report_outcome()
-        self.parser.client.stopTest(self.parser._current_test)
-
-    def lineReceived(self, line):
-        """a line has been received."""
-        self.details_parser.lineReceived(line)
-        self.parser.subunitLineReceived(line)
-
-    def lostConnection(self):
-        """Connection lost."""
-        self.parser._lostConnectionInTest(_u('%s report of ') %
-            self._outcome_label())
-
-    def _outcome_label(self):
-        """The label to describe this outcome."""
-        raise NotImplementedError(self._outcome_label)
-
-    def set_simple(self):
-        """Start a simple details parser."""
-        self.details_parser = details.SimpleDetailsParser(self)
-
-    def set_multipart(self):
-        """Start a multipart details parser."""
-        self.details_parser = details.MultipartDetailsParser(self)
-
-
-class _ReadingFailureDetails(_ReadingDetails):
-    """State for the subunit parser when reading failure details."""
-
-    def _report_outcome(self):
-        self.parser.client.addFailure(self.parser._current_test,
-            details=self.details_parser.get_details())
-
-    def _outcome_label(self):
-        return "failure"
-
-
-class _ReadingErrorDetails(_ReadingDetails):
-    """State for the subunit parser when reading error details."""
-
-    def _report_outcome(self):
-        self.parser.client.addError(self.parser._current_test,
-            details=self.details_parser.get_details())
-
-    def _outcome_label(self):
-        return "error"
-
-
-class _ReadingExpectedFailureDetails(_ReadingDetails):
-    """State for the subunit parser when reading xfail details."""
-
-    def _report_outcome(self):
-        self.parser.client.addExpectedFailure(self.parser._current_test,
-            details=self.details_parser.get_details())
-
-    def _outcome_label(self):
-        return "xfail"
-
-
-class _ReadingUnexpectedSuccessDetails(_ReadingDetails):
-    """State for the subunit parser when reading uxsuccess details."""
-
-    def _report_outcome(self):
-        self.parser.client.addUnexpectedSuccess(self.parser._current_test,
-            details=self.details_parser.get_details())
-
-    def _outcome_label(self):
-        return "uxsuccess"
-
-
-class _ReadingSkipDetails(_ReadingDetails):
-    """State for the subunit parser when reading skip details."""
-
-    def _report_outcome(self):
-        self.parser.client.addSkip(self.parser._current_test,
-            details=self.details_parser.get_details("skip"))
-
-    def _outcome_label(self):
-        return "skip"
-
-
-class _ReadingSuccessDetails(_ReadingDetails):
-    """State for the subunit parser when reading success details."""
-
-    def _report_outcome(self):
-        self.parser.client.addSuccess(self.parser._current_test,
-            details=self.details_parser.get_details("success"))
-
-    def _outcome_label(self):
-        return "success"
-
-
-class TestProtocolServer(object):
-    """A parser for subunit.
-
-    :ivar tags: The current tags associated with the protocol stream.
-    """
-
-    def __init__(self, client, stream=None, forward_stream=None):
-        """Create a TestProtocolServer instance.
-
-        :param client: An object meeting the unittest.TestResult protocol.
-        :param stream: The stream to which received lines that are not part of
-            the subunit protocol are written. This allows custom handling of
-            mixed protocols. By default, sys.stdout is used for convenience.
-            It should accept bytes via its write() method.
-        :param forward_stream: A stream to forward subunit lines to. This
-            allows a filter to forward the entire stream while still parsing
-            and acting on it. By default forward_stream is set to
-            DiscardStream() and no forwarding happens.
-        """
-        self.client = ExtendedToOriginalDecorator(client)
-        if stream is None:
-            stream = sys.stdout
-            if sys.version_info > (3, 0):
-                stream = stream.buffer
-        self._stream = stream
-        self._forward_stream = forward_stream or DiscardStream()
-        # state objects we can switch to
-        self._in_test = _InTest(self)
-        self._outside_test = _OutSideTest(self)
-        self._reading_error_details = _ReadingErrorDetails(self)
-        self._reading_failure_details = _ReadingFailureDetails(self)
-        self._reading_skip_details = _ReadingSkipDetails(self)
-        self._reading_success_details = _ReadingSuccessDetails(self)
-        self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
-        self._reading_uxsuccess_details = _ReadingUnexpectedSuccessDetails(self)
-        # start with outside test.
-        self._state = self._outside_test
-        # Avoid casts on every call
-        self._plusminus = _b('+-')
-        self._push_sym = _b('push')
-        self._pop_sym = _b('pop')
-
-    def _handleProgress(self, offset, line):
-        """Process a progress directive."""
-        line = line[offset:].strip()
-        if line[0] in self._plusminus:
-            whence = PROGRESS_CUR
-            delta = int(line)
-        elif line == self._push_sym:
-            whence = PROGRESS_PUSH
-            delta = None
-        elif line == self._pop_sym:
-            whence = PROGRESS_POP
-            delta = None
-        else:
-            whence = PROGRESS_SET
-            delta = int(line)
-        self.client.progress(delta, whence)
-
-    def _handleTags(self, offset, line):
-        """Process a tags command."""
-        tags = line[offset:].decode('utf8').split()
-        new_tags, gone_tags = tags_to_new_gone(tags)
-        self.client.tags(new_tags, gone_tags)
-
-    def _handleTime(self, offset, line):
-        # Accept it, but do not do anything with it yet.
-        try:
-            event_time = iso8601.parse_date(line[offset:-1])
-        except TypeError:
-            raise TypeError(_u("Failed to parse %r, got %r")
-                % (line, sys.exc_info()[1]))
-        self.client.time(event_time)
-
-    def lineReceived(self, line):
-        """Call the appropriate local method for the received line."""
-        self._state.lineReceived(line)
-
-    def _lostConnectionInTest(self, state_string):
-        error_string = _u("lost connection during %stest '%s'") % (
-            state_string, self.current_test_description)
-        self.client.addError(self._current_test, RemoteError(error_string))
-        self.client.stopTest(self._current_test)
-
-    def lostConnection(self):
-        """The input connection has finished."""
-        self._state.lostConnection()
-
-    def readFrom(self, pipe):
-        """Blocking convenience API to parse an entire stream.
-
-        :param pipe: A file-like object supporting readlines().
-        :return: None.
-        """
-        for line in pipe.readlines():
-            self.lineReceived(line)
-        self.lostConnection()
-
-    def _startTest(self, offset, line):
-        """Internal call to change state machine. Override startTest()."""
-        self._state.startTest(offset, line)
-
-    def subunitLineReceived(self, line):
-        self._forward_stream.write(line)
-
-    def stdOutLineReceived(self, line):
-        self._stream.write(line)
-
-
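
For orientation, a minimal usage sketch of the parser above, assuming the package is importable as `subunit` and feeding it a hand-written v1 byte stream:

    # Replay a subunit v1 byte stream onto a plain unittest.TestResult.
    import io
    import unittest

    import subunit

    stream = io.BytesIO(
        b"test: sample\n"
        b"success: sample\n"
    )
    result = unittest.TestResult()
    parser = subunit.TestProtocolServer(result)
    parser.readFrom(stream)          # blocking convenience API shown above
    print(result.testsRun)           # -> 1
    print(result.wasSuccessful())    # -> True
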
-class TestProtocolClient(testresult.TestResult):
-    """A TestResult which generates a subunit stream for a test run.
-
-    # Get a TestSuite or TestCase to run
-    suite = make_suite()
-    # Create a stream (any object with a 'write' method). This should accept
-    # bytes, not strings: subunit is a byte-orientated protocol.
-    stream = open('tests.log', 'wb')
-    # Create a subunit result object which will output to the stream
-    result = subunit.TestProtocolClient(stream)
-    # Optionally, to get timing data for performance analysis, wrap the
-    # serialiser with a timing decorator
-    result = subunit.test_results.AutoTimingTestResultDecorator(result)
-    # Run the test suite reporting to the subunit result object
-    suite.run(result)
-    # Close the stream.
-    stream.close()
-    """
-
-    def __init__(self, stream):
-        testresult.TestResult.__init__(self)
-        stream = make_stream_binary(stream)
-        self._stream = stream
-        self._progress_fmt = _b("progress: ")
-        self._bytes_eol = _b("\n")
-        self._progress_plus = _b("+")
-        self._progress_push = _b("push")
-        self._progress_pop = _b("pop")
-        self._empty_bytes = _b("")
-        self._start_simple = _b(" [\n")
-        self._end_simple = _b("]\n")
-
-    def addError(self, test, error=None, details=None):
-        """Report an error in test test.
-
-        Only one of error and details should be provided: conceptually there
-        are two separate methods:
-            addError(self, test, error)
-            addError(self, test, details)
-
-        :param error: Standard unittest positional argument form - an
-            exc_info tuple.
-        :param details: New Testing-in-python drafted API; a dict from string
-            to subunit.Content objects.
-        """
-        self._addOutcome("error", test, error=error, details=details)
-        if self.failfast:
-            self.stop()
-
-    def addExpectedFailure(self, test, error=None, details=None):
-        """Report an expected failure in test test.
-
-        Only one of error and details should be provided: conceptually there
-        are two separate methods:
-            addExpectedFailure(self, test, error)
-            addExpectedFailure(self, test, details)
-
-        :param error: Standard unittest positional argument form - an
-            exc_info tuple.
-        :param details: New Testing-in-python drafted API; a dict from string
-            to subunit.Content objects.
-        """
-        self._addOutcome("xfail", test, error=error, details=details)
-
-    def addFailure(self, test, error=None, details=None):
-        """Report a failure in test test.
-
-        Only one of error and details should be provided: conceptually there
-        are two separate methods:
-            addFailure(self, test, error)
-            addFailure(self, test, details)
-
-        :param error: Standard unittest positional argument form - an
-            exc_info tuple.
-        :param details: New Testing-in-python drafted API; a dict from string
-            to subunit.Content objects.
-        """
-        self._addOutcome("failure", test, error=error, details=details)
-        if self.failfast:
-            self.stop()
-
-    def _addOutcome(self, outcome, test, error=None, details=None,
-        error_permitted=True):
-        """Report a failure in test test.
-
-        Only one of error and details should be provided: conceptually there
-        are two separate methods:
-            addOutcome(self, test, error)
-            addOutcome(self, test, details)
-
-        :param outcome: A string describing the outcome - used as the
-            event name in the subunit stream.
-        :param error: Standard unittest positional argument form - an
-            exc_info tuple.
-        :param details: New Testing-in-python drafted API; a dict from string
-            to subunit.Content objects.
-        :param error_permitted: If True then one and only one of error or
-            details must be supplied. If False then error must not be supplied
-            and details is still optional.
-        """
-        self._stream.write(_b("%s: " % outcome) + self._test_id(test))
-        if error_permitted:
-            if error is None and details is None:
-                raise ValueError
-        else:
-            if error is not None:
-                raise ValueError
-        if error is not None:
-            self._stream.write(self._start_simple)
-            tb_content = TracebackContent(error, test)
-            for bytes in tb_content.iter_bytes():
-                self._stream.write(bytes)
-        elif details is not None:
-            self._write_details(details)
-        else:
-            self._stream.write(_b("\n"))
-        if details is not None or error is not None:
-            self._stream.write(self._end_simple)
-
-    def addSkip(self, test, reason=None, details=None):
-        """Report a skipped test."""
-        if reason is None:
-            self._addOutcome("skip", test, error=None, details=details)
-        else:
-            self._stream.write(_b("skip: %s [\n" % test.id()))
-            self._stream.write(_b("%s\n" % reason))
-            self._stream.write(self._end_simple)
-
-    def addSuccess(self, test, details=None):
-        """Report a success in a test."""
-        self._addOutcome("successful", test, details=details, error_permitted=False)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        """Report an unexpected success in test test.
-
-        Details can optionally be provided: conceptually there
-        are two separate methods:
-            addUnexpectedSuccess(self, test)
-            addUnexpectedSuccess(self, test, details)
-
-        :param details: New Testing-in-python drafted API; a dict from string
-            to subunit.Content objects.
-        """
-        self._addOutcome("uxsuccess", test, details=details,
-            error_permitted=False)
-        if self.failfast:
-            self.stop()
-
-    def _test_id(self, test):
-        result = test.id()
-        if type(result) is not bytes:
-            result = result.encode('utf8')
-        return result
-
-    def startTest(self, test):
-        """Mark a test as starting its test run."""
-        super(TestProtocolClient, self).startTest(test)
-        self._stream.write(_b("test: ") + self._test_id(test) + _b("\n"))
-        self._stream.flush()
-
-    def stopTest(self, test):
-        super(TestProtocolClient, self).stopTest(test)
-        self._stream.flush()
-
-    def progress(self, offset, whence):
-        """Provide indication about the progress/length of the test run.
-
-        :param offset: Information about the number of tests remaining. If
-            whence is PROGRESS_CUR, then offset increases/decreases the
-            remaining test count. If whence is PROGRESS_SET, then offset
-            specifies exactly the remaining test count.
-        :param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
-            PROGRESS_POP.
-        """
-        if whence == PROGRESS_CUR and offset > -1:
-            prefix = self._progress_plus
-            offset = _b(str(offset))
-        elif whence == PROGRESS_PUSH:
-            prefix = self._empty_bytes
-            offset = self._progress_push
-        elif whence == PROGRESS_POP:
-            prefix = self._empty_bytes
-            offset = self._progress_pop
-        else:
-            prefix = self._empty_bytes
-            offset = _b(str(offset))
-        self._stream.write(self._progress_fmt + prefix + offset +
-            self._bytes_eol)
-
-    def tags(self, new_tags, gone_tags):
-        """Inform the client about tags added/removed from the stream."""
-        if not new_tags and not gone_tags:
-            return
-        tags = set([tag.encode('utf8') for tag in new_tags])
-        tags.update([_b("-") + tag.encode('utf8') for tag in gone_tags])
-        tag_line = _b("tags: ") + _b(" ").join(tags) + _b("\n")
-        self._stream.write(tag_line)
-
-    def time(self, a_datetime):
-        """Inform the client of the time.
-
-        ":param datetime: A datetime.datetime object.
-        """
-        time = a_datetime.astimezone(iso8601.Utc())
-        self._stream.write(_b("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
-            time.year, time.month, time.day, time.hour, time.minute,
-            time.second, time.microsecond)))
-
-    def _write_details(self, details):
-        """Output details to the stream.
-
-        :param details: An extended details dict for a test outcome.
-        """
-        self._stream.write(_b(" [ multipart\n"))
-        for name, content in sorted(details.items()):
-            self._stream.write(_b("Content-Type: %s/%s" %
-                (content.content_type.type, content.content_type.subtype)))
-            parameters = content.content_type.parameters
-            if parameters:
-                self._stream.write(_b(";"))
-                param_strs = []
-                for param, value in parameters.items():
-                    param_strs.append("%s=%s" % (param, value))
-                self._stream.write(_b(",".join(param_strs)))
-            self._stream.write(_b("\n%s\n" % name))
-            encoder = chunked.Encoder(self._stream)
-            list(map(encoder.write, content.iter_bytes()))
-            encoder.close()
-
-    def done(self):
-        """Obey the testtools result.done() interface."""
-
-
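
As a concrete illustration of the byte stream this serialiser emits, a sketch (the sample test case is invented; assumes the package is importable as `subunit`):

    # Capture the v1 stream produced for one passing test.
    import io
    import unittest

    import subunit


    class Sample(unittest.TestCase):
        def test_ok(self):
            pass


    buf = io.BytesIO()
    result = subunit.TestProtocolClient(buf)
    unittest.TestLoader().loadTestsFromTestCase(Sample).run(result)
    print(buf.getvalue())
    # Roughly: b'test: __main__.Sample.test_ok\nsuccessful: __main__.Sample.test_ok\n'
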
-def RemoteError(description=_u("")):
-    return (_StringException, _StringException(description), None)
-
-
-class RemotedTestCase(unittest.TestCase):
-    """A class to represent test cases run in child processes.
-
-    Instances of this class are used to provide the Python test API a TestCase
-    that can be printed to the screen, introspected for metadata and so on.
-    However, as they are simply a memoisation of a test that was actually
-    run in the past by a separate process, they cannot perform any interactive
-    actions.
-    """
-
-    def __eq__ (self, other):
-        try:
-            return self.__description == other.__description
-        except AttributeError:
-            return False
-
-    def __init__(self, description):
-        """Create a psuedo test case with description description."""
-        self.__description = description
-
-    def error(self, label):
-        raise NotImplementedError("%s on RemotedTestCases is not permitted." %
-            label)
-
-    def setUp(self):
-        self.error("setUp")
-
-    def tearDown(self):
-        self.error("tearDown")
-
-    def shortDescription(self):
-        return self.__description
-
-    def id(self):
-        return "%s" % (self.__description,)
-
-    def __str__(self):
-        return "%s (%s)" % (self.__description, self._strclass())
-
-    def __repr__(self):
-        return "<%s description='%s'>" % \
-               (self._strclass(), self.__description)
-
-    def run(self, result=None):
-        if result is None: result = self.defaultTestResult()
-        result.startTest(self)
-        result.addError(self, RemoteError(_u("Cannot run RemotedTestCases.\n")))
-        result.stopTest(self)
-
-    def _strclass(self):
-        cls = self.__class__
-        return "%s.%s" % (cls.__module__, cls.__name__)
-
-
-class ExecTestCase(unittest.TestCase):
-    """A test case which runs external scripts for test fixtures."""
-
-    def __init__(self, methodName='runTest'):
-        """Create an instance of the class that will use the named test
-           method when executed. Raises a ValueError if the instance does
-           not have a method with the specified name.
-        """
-        unittest.TestCase.__init__(self, methodName)
-        testMethod = getattr(self, methodName)
-        self.script = join_dir(sys.modules[self.__class__.__module__].__file__,
-                               testMethod.__doc__)
-
-    def countTestCases(self):
-        return 1
-
-    def run(self, result=None):
-        if result is None: result = self.defaultTestResult()
-        self._run(result)
-
-    def debug(self):
-        """Run the test without collecting errors in a TestResult"""
-        self._run(testresult.TestResult())
-
-    def _run(self, result):
-        protocol = TestProtocolServer(result)
-        process = subprocess.Popen(self.script, shell=True,
-            stdout=subprocess.PIPE)
-        make_stream_binary(process.stdout)
-        output = process.communicate()[0]
-        protocol.readFrom(BytesIO(output))
-
-
-class IsolatedTestCase(unittest.TestCase):
-    """A TestCase which executes in a forked process.
-
-    Each test gets its own process, which has a performance overhead but will
-    provide excellent isolation from global state (such as django configs,
-    zope utilities and so on).
-    """
-
-    def run(self, result=None):
-        if result is None: result = self.defaultTestResult()
-        run_isolated(unittest.TestCase, self, result)
-
-
-class IsolatedTestSuite(unittest.TestSuite):
-    """A TestSuite which runs its tests in a forked process.
-
-    This suite will fork() before running its tests and report the results
-    from the child process using a subunit stream.  This is useful for
-    handling tests that mutate global state, or are testing C extensions that
-    could crash the VM.
-    """
-
-    def run(self, result=None):
-        if result is None: result = testresult.TestResult()
-        run_isolated(unittest.TestSuite, self, result)
-
-
-def run_isolated(klass, self, result):
-    """Run a test suite or case in a subprocess, using the run method on klass.
-    """
-    c2pread, c2pwrite = os.pipe()
-    # fixme - error -> result
-    # now fork
-    pid = os.fork()
-    if pid == 0:
-        # Child
-        # Close parent's pipe ends
-        os.close(c2pread)
-        # Dup fds for child
-        os.dup2(c2pwrite, 1)
-        # Close pipe fds.
-        os.close(c2pwrite)
-
-        # at this point, sys.stdout is redirected; now we want
-        # to filter it to escape ]'s.
-        ### XXX: test and write that bit.
-        stream = os.fdopen(1, 'wb')
-        result = TestProtocolClient(stream)
-        klass.run(self, result)
-        stream.flush()
-        sys.stderr.flush()
-        # exit HARD, exit NOW.
-        os._exit(0)
-    else:
-        # Parent
-        # Close child pipe ends
-        os.close(c2pwrite)
-        # hookup a protocol engine
-        protocol = TestProtocolServer(result)
-        fileobj = os.fdopen(c2pread, 'rb')
-        protocol.readFrom(fileobj)
-        os.waitpid(pid, 0)
-        # TODO return code evaluation.
-    return result
-
-
-def TAP2SubUnit(tap, output_stream):
-    """Filter a TAP pipe into a subunit pipe.
-
-    This should be invoked once per TAP script, as TAP scripts get
-    mapped to a single runnable case with multiple components.
-
-    :param tap: A tap pipe/stream/file object - should emit unicode strings.
-    :param output_stream: A pipe/stream/file object to write subunit results to.
-    :return: The exit code to exit with.
-    """
-    output = StreamResultToBytes(output_stream)
-    UTF8_TEXT = 'text/plain; charset=UTF8'
-    BEFORE_PLAN = 0
-    AFTER_PLAN = 1
-    SKIP_STREAM = 2
-    state = BEFORE_PLAN
-    plan_start = 1
-    plan_stop = 0
-    # Test data for the next test to emit
-    test_name = None
-    log = []
-    result = None
-    def missing_test(plan_start):
-        output.status(test_id='test %d' % plan_start,
-            test_status='fail', runnable=False,
-            mime_type=UTF8_TEXT, eof=True, file_name="tap meta",
-            file_bytes=b"test missing from TAP output")
-    def _emit_test():
-        "write out a test"
-        if test_name is None:
-            return
-        if log:
-            log_bytes = b'\n'.join(log_line.encode('utf8') for log_line in log)
-            mime_type = UTF8_TEXT
-            file_name = 'tap comment'
-            eof = True
-        else:
-            log_bytes = None
-            mime_type = None
-            file_name = None
-            eof = True
-        del log[:]
-        output.status(test_id=test_name, test_status=result,
-            file_bytes=log_bytes, mime_type=mime_type, eof=eof,
-            file_name=file_name, runnable=False)
-    for line in tap:
-        if state == BEFORE_PLAN:
-            match = re.match("(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
-            if match:
-                state = AFTER_PLAN
-                _, plan_stop, comment = match.groups()
-                plan_stop = int(plan_stop)
-                if plan_start > plan_stop and plan_stop == 0:
-                    # skipped file
-                    state = SKIP_STREAM
-                    output.status(test_id='file skip', test_status='skip',
-                        file_bytes=comment.encode('utf8'), eof=True,
-                        file_name='tap comment')
-                continue
-        # not a plan line, or have seen one before
-        match = re.match("(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
-        if match:
-            # new test, emit current one.
-            _emit_test()
-            status, number, description, directive, directive_comment = match.groups()
-            if status == 'ok':
-                result = 'success'
-            else:
-                result = "fail"
-            if description is None:
-                description = ''
-            else:
-                description = ' ' + description
-            if directive is not None:
-                if directive.upper() == 'TODO':
-                    result = 'xfail'
-                elif directive.upper() == 'SKIP':
-                    result = 'skip'
-                if directive_comment is not None:
-                    log.append(directive_comment)
-            if number is not None:
-                number = int(number)
-                while plan_start < number:
-                    missing_test(plan_start)
-                    plan_start += 1
-            test_name = "test %d%s" % (plan_start, description)
-            plan_start += 1
-            continue
-        match = re.match("Bail out\!(?:\s*(.*))?\n", line)
-        if match:
-            reason, = match.groups()
-            if reason is None:
-                extra = ''
-            else:
-                extra = ' %s' % reason
-            _emit_test()
-            test_name = "Bail out!%s" % extra
-            result = "fail"
-            state = SKIP_STREAM
-            continue
-        match = re.match("\#.*\n", line)
-        if match:
-            log.append(line[:-1])
-            continue
-        # Should look at buffering status and binding this to the prior result.
-        output.status(file_bytes=line.encode('utf8'), file_name='stdout',
-            mime_type=UTF8_TEXT)
-    _emit_test()
-    while plan_start <= plan_stop:
-        # record missed tests
-        missing_test(plan_start)
-        plan_start += 1
-    return 0
-
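
A short usage sketch for the TAP converter above (the TAP input is invented; assumes the function is reachable as `subunit.TAP2SubUnit`):

    # Convert a tiny TAP stream into subunit v2 bytes held in memory.
    import io

    import subunit

    tap = io.StringIO(
        "1..2\n"
        "ok 1 - addition works\n"
        "not ok 2 - subtraction works\n"
    )
    out = io.BytesIO()
    subunit.TAP2SubUnit(tap, out)    # returns 0; `out` now holds v2 packets
    print(len(out.getvalue()), "bytes of subunit v2")
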
-
-def tag_stream(original, filtered, tags):
-    """Alter tags on a stream.
-
-    :param original: The input stream.
-    :param filtered: The output stream.
-    :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
-        '-TAG' commands.
-
-        A 'TAG' command will add the tag to the output stream,
-        and override any existing '-TAG' command in that stream.
-        Specifically:
-         * A global 'tags: TAG' will be added to the start of the stream.
-         * Any tags commands with -TAG will have the -TAG removed.
-
-        A '-TAG' command will remove the TAG command from the stream.
-        Specifically:
-         * A 'tags: -TAG' command will be added to the start of the stream.
-         * Any 'tags: TAG' command will have 'TAG' removed from it.
-        Additionally, any redundant tagging commands (adding a tag globally
-        present, or removing a tag globally removed) are stripped as a
-        by-product of the filtering.
-    :return: 0
-    """
-    new_tags, gone_tags = tags_to_new_gone(tags)
-    source = ByteStreamToStreamResult(original, non_subunit_name='stdout')
-    class Tagger(CopyStreamResult):
-        def status(self, **kwargs):
-            tags = kwargs.get('test_tags')
-            if not tags:
-                tags = set()
-            tags.update(new_tags)
-            tags.difference_update(gone_tags)
-            if tags:
-                kwargs['test_tags'] = tags
-            else:
-                kwargs['test_tags'] = None
-            super(Tagger, self).status(**kwargs)
-    output = Tagger([StreamResultToBytes(filtered)])
-    source.run(output)
-    return 0
-
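
A usage sketch for tag_stream (the file names are invented; assumes the function is reachable as `subunit.tag_stream`):

    # Add the tag 'smoke' and remove the tag 'flaky' while copying a stream.
    import subunit

    with open('tests.subunit', 'rb') as original, \
            open('tagged.subunit', 'wb') as filtered:
        subunit.tag_stream(original, filtered, ['smoke', '-flaky'])
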
-
-class ProtocolTestCase(object):
-    """Subunit wire protocol to unittest.TestCase adapter.
-
-    ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
-    calling a ProtocolTestCase or invoking the run() method will make a 'test
-    run' happen. The 'test run' will simply be a replay of the test activity
-    that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
-    and ``countTestCases`` methods are not supported because there isn't a
-    sensible mapping for those methods.
-
-    # Get a stream (any object with a readline() method), in this case the
-    # stream output by the example from ``subunit.TestProtocolClient``.
-    stream = open('tests.log', 'rb')
-    # Create a parser which will read from the stream and emit
-    # activity to a unittest.TestResult when run() is called.
-    suite = subunit.ProtocolTestCase(stream)
-    # Create a result object to accept the contents of that stream.
-    result = unittest.TextTestResult(sys.stdout, descriptions=False, verbosity=1)
-    # 'run' the tests - process the stream and feed its contents to result.
-    suite.run(result)
-    stream.close()
-
-    :seealso: TestProtocolServer (the subunit wire protocol parser).
-    """
-
-    def __init__(self, stream, passthrough=None, forward=None):
-        """Create a ProtocolTestCase reading from stream.
-
-        :param stream: A filelike object which a subunit stream can be read
-            from.
-        :param passthrough: A stream to pass non-subunit input on to. If not
-            supplied, the TestProtocolServer default is used.
-        :param forward: A stream to pass subunit input on to. If not supplied
-            subunit input is not forwarded.
-        """
-        stream = make_stream_binary(stream)
-        self._stream = stream
-        self._passthrough = passthrough
-        if forward is not None:
-            forward = make_stream_binary(forward)
-        self._forward = forward
-
-    def __call__(self, result=None):
-        return self.run(result)
-
-    def run(self, result=None):
-        if result is None:
-            result = self.defaultTestResult()
-        protocol = TestProtocolServer(result, self._passthrough, self._forward)
-        line = self._stream.readline()
-        while line:
-            protocol.lineReceived(line)
-            line = self._stream.readline()
-        protocol.lostConnection()
-
-
-class TestResultStats(testresult.TestResult):
-    """A pyunit TestResult interface implementation for making statistics.
-
-    :ivar total_tests: The total tests seen.
-    :ivar passed_tests: The tests that passed.
-    :ivar failed_tests: The tests that failed.
-    :ivar skipped_tests: The tests that were skipped.
-    :ivar seen_tags: The tags seen across all tests.
-    """
-
-    def __init__(self, stream):
-        """Create a TestResultStats which outputs to stream."""
-        testresult.TestResult.__init__(self)
-        self._stream = stream
-        self.failed_tests = 0
-        self.skipped_tests = 0
-        self.seen_tags = set()
-
-    @property
-    def total_tests(self):
-        return self.testsRun
-
-    def addError(self, test, err, details=None):
-        self.failed_tests += 1
-
-    def addFailure(self, test, err, details=None):
-        self.failed_tests += 1
-
-    def addSkip(self, test, reason, details=None):
-        self.skipped_tests += 1
-
-    def formatStats(self):
-        self._stream.write("Total tests:   %5d\n" % self.total_tests)
-        self._stream.write("Passed tests:  %5d\n" % self.passed_tests)
-        self._stream.write("Failed tests:  %5d\n" % self.failed_tests)
-        self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
-        tags = sorted(self.seen_tags)
-        self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
-
-    @property
-    def passed_tests(self):
-        return self.total_tests - self.failed_tests - self.skipped_tests
-
-    def tags(self, new_tags, gone_tags):
-        """Accumulate the seen tags."""
-        self.seen_tags.update(new_tags)
-
-    def wasSuccessful(self):
-        """Tells whether or not this result was a success"""
-        return self.failed_tests == 0
-
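
The two classes above compose naturally; a sketch of replaying a recorded stream into the statistics collector ('tests.subunit' is an invented file name; assumes the package is importable as `subunit`):

    # Replay a recorded subunit v1 stream and print summary statistics.
    import sys

    import subunit

    with open('tests.subunit', 'rb') as stream:
        case = subunit.ProtocolTestCase(stream)
        result = subunit.TestResultStats(sys.stdout)
        case.run(result)
    result.formatStats()
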
-
-def get_default_formatter():
-    """Obtain the default formatter to write to.
-
-    :return: A file-like object.
-    """
-    formatter = os.getenv("SUBUNIT_FORMATTER")
-    if formatter:
-        return os.popen(formatter, "w")
-    else:
-        stream = sys.stdout
-        if sys.version_info > (3, 0):
-            if safe_hasattr(stream, 'buffer'):
-                stream = stream.buffer
-        return stream
-
-
-def read_test_list(path):
-    """Read a list of test ids from a file on disk.
-
-    :param path: Path to the file
-    :return: Sequence of test ids
-    """
-    f = open(path, 'rb')
-    try:
-        return [l.rstrip(_b("\n")) for l in f.readlines()]
-    finally:
-        f.close()
-
-
-def make_stream_binary(stream):
-    """Ensure that a stream will be binary safe. See _make_binary_on_windows.
-    
-    :return: A binary version of the same stream (some streams cannot be
-        'fixed' but can be unwrapped).
-    """
-    try:
-        fileno = stream.fileno()
-    except (_UnsupportedOperation, AttributeError):
-        pass
-    else:
-        _make_binary_on_windows(fileno)
-    return _unwrap_text(stream)
-
-
-def _make_binary_on_windows(fileno):
-    """Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
-    if sys.platform == "win32":
-        import msvcrt
-        msvcrt.setmode(fileno, os.O_BINARY)
-
-
-def _unwrap_text(stream):
-    """Unwrap stream if it is a text stream to get the original buffer."""
-    if sys.version_info > (3, 0):
-        unicode_type = str
-    else:
-        unicode_type = unicode
-    try:
-        # Read streams
-        if type(stream.read(0)) is unicode_type:
-            return stream.buffer
-    except (_UnsupportedOperation, IOError):
-        # Cannot read from the stream: try via writes
-        try:
-            stream.write(_b(''))
-        except TypeError:
-            return stream.buffer
-    return stream
diff --git a/lib/subunit/python/subunit/_output.py b/lib/subunit/python/subunit/_output.py
deleted file mode 100644
index aa92646..0000000
--- a/lib/subunit/python/subunit/_output.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2013 Subunit Contributors
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import datetime
-from functools import partial
-from optparse import (
-    OptionGroup,
-    OptionParser,
-    OptionValueError,
-)
-import sys
-
-from subunit import make_stream_binary
-from subunit.iso8601 import UTC
-from subunit.v2 import StreamResultToBytes
-
-
-_FINAL_ACTIONS = frozenset([
-    'exists',
-    'fail',
-    'skip',
-    'success',
-    'uxsuccess',
-    'xfail',
-])
-_ALL_ACTIONS = _FINAL_ACTIONS.union(['inprogress'])
-_CHUNK_SIZE = 3670016  # 3.5 MiB
-
-
-def output_main():
-    args = parse_arguments()
-    output = StreamResultToBytes(sys.stdout)
-    generate_stream_results(args, output)
-    return 0
-
-
-def parse_arguments(args=None, ParserClass=OptionParser):
-    """Parse arguments from the command line.
-
-    If specified, args must be a list of strings, similar to sys.argv[1:].
-
-    ParserClass may be specified to override the class we use to parse the
-    command-line arguments. This is useful for testing.
-    """
-    parser = ParserClass(
-        prog="subunit-output",
-        description="A tool to generate a subunit v2 result byte-stream",
-        usage="subunit-output [-h] [status TEST_ID] [options]",
-    )
-    parser.set_default('tags', None)
-    parser.set_default('test_id', None)
-
-    status_commands = OptionGroup(
-        parser,
-        "Status Commands",
-        "These options report the status of a test. TEST_ID must be a string "
-            "that uniquely identifies the test."
-    )
-    for action_name in _ALL_ACTIONS:
-        status_commands.add_option(
-            "--%s" % action_name,
-            nargs=1,
-            action="callback",
-            callback=set_status_cb,
-            callback_args=(action_name,),
-            dest="action",
-            metavar="TEST_ID",
-            help="Report a test status."
-        )
-    parser.add_option_group(status_commands)
-
-    file_commands = OptionGroup(
-        parser,
-        "File Options",
-        "These options control attaching data to a result stream. They can "
-            "either be specified with a status command, in which case the file "
-            "is attached to the test status, or by themselves, in which case "
-            "the file is attached to the stream (and not associated with any "
-            "test id)."
-    )
-    file_commands.add_option(
-        "--attach-file",
-        help="Attach a file to the result stream for this test. If '-' is "
-            "specified, stdin will be read instead. In this case, the file "
-            "name will be set to 'stdin' (but can still be overridden with "
-            "the --file-name option)."
-    )
-    file_commands.add_option(
-        "--file-name",
-        help="The name to give this file attachment. If not specified, the "
-            "name of the file on disk will be used, or 'stdin' in the case "
-            "where '-' was passed to the '--attach-file' argument. This option"
-            " may only be specified when '--attach-file' is specified.",
-        )
-    file_commands.add_option(
-        "--mimetype",
-        help="The mime type to send with this file. This is only used if the "
-            "--attach-file argument is used. This argument is optional. If it "
-            "is not specified, the file will be sent without a mime type. This "
-            "option may only be specified when '--attach-file' is specified.",
-        default=None
-    )
-    parser.add_option_group(file_commands)
-
-    parser.add_option(
-        "--tag",
-        help="Specifies a tag. May be used multiple times",
-        action="append",
-        dest="tags",
-        default=[]
-    )
-
-    (options, args) = parser.parse_args(args)
-    if options.mimetype and not options.attach_file:
-        parser.error("Cannot specify --mimetype without --attach-file")
-    if options.file_name and not options.attach_file:
-        parser.error("Cannot specify --file-name without --attach-file")
-    if options.attach_file:
-        if options.attach_file == '-':
-            if not options.file_name:
-                options.file_name = 'stdin'
-            options.attach_file = make_stream_binary(sys.stdin)
-        else:
-            try:
-                options.attach_file = open(options.attach_file, 'rb')
-            except IOError as e:
-                parser.error("Cannot open %s (%s)" % (options.attach_file, e.strerror))
-
-    return options
-
-
-def set_status_cb(option, opt_str, value, parser, status_name):
-    if getattr(parser.values, "action", None) is not None:
-        raise OptionValueError("argument %s: Only one status may be specified at once." % opt_str)
-
-    if len(parser.rargs) == 0:
-        raise OptionValueError("argument %s: must specify a single TEST_ID." % opt_str)
-    parser.values.action = status_name
-    parser.values.test_id = parser.rargs.pop(0)
-
-
-def generate_stream_results(args, output_writer):
-    output_writer.startTestRun()
-
-    if args.attach_file:
-        reader = partial(args.attach_file.read, _CHUNK_SIZE)
-        this_file_hunk = reader()
-        next_file_hunk = reader()
-
-    is_first_packet = True
-    is_last_packet = False
-    while not is_last_packet:
-        write_status = output_writer.status
-
-        if is_first_packet:
-            if args.attach_file:
-                if args.mimetype:
-                    write_status = partial(write_status, mime_type=args.mimetype)
-            if args.tags:
-                write_status = partial(write_status, test_tags=set(args.tags))
-            write_status = partial(write_status, timestamp=create_timestamp())
-            if args.action not in _FINAL_ACTIONS:
-                write_status = partial(write_status, test_status=args.action)
-            is_first_packet = False
-
-        if args.attach_file:
-            filename = args.file_name or args.attach_file.name
-            write_status = partial(write_status, file_name=filename, file_bytes=this_file_hunk)
-            if next_file_hunk == b'':
-                write_status = partial(write_status, eof=True)
-                is_last_packet = True
-            else:
-                this_file_hunk = next_file_hunk
-                next_file_hunk = reader()
-        else:
-            is_last_packet = True
-
-        if args.test_id:
-            write_status = partial(write_status, test_id=args.test_id)
-
-        if is_last_packet:
-            if args.action in _FINAL_ACTIONS:
-                write_status = partial(write_status, test_status=args.action)
-
-        write_status()
-
-    output_writer.stopTestRun()
-
-
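
A sketch of driving generate_stream_results without the command line, using an optparse.Values stand-in for the parsed options (the attribute names mirror the parser above; the test id is invented):

    import sys
    from optparse import Values

    from subunit._output import generate_stream_results
    from subunit.v2 import StreamResultToBytes

    args = Values(dict(action='success', test_id='sample.test_ok',
                       attach_file=None, file_name=None, mimetype=None,
                       tags=None))
    # Emits a single v2 status packet for the named test to stdout.
    generate_stream_results(args, StreamResultToBytes(sys.stdout.buffer))
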
-def create_timestamp():
-    return datetime.datetime.now(UTC)
diff --git a/lib/subunit/python/subunit/chunked.py b/lib/subunit/python/subunit/chunked.py
deleted file mode 100644
index b992129..0000000
--- a/lib/subunit/python/subunit/chunked.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#  Copyright (C) 2011  Martin Pool <mbp at sourcefrog.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Encoder/decoder for http style chunked encoding."""
-
-from testtools.compat import _b
-
-empty = _b('')
-
-class Decoder(object):
-    """Decode chunked content to a byte stream."""
-
-    def __init__(self, output, strict=True):
-        """Create a decoder decoding to output.
-
-        :param output: A file-like object. Bytes written to the Decoder are
-            decoded to strip off the chunking and written to the output.
-            Up to a full write worth of data or a single control line may be
-            buffered (whichever is larger). The close method should be called
-            when no more data is available, to detect short streams; the
-            write method will return a non-None value when the end of a stream is
-            detected. The output object must accept bytes objects.
-
-        :param strict: If True (the default), the decoder will not knowingly
-            accept input that is not conformant to the HTTP specification.
-            (This does not imply that it will catch every nonconformance.)
-            If False, it will accept incorrect input that is still
-            unambiguous.
-        """
-        self.output = output
-        self.buffered_bytes = []
-        self.state = self._read_length
-        self.body_length = 0
-        self.strict = strict
-        self._match_chars = _b("0123456789abcdefABCDEF\r\n")
-        self._slash_n = _b('\n')
-        self._slash_r = _b('\r')
-        self._slash_rn = _b('\r\n')
-        self._slash_nr = _b('\n\r')
-
-    def close(self):
-        """Close the decoder.
-
-        :raises ValueError: If the stream is incomplete.
-        """
-        if self.state != self._finished:
-            raise ValueError("incomplete stream")
-
-    def _finished(self):
-        """Finished reading, return any remaining bytes."""
-        if self.buffered_bytes:
-            buffered_bytes = self.buffered_bytes
-            self.buffered_bytes = []
-            return empty.join(buffered_bytes)
-        else:
-            raise ValueError("stream is finished")
-
-    def _read_body(self):
-        """Pass body bytes to the output."""
-        while self.body_length and self.buffered_bytes:
-            if self.body_length >= len(self.buffered_bytes[0]):
-                self.output.write(self.buffered_bytes[0])
-                self.body_length -= len(self.buffered_bytes[0])
-                del self.buffered_bytes[0]
-                # No more data available.
-                if not self.body_length:
-                    self.state = self._read_length
-            else:
-                self.output.write(self.buffered_bytes[0][:self.body_length])
-                self.buffered_bytes[0] = \
-                    self.buffered_bytes[0][self.body_length:]
-                self.body_length = 0
-                self.state = self._read_length
-                return self.state()
-
-    def _read_length(self):
-        """Try to decode a length from the bytes."""
-        count_chars = []
-        for bytes in self.buffered_bytes:
-            for pos in range(len(bytes)):
-                byte = bytes[pos:pos+1]
-                if byte not in self._match_chars:
-                    break
-                count_chars.append(byte)
-                if byte == self._slash_n:
-                    break
-        if not count_chars:
-            return
-        if count_chars[-1] != self._slash_n:
-            return
-        count_str = empty.join(count_chars)
-        if self.strict:
-            if count_str[-2:] != self._slash_rn:
-                raise ValueError("chunk header invalid: %r" % count_str)
-            if self._slash_r in count_str[:-2]:
-                raise ValueError("too many CRs in chunk header %r" % count_str)
-        self.body_length = int(count_str.rstrip(self._slash_nr), 16)
-        excess_bytes = len(count_str)
-        while excess_bytes:
-            if excess_bytes >= len(self.buffered_bytes[0]):
-                excess_bytes -= len(self.buffered_bytes[0])
-                del self.buffered_bytes[0]
-            else:
-                self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
-                excess_bytes = 0
-        if not self.body_length:
-            self.state = self._finished
-            if not self.buffered_bytes:
-                # May not call into self._finished with no buffered data.
-                return empty
-        else:
-            self.state = self._read_body
-        return self.state()
-
-    def write(self, bytes):
-        """Decode bytes to the output stream.
-
-        :raises ValueError: If the stream has already seen the end of file
-            marker.
-        :returns: None, or the excess bytes beyond the end of file marker.
-        """
-        if bytes:
-            self.buffered_bytes.append(bytes)
-        return self.state()
-
-
-class Encoder(object):
-    """Encode content to a stream using HTTP Chunked coding."""
-
-    def __init__(self, output):
-        """Create an encoder encoding to output.
-
-        :param output: A file-like object. Bytes written to the Encoder
-            will be encoded using HTTP chunking. Small writes may be buffered
-            and the ``close`` method must be called to finish the stream.
-        """
-        self.output = output
-        self.buffered_bytes = []
-        self.buffer_size = 0
-
-    def flush(self, extra_len=0):
-        """Flush the encoder to the output stream.
-
-        :param extra_len: Increase the size of the chunk by this many bytes
-            to allow for a subsequent write.
-        """
-        if not self.buffer_size and not extra_len:
-            return
-        buffered_bytes = self.buffered_bytes
-        buffer_size = self.buffer_size
-        self.buffered_bytes = []
-        self.buffer_size = 0
-        self.output.write(_b("%X\r\n" % (buffer_size + extra_len)))
-        if buffer_size:
-            self.output.write(empty.join(buffered_bytes))
-        return True
-
-    def write(self, bytes):
-        """Encode bytes to the output stream."""
-        bytes_len = len(bytes)
-        if self.buffer_size + bytes_len >= 65536:
-            self.flush(bytes_len)
-            self.output.write(bytes)
-        else:
-            self.buffered_bytes.append(bytes)
-            self.buffer_size += bytes_len
-
-    def close(self):
-        """Finish the stream. This does not close the output stream."""
-        self.flush()
-        self.output.write(_b("0\r\n"))
diff --git a/lib/subunit/python/subunit/details.py b/lib/subunit/python/subunit/details.py
deleted file mode 100644
index 9e5e005..0000000
--- a/lib/subunit/python/subunit/details.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Handlers for outcome details."""
-
-from testtools import content, content_type
-from testtools.compat import _b, BytesIO
-
-from subunit import chunked
-
-end_marker = _b("]\n")
-quoted_marker = _b(" ]")
-empty = _b('')
-
-
-class DetailsParser(object):
-    """Base class/API reference for details parsing."""
-
-
-class SimpleDetailsParser(DetailsParser):
-    """Parser for single-part [] delimited details."""
-
-    def __init__(self, state):
-        self._message = _b("")
-        self._state = state
-
-    def lineReceived(self, line):
-        if line == end_marker:
-            self._state.endDetails()
-            return
-        if line[0:2] == quoted_marker:
-            # quoted ] start
-            self._message += line[1:]
-        else:
-            self._message += line
-
-    def get_details(self, style=None):
-        result = {}
-        if not style:
-            # We know that subunit/testtools serialise [] formatted
-            # tracebacks as utf8, but perhaps we need a ReplacingContent
-            # or something like that.
-            result['traceback'] = content.Content(
-                content_type.ContentType("text", "x-traceback",
-                {"charset": "utf8"}),
-                lambda:[self._message])
-        else:
-            if style == 'skip':
-                name = 'reason'
-            else:
-                name = 'message'
-            result[name] = content.Content(
-                content_type.ContentType("text", "plain"),
-                lambda:[self._message])
-        return result
-
-    def get_message(self):
-        return self._message
-
-
-class MultipartDetailsParser(DetailsParser):
-    """Parser for multi-part [] surrounded MIME typed chunked details."""
-
-    def __init__(self, state):
-        self._state = state
-        self._details = {}
-        self._parse_state = self._look_for_content
-
-    def _look_for_content(self, line):
-        if line == end_marker:
-            self._state.endDetails()
-            return
-        # TODO error handling
-        field, value = line[:-1].decode('utf8').split(' ', 1)
-        try:
-            main, sub = value.split('/')
-        except ValueError:
-            raise ValueError("Invalid MIME type %r" % value)
-        self._content_type = content_type.ContentType(main, sub)
-        self._parse_state = self._get_name
-
-    def _get_name(self, line):
-        self._name = line[:-1].decode('utf8')
-        self._body = BytesIO()
-        self._chunk_parser = chunked.Decoder(self._body)
-        self._parse_state = self._feed_chunks
-
-    def _feed_chunks(self, line):
-        residue = self._chunk_parser.write(line)
-        if residue is not None:
-            # Line based use always ends on no residue.
-            assert residue == empty, 'residue: %r' % (residue,)
-            body = self._body
-            self._details[self._name] = content.Content(
-                self._content_type, lambda:[body.getvalue()])
-            self._chunk_parser.close()
-            self._parse_state = self._look_for_content
-
-    def get_details(self, for_skip=False):
-        return self._details
-
-    def get_message(self):
-        return None
-
-    def lineReceived(self, line):
-        self._parse_state(line)
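The parsers above consume the bracket-quoted detail blocks of the v1 protocol: a block ends with a lone "]" line, and a literal "]" at the start of a line inside the block is escaped by a leading space. A hypothetical driver for the single-part case (the DummyState class below is illustrative, not part of subunit; only endDetails() is needed):

    from subunit.details import SimpleDetailsParser


    class DummyState(object):
        # Stands in for the protocol object that owns the parser.
        def endDetails(self):
            print("details finished")


    parser = SimpleDetailsParser(DummyState())
    parser.lineReceived(b"Traceback (most recent call last):\n")
    parser.lineReceived(b" ]quoted bracket line\n")   # unescaped to "]quoted bracket line"
    parser.lineReceived(b"]\n")                       # end marker -> endDetails()
    print(parser.get_message())                       # accumulated bytes of the block
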
diff --git a/lib/subunit/python/subunit/filters.py b/lib/subunit/python/subunit/filters.py
deleted file mode 100644
index 0a0a185..0000000
--- a/lib/subunit/python/subunit/filters.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-
-from optparse import OptionParser
-import sys
-
-from extras import safe_hasattr
-from testtools import CopyStreamResult, StreamResult, StreamResultRouter
-
-from subunit import (
-    DiscardStream, ProtocolTestCase, ByteStreamToStreamResult,
-    StreamResultToBytes,
-    )
-from subunit.test_results import CatFiles
-
-
-def make_options(description):
-    parser = OptionParser(description=description)
-    parser.add_option(
-        "--no-passthrough", action="store_true",
-        help="Hide all non subunit input.", default=False,
-        dest="no_passthrough")
-    parser.add_option(
-        "-o", "--output-to",
-        help="Send the output to this path rather than stdout.")
-    parser.add_option(
-        "-f", "--forward", action="store_true", default=False,
-        help="Forward subunit stream on stdout. When set, received "
-            "non-subunit output will be encapsulated in subunit.")
-    return parser
-
-
-def run_tests_from_stream(input_stream, result, passthrough_stream=None,
-    forward_stream=None, protocol_version=1, passthrough_subunit=True):
-    """Run tests from a subunit input stream through 'result'.
-
-    Non-test events - top level file attachments - are expected to be
-    dropped by v2 StreamResults at the present time (as all the analysis code
-    is in ExtendedTestResult APIs), so to implement passthrough_stream they
-    are diverted and copied directly when that is set.
-
-    :param input_stream: A stream containing subunit input.
-    :param result: A TestResult that will receive the test events.
-        NB: This should be an ExtendedTestResult for v1 and a StreamResult for
-        v2.
-    :param passthrough_stream: All non-subunit input received will be
-        sent to this stream.  If not provided, uses the ``TestProtocolServer``
-        default, which is ``sys.stdout``.
-    :param forward_stream: All subunit input received will be forwarded
-        to this stream. If not provided, uses the ``TestProtocolServer``
-        default, which is to not forward any input. Do not set this when
-        transforming the stream - items would be double-reported.
-    :param protocol_version: What version of the subunit protocol to expect.
-    :param passthrough_subunit: If True, non-subunit input is passed through
-        encapsulated as subunit; otherwise it is unwrapped for display. Only
-        has an effect when forward_stream is None (when forwarding, non-subunit
-        input is always turned into subunit).
-    """
-    if 1==protocol_version:
-        test = ProtocolTestCase(
-            input_stream, passthrough=passthrough_stream,
-            forward=forward_stream)
-    elif 2==protocol_version:
-        # In all cases we encapsulate unknown inputs.
-        if forward_stream is not None:
-            # Send events to forward_stream as subunit.
-            forward_result = StreamResultToBytes(forward_stream)
-            # If we're passing non-subunit through, copy:
-            if passthrough_stream is None:
-                # Not passing non-test events - split them off to nothing.
-                router = StreamResultRouter(forward_result)
-                router.add_rule(StreamResult(), 'test_id', test_id=None)
-                result = CopyStreamResult([router, result])
-            else:
-                # otherwise, copy all events to forward_result
-                result = CopyStreamResult([forward_result, result])
-        elif passthrough_stream is not None:
-            if not passthrough_subunit:
-                # Route non-test events to passthrough_stream, unwrapping them for
-                # display.
-                passthrough_result = CatFiles(passthrough_stream)
-            else:
-                passthrough_result = StreamResultToBytes(passthrough_stream)
-            result = StreamResultRouter(result)
-            result.add_rule(passthrough_result, 'test_id', test_id=None)
-        test = ByteStreamToStreamResult(input_stream,
-            non_subunit_name='stdout')
-    else:
-        raise Exception("Unknown protocol version.")
-    result.startTestRun()
-    test.run(result)
-    result.stopTestRun()
-
-
-def filter_by_result(result_factory, output_path, passthrough, forward,
-                     input_stream=sys.stdin, protocol_version=1,
-                     passthrough_subunit=True):
-    """Filter an input stream using a test result.
-
-    :param result_factory: A callable that when passed an output stream
-        returns a TestResult.  It is expected that this result will output
-        to the given stream.
-    :param output_path: A path to send output to.  If None, output will go
-        to ``sys.stdout``.
-    :param passthrough: If True, all non-subunit input will be sent to
-        ``sys.stdout``.  If False, that input will be discarded.
-    :param forward: If True, all subunit input will be forwarded directly to
-        ``sys.stdout`` as well as to the ``TestResult``.
-    :param input_stream: The source of subunit input.  Defaults to
-        ``sys.stdin``.
-    :param protocol_version: The subunit protocol version to expect.
-    :param passthrough_subunit: If True, passthrough should be as subunit.
-    :return: A test result with the results of the run.
-    """
-    if passthrough:
-        passthrough_stream = sys.stdout
-    else:
-        if 1==protocol_version:
-            passthrough_stream = DiscardStream()
-        else:
-            passthrough_stream = None
-
-    if forward:
-        forward_stream = sys.stdout
-    elif 1==protocol_version:
-        forward_stream = DiscardStream()
-    else:
-        forward_stream = None
-
-    if output_path is None:
-        output_to = sys.stdout
-    else:
-        output_to = open(output_path, 'wb')
-
-    try:
-        result = result_factory(output_to)
-        run_tests_from_stream(
-            input_stream, result, passthrough_stream, forward_stream,
-            protocol_version=protocol_version,
-            passthrough_subunit=passthrough_subunit)
-    finally:
-        if output_path:
-            output_to.close()
-    return result
-
-
-def run_filter_script(result_factory, description, post_run_hook=None,
-    protocol_version=1, passthrough_subunit=True):
-    """Main function for simple subunit filter scripts.
-
-    Many subunit filter scripts take a stream of subunit input and use a
-    TestResult to handle the events generated by that stream.  This function
-    wraps a lot of the boiler-plate around that by making a script with
-    options for handling passthrough information and stream forwarding, and
-    that will exit with a successful return code (i.e. 0) if the input stream
-    represents a successful test run.
-
-    :param result_factory: A callable that takes an output stream and returns
-        a test result that outputs to that stream.
-    :param description: A description of the filter script.
-    :param protocol_version: What protocol version to consume/emit.
-    :param passthrough_subunit: If True, passthrough should be as subunit.
-    """
-    parser = make_options(description)
-    (options, args) = parser.parse_args()
-    result = filter_by_result(
-        result_factory, options.output_to, not options.no_passthrough,
-        options.forward, protocol_version=protocol_version,
-        passthrough_subunit=passthrough_subunit,
-        input_stream=find_stream(sys.stdin, args))
-    if post_run_hook:
-        post_run_hook(result)
-    if not safe_hasattr(result, 'wasSuccessful'):
-        result = result.decorated
-    if result.wasSuccessful():
-        sys.exit(0)
-    else:
-        sys.exit(1)
-
-
-def find_stream(stdin, argv):
-    """Find a stream to use as input for filters.
-
-    :param stdin: Standard in - used if no files are named in argv.
-    :param argv: Command line arguments after option parsing. If one file
-        is named, that is opened in read only binary mode and returned.
-        A missing file will raise an exception, as will multiple file names.
-    """
-    assert len(argv) < 2, "Too many filenames."
-    if argv:
-        return open(argv[0], 'rb')
-    else:
-        return stdin
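run_filter_script() carries the shared plumbing for the subunit-* filter scripts: it parses --no-passthrough/--output-to/--forward, pulls input from stdin or a single named file (find_stream), drives it through the result returned by result_factory, and exits 0 or 1 according to wasSuccessful(). A hypothetical filter built on it could look like the sketch below; testtools.TextTestResult is used purely as an example consumer:

    #!/usr/bin/python
    from testtools import TextTestResult

    from subunit.filters import run_filter_script

    # Render a v1 subunit stream as plain, human-readable test output.
    # run_filter_script() calls sys.exit() itself, so nothing follows this call.
    run_filter_script(
        lambda output: TextTestResult(output),
        "Report a subunit stream as human-readable text (illustrative).")
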
diff --git a/lib/subunit/python/subunit/iso8601.py b/lib/subunit/python/subunit/iso8601.py
deleted file mode 100644
index 07855d0..0000000
--- a/lib/subunit/python/subunit/iso8601.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (c) 2007 Michael Twomey
-# 
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-# 
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-# 
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-"""ISO 8601 date time string parsing
-
-Basic usage:
->>> import iso8601
->>> iso8601.parse_date("2007-01-25T12:00:00Z")
-datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
->>>
-
-"""
-
-from datetime import datetime, timedelta, tzinfo
-import re
-import sys
-
-__all__ = ["parse_date", "ParseError"]
-
-# Adapted from http://delete.me.uk/2005/03/iso8601.html
-ISO8601_REGEX_PATTERN = (r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
-    r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
-    r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
-)
-TIMEZONE_REGEX_PATTERN = "(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})"
-ISO8601_REGEX = re.compile(ISO8601_REGEX_PATTERN.encode('utf8'))
-TIMEZONE_REGEX = re.compile(TIMEZONE_REGEX_PATTERN.encode('utf8'))
-
-zulu = "Z".encode('latin-1')
-minus = "-".encode('latin-1')
-
-if sys.version_info < (3, 0):
-    bytes = str
-
-
-class ParseError(Exception):
-    """Raised when there is a problem parsing a date string"""
-
-# Yoinked from python docs
-ZERO = timedelta(0)
-class Utc(tzinfo):
-    """UTC
-    
-    """
-    def utcoffset(self, dt):
-        return ZERO
-
-    def tzname(self, dt):
-        return "UTC"
-
-    def dst(self, dt):
-        return ZERO
-UTC = Utc()
-
-class FixedOffset(tzinfo):
-    """Fixed offset in hours and minutes from UTC
-    
-    """
-    def __init__(self, offset_hours, offset_minutes, name):
-        self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
-        self.__name = name
-
-    def utcoffset(self, dt):
-        return self.__offset
-
-    def tzname(self, dt):
-        return self.__name
-
-    def dst(self, dt):
-        return ZERO
-    
-    def __repr__(self):
-        return "<FixedOffset %r>" % self.__name
-
-def parse_timezone(tzstring, default_timezone=UTC):
-    """Parses ISO 8601 time zone specs into tzinfo offsets
-    
-    """
-    if tzstring == zulu:
-        return default_timezone
-    # This isn't strictly correct, but it's common to encounter dates without
-    # timezones so I'll assume the default (which defaults to UTC).
-    # Addresses issue 4.
-    if tzstring is None:
-        return default_timezone
-    m = TIMEZONE_REGEX.match(tzstring)
-    prefix, hours, minutes = m.groups()
-    hours, minutes = int(hours), int(minutes)
-    if prefix == minus:
-        hours = -hours
-        minutes = -minutes
-    return FixedOffset(hours, minutes, tzstring)
-
-def parse_date(datestring, default_timezone=UTC):
-    """Parses ISO 8601 dates into datetime objects
-    
-    The timezone is parsed from the date string. However it is quite common to
-    have dates without a timezone (not strictly correct). In this case the
-    default timezone specified in default_timezone is used. This is UTC by
-    default.
-    """
-    if not isinstance(datestring, bytes):
-        raise ParseError("Expecting bytes %r" % datestring)
-    m = ISO8601_REGEX.match(datestring)
-    if not m:
-        raise ParseError("Unable to parse date string %r" % datestring)
-    groups = m.groupdict()
-    tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
-    if groups["fraction"] is None:
-        groups["fraction"] = 0
-    else:
-        groups["fraction"] = int(float("0.%s" % groups["fraction"].decode()) * 1e6)
-    return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
-        int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
-        int(groups["fraction"]), tz)
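Note that this bundled iso8601 copy expects a bytes datestring (see the isinstance check above); the module docstring's str example only works on Python 2, where str is bytes. A quick usage sketch:

    from subunit import iso8601

    # Timestamps must be passed as bytes to this vendored copy.
    dt = iso8601.parse_date(b"2007-01-25T12:00:00Z")
    print(dt)           # 2007-01-25 12:00:00+00:00
    print(dt.tzinfo)    # the module's Utc singleton

    offset = iso8601.parse_timezone(b"-06:00")
    print(offset.utcoffset(None))   # -6 hours, printed as -1 day, 18:00:00
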
diff --git a/lib/subunit/python/subunit/progress_model.py b/lib/subunit/python/subunit/progress_model.py
deleted file mode 100644
index 3a6af89a..0000000
--- a/lib/subunit/python/subunit/progress_model.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Support for dealing with progress state."""
-
-class ProgressModel(object):
-    """A model of progress indicators as subunit defines it.
-    
-    Instances of this class represent a single logical operation that is
-    progressing. The operation may have many steps, and some of those steps may
-    supply their own progress information. ProgressModel uses a nested concept
-    where the overall state can be pushed, creating new starting state, and
-    later popped to return to the prior state. Many user interfaces will want
-    to display an overall summary though, and accordingly the pos() and width()
-    methods return overall summary information rather than information on the
-    current subtask.
-
-    The default state is 0/0 - indicating that the overall progress is unknown.
-    Anytime the denominator of pos/width is 0, rendering of a ProgressModel
-    should take this into consideration.
-
-    :ivar _tasks: This private attribute stores the subtasks. Each is a list:
-        [pos, width, overall_numerator, overall_denominator]. The overall fields
-        store the calculated overall numerator and denominator for the state
-        that was pushed.
-    """
-
-    def __init__(self):
-        """Create a ProgressModel.
-        
-        The new model has no progress data at all - it will claim a summary
-        width of zero and position of 0.
-        """
-        self._tasks = []
-        self.push()
-
-    def adjust_width(self, offset):
-        """Adjust the with of the current subtask."""
-        self._tasks[-1][1] += offset
-
-    def advance(self):
-        """Advance the current subtask."""
-        self._tasks[-1][0] += 1
-
-    def pop(self):
-        """Pop a subtask off the ProgressModel.
-
-        See push for a description of how push and pop work.
-        """
-        self._tasks.pop()
-
-    def pos(self):
-        """Return how far through the operation has progressed."""
-        if not self._tasks:
-            return 0
-        task = self._tasks[-1]
-        if len(self._tasks) > 1:
-            # scale up the overall pos by the current task or preserve it if
-            # no current width is known.
-            offset = task[2] * (task[1] or 1)
-        else:
-            offset = 0
-        return offset + task[0]
-
-    def push(self):
-        """Push a new subtask.
-
-        After pushing a new subtask, the overall progress hasn't changed. Calls
-        to adjust_width, advance, set_width will only alter the progress within
-        the range that calling 'advance' would have before - the subtask
-        represents progressing one step in the earlier task.
-
-        Call pop() to restore the progress model to the state before push was
-        called.
-        """
-        self._tasks.append([0, 0, self.pos(), self.width()])
-
-    def set_width(self, width):
-        """Set the width of the current subtask."""
-        self._tasks[-1][1] = width
-
-    def width(self):
-        """Return the total width of the operation."""
-        if not self._tasks:
-            return 0
-        task = self._tasks[-1]
-        if len(self._tasks) > 1:
-            # scale up the overall width by the current task or preserve it if
-            # no current width is known.
-            return task[3] * (task[1] or 1)
-        else:
-            return task[1]
-
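The nesting described in the ProgressModel docstring works out as follows: pushing a subtask scales the parent's stored position and width by the subtask's width, so progress inside the subtask is reported on the parent's overall scale. A small worked sketch:

    from subunit.progress_model import ProgressModel

    model = ProgressModel()
    model.set_width(5)
    model.advance()
    model.advance()
    print("%d/%d" % (model.pos(), model.width()))   # 2/5

    model.push()                    # start a subtask for the third step
    model.set_width(3)
    model.advance()
    print("%d/%d" % (model.pos(), model.width()))   # 7/15 (2*3 + 1 out of 5*3)

    model.pop()                     # back to the parent task's view
    print("%d/%d" % (model.pos(), model.width()))   # 2/5
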
diff --git a/lib/subunit/python/subunit/run.py b/lib/subunit/python/subunit/run.py
deleted file mode 100755
index cf9cc01..0000000
--- a/lib/subunit/python/subunit/run.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-#
-# Simple subunit testrunner for python
-# Copyright (C) Jelmer Vernooij <jelmer at samba.org> 2007
-#   
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Run a unittest testcase reporting results as Subunit.
-
-  $ python -m subunit.run mylib.tests.test_suite
-"""
-
-import io
-import os
-import sys
-
-from testtools import ExtendedToStreamDecorator
-from testtools.testsuite import iterate_tests
-
-from subunit import StreamResultToBytes, get_default_formatter
-from subunit.test_results import AutoTimingTestResultDecorator
-from testtools.run import (
-    BUFFEROUTPUT,
-    CATCHBREAK,
-    FAILFAST,
-    list_test,
-    TestProgram,
-    USAGE_AS_MAIN,
-    )
-
-
-class SubunitTestRunner(object):
-    def __init__(self, verbosity=None, failfast=None, buffer=None, stream=None,
-        stdout=None):
-        """Create a TestToolsTestRunner.
-
-        :param verbosity: Ignored.
-        :param failfast: Stop running tests at the first failure.
-        :param buffer: Ignored.
-        :param stream: Upstream unittest stream parameter.
-        :param stdout: Testtools stream parameter.
-
-        Either stream or stdout can be supplied, and stream will take
-        precedence.
-        """
-        self.failfast = failfast
-        self.stream = stream or stdout or sys.stdout
-
-    def run(self, test):
-        "Run the given test case or test suite."
-        result, _ = self._list(test)
-        result = ExtendedToStreamDecorator(result)
-        result = AutoTimingTestResultDecorator(result)
-        if self.failfast is not None:
-            result.failfast = self.failfast
-        result.startTestRun()
-        try:
-            test(result)
-        finally:
-            result.stopTestRun()
-        return result
-
-    def list(self, test):
-        "List the test."
-        result, errors = self._list(test)
-        if errors:
-            failed_descr = '\n'.join(errors).encode('utf8')
-            result.status(file_name="import errors", runnable=False,
-                file_bytes=failed_descr, mime_type="text/plain;charset=utf8")
-            sys.exit(2)
-
-    def _list(self, test):
-        test_ids, errors = list_test(test)
-        try:
-            fileno = self.stream.fileno()
-        except:
-            fileno = None
-        if fileno is not None:
-            stream = os.fdopen(fileno, 'wb', 0)
-        else:
-            stream = self.stream
-        result = StreamResultToBytes(stream)
-        for test_id in test_ids:
-            result.status(test_id=test_id, test_status='exists')
-        return result, errors
-
-
-class SubunitTestProgram(TestProgram):
-
-    USAGE = USAGE_AS_MAIN
-
-    def usageExit(self, msg=None):
-        if msg:
-            print (msg)
-        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
-                 'buffer': ''}
-        if self.failfast != False:
-            usage['failfast'] = FAILFAST
-        if self.catchbreak != False:
-            usage['catchbreak'] = CATCHBREAK
-        if self.buffer != False:
-            usage['buffer'] = BUFFEROUTPUT
-        usage_text = self.USAGE % usage
-        usage_lines = usage_text.split('\n')
-        usage_lines.insert(2, "Run a test suite with a subunit reporter.")
-        usage_lines.insert(3, "")
-        print('\n'.join(usage_lines))
-        sys.exit(2)
-
-
-def main(argv=None, stdout=None):
-    if argv is None:
-        argv = sys.argv
-    runner = SubunitTestRunner
-    # stdout is None except in unit tests.
-    if stdout is None:
-        stdout = sys.stdout
-        # XXX: This is broken code- SUBUNIT_FORMATTER is not being honoured.
-        stream = get_default_formatter()
-        # Disable the default buffering, for Python 2.x where pdb doesn't do it
-        # on non-ttys.
-        if hasattr(stdout, 'fileno'):
-            # Patch stdout to be unbuffered, so that pdb works well on 2.6/2.7.
-            binstdout = io.open(stdout.fileno(), 'wb', 0)
-            if sys.version_info[0] > 2:
-                sys.stdout = io.TextIOWrapper(binstdout, encoding=sys.stdout.encoding)
-            else:
-                sys.stdout = binstdout
-            stdout = sys.stdout
-    SubunitTestProgram(module=None, argv=argv, testRunner=runner,
-        stdout=stdout, exit=False)
-
-
-if __name__ == '__main__':
-    main()
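Callers normally reach this module through the command line (python -m subunit.run ...), but SubunitTestRunner can also be driven directly. A minimal sketch, where "mypackage.tests" is a hypothetical placeholder name:

    import sys
    import unittest

    from subunit.run import SubunitTestRunner

    # Load any ordinary unittest suite; "mypackage.tests" is a placeholder.
    suite = unittest.TestLoader().loadTestsFromName("mypackage.tests")
    runner = SubunitTestRunner(stdout=sys.stdout)
    result = runner.run(suite)      # emits a subunit v2 byte stream on stdout
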
diff --git a/lib/subunit/python/subunit/test_results.py b/lib/subunit/python/subunit/test_results.py
deleted file mode 100644
index b3ca968..0000000
--- a/lib/subunit/python/subunit/test_results.py
+++ /dev/null
@@ -1,728 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""TestResult helper classes used to by subunit."""
-
-import csv
-import datetime
-
-import testtools
-from testtools.content import (
-    text_content,
-    TracebackContent,
-    )
-from testtools import StreamResult
-
-from subunit import iso8601
-import subunit
-
-
-# NOT a TestResult, because we are implementing the interface, not inheriting
-# it.
-class TestResultDecorator(object):
-    """General pass-through decorator.
-
-    This provides a base that other TestResults can inherit from to
-    gain basic forwarding functionality. It also takes care of
-    handling the case where the target doesn't support newer methods
-    or features by degrading them.
-    """
-
-    # XXX: Since lp:testtools r250, this is in testtools. Once it's released,
-    # we should gut this and just use that.
-
-    def __init__(self, decorated):
-        """Create a TestResultDecorator forwarding to decorated."""
-        # Make every decorator degrade gracefully.
-        self.decorated = testtools.ExtendedToOriginalDecorator(decorated)
-
-    def startTest(self, test):
-        return self.decorated.startTest(test)
-
-    def startTestRun(self):
-        return self.decorated.startTestRun()
-
-    def stopTest(self, test):
-        return self.decorated.stopTest(test)
-
-    def stopTestRun(self):
-        return self.decorated.stopTestRun()
-
-    def addError(self, test, err=None, details=None):
-        return self.decorated.addError(test, err, details=details)
-
-    def addFailure(self, test, err=None, details=None):
-        return self.decorated.addFailure(test, err, details=details)
-
-    def addSuccess(self, test, details=None):
-        return self.decorated.addSuccess(test, details=details)
-
-    def addSkip(self, test, reason=None, details=None):
-        return self.decorated.addSkip(test, reason, details=details)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        return self.decorated.addExpectedFailure(test, err, details=details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        return self.decorated.addUnexpectedSuccess(test, details=details)
-
-    def _get_failfast(self):
-        return getattr(self.decorated, 'failfast', False)
-
-    def _set_failfast(self, value):
-        self.decorated.failfast = value
-    failfast = property(_get_failfast, _set_failfast)
-
-    def progress(self, offset, whence):
-        return self.decorated.progress(offset, whence)
-
-    def wasSuccessful(self):
-        return self.decorated.wasSuccessful()
-
-    @property
-    def shouldStop(self):
-        return self.decorated.shouldStop
-
-    def stop(self):
-        return self.decorated.stop()
-
-    @property
-    def testsRun(self):
-        return self.decorated.testsRun
-
-    def tags(self, new_tags, gone_tags):
-        return self.decorated.tags(new_tags, gone_tags)
-
-    def time(self, a_datetime):
-        return self.decorated.time(a_datetime)
-
-
-class HookedTestResultDecorator(TestResultDecorator):
-    """A TestResult which calls a hook on every event."""
-
-    def __init__(self, decorated):
-        self.super = super(HookedTestResultDecorator, self)
-        self.super.__init__(decorated)
-
-    def startTest(self, test):
-        self._before_event()
-        return self.super.startTest(test)
-
-    def startTestRun(self):
-        self._before_event()
-        return self.super.startTestRun()
-
-    def stopTest(self, test):
-        self._before_event()
-        return self.super.stopTest(test)
-
-    def stopTestRun(self):
-        self._before_event()
-        return self.super.stopTestRun()
-
-    def addError(self, test, err=None, details=None):
-        self._before_event()
-        return self.super.addError(test, err, details=details)
-
-    def addFailure(self, test, err=None, details=None):
-        self._before_event()
-        return self.super.addFailure(test, err, details=details)
-
-    def addSuccess(self, test, details=None):
-        self._before_event()
-        return self.super.addSuccess(test, details=details)
-
-    def addSkip(self, test, reason=None, details=None):
-        self._before_event()
-        return self.super.addSkip(test, reason, details=details)
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        self._before_event()
-        return self.super.addExpectedFailure(test, err, details=details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        self._before_event()
-        return self.super.addUnexpectedSuccess(test, details=details)
-
-    def progress(self, offset, whence):
-        self._before_event()
-        return self.super.progress(offset, whence)
-
-    def wasSuccessful(self):
-        self._before_event()
-        return self.super.wasSuccessful()
-
-    @property
-    def shouldStop(self):
-        self._before_event()
-        return self.super.shouldStop
-
-    def stop(self):
-        self._before_event()
-        return self.super.stop()
-
-    def time(self, a_datetime):
-        self._before_event()
-        return self.super.time(a_datetime)
-
-
-class AutoTimingTestResultDecorator(HookedTestResultDecorator):
-    """Decorate a TestResult to add time events to a test run.
-
-    By default this will cause a time event before every test event,
-    but if explicit time data is being provided by the test run, then
-    this decorator will turn itself off to prevent causing confusion.
-    """
-
-    def __init__(self, decorated):
-        self._time = None
-        super(AutoTimingTestResultDecorator, self).__init__(decorated)
-
-    def _before_event(self):
-        time = self._time
-        if time is not None:
-            return
-        time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
-        self.decorated.time(time)
-
-    def progress(self, offset, whence):
-        return self.decorated.progress(offset, whence)
-
-    @property
-    def shouldStop(self):
-        return self.decorated.shouldStop
-
-    def time(self, a_datetime):
-        """Provide a timestamp for the current test activity.
-
-        :param a_datetime: If None, automatically add timestamps before every
-            event (this is the default behaviour if time() is not called at
-            all).  If not None, pass the provided time onto the decorated
-            result object and disable automatic timestamps.
-        """
-        self._time = a_datetime
-        return self.decorated.time(a_datetime)
-
-
-class TagsMixin(object):
-
-    def __init__(self):
-        self._clear_tags()
-
-    def _clear_tags(self):
-        self._global_tags = set(), set()
-        self._test_tags = None
-
-    def _get_active_tags(self):
-        global_new, global_gone = self._global_tags
-        if self._test_tags is None:
-            return set(global_new)
-        test_new, test_gone = self._test_tags
-        return global_new.difference(test_gone).union(test_new)
-
-    def _get_current_scope(self):
-        if self._test_tags:
-            return self._test_tags
-        return self._global_tags
-
-    def _flush_current_scope(self, tag_receiver):
-        new_tags, gone_tags = self._get_current_scope()
-        if new_tags or gone_tags:
-            tag_receiver.tags(new_tags, gone_tags)
-        if self._test_tags:
-            self._test_tags = set(), set()
-        else:
-            self._global_tags = set(), set()
-
-    def startTestRun(self):
-        self._clear_tags()
-
-    def startTest(self, test):
-        self._test_tags = set(), set()
-
-    def stopTest(self, test):
-        self._test_tags = None
-
-    def tags(self, new_tags, gone_tags):
-        """Handle tag instructions.
-
-        Adds and removes tags as appropriate. If a test is currently running,
-        tags are not affected for subsequent tests.
-
-        :param new_tags: Tags to add.
-        :param gone_tags: Tags to remove.
-        """
-        current_new_tags, current_gone_tags = self._get_current_scope()
-        current_new_tags.update(new_tags)
-        current_new_tags.difference_update(gone_tags)
-        current_gone_tags.update(gone_tags)
-        current_gone_tags.difference_update(new_tags)
-
-
-class TagCollapsingDecorator(HookedTestResultDecorator, TagsMixin):
-    """Collapses many 'tags' calls into one where possible."""
-
-    def __init__(self, result):
-        super(TagCollapsingDecorator, self).__init__(result)
-        self._clear_tags()
-
-    def _before_event(self):
-        self._flush_current_scope(self.decorated)
-
-    def tags(self, new_tags, gone_tags):
-        TagsMixin.tags(self, new_tags, gone_tags)
-
-
-class TimeCollapsingDecorator(HookedTestResultDecorator):
-    """Only pass on the first and last of a consecutive sequence of times."""
-
-    def __init__(self, decorated):
-        super(TimeCollapsingDecorator, self).__init__(decorated)
-        self._last_received_time = None
-        self._last_sent_time = None
-
-    def _before_event(self):
-        if self._last_received_time is None:
-            return
-        if self._last_received_time != self._last_sent_time:
-            self.decorated.time(self._last_received_time)
-            self._last_sent_time = self._last_received_time
-        self._last_received_time = None
-
-    def time(self, a_time):
-        # Don't upcall, because we don't want to call _before_event, it's only
-        # for non-time events.
-        if self._last_received_time is None:
-            self.decorated.time(a_time)
-            self._last_sent_time = a_time
-        self._last_received_time = a_time
-
-
-def and_predicates(predicates):
-    """Return a predicate that is true iff all predicates are true."""
-    # XXX: Should probably be in testtools to be better used by matchers. jml
-    return lambda *args, **kwargs: all(p(*args, **kwargs) for p in predicates)
-
-
-def make_tag_filter(with_tags, without_tags):
-    """Make a callback that checks tests against tags."""
-
-    with_tags = with_tags and set(with_tags) or None
-    without_tags = without_tags and set(without_tags) or None
-
-    def check_tags(test, outcome, err, details, tags):
-        if with_tags and not with_tags <= tags:
-            return False
-        if without_tags and bool(without_tags & tags):
-            return False
-        return True
-
-    return check_tags
-
-
-class _PredicateFilter(TestResultDecorator, TagsMixin):
-
-    def __init__(self, result, predicate):
-        super(_PredicateFilter, self).__init__(result)
-        self._clear_tags()
-        self.decorated = TimeCollapsingDecorator(
-            TagCollapsingDecorator(self.decorated))
-        self._predicate = predicate
-        # The current test (for filtering tags)
-        self._current_test = None
-        # Has the current test been filtered (for outputting test tags)
-        self._current_test_filtered = None
-        # Calls to this result that we don't know whether to forward on yet.
-        self._buffered_calls = []
-
-    def filter_predicate(self, test, outcome, error, details):
-        return self._predicate(
-            test, outcome, error, details, self._get_active_tags())
-
-    def addError(self, test, err=None, details=None):
-        if (self.filter_predicate(test, 'error', err, details)):
-            self._buffered_calls.append(
-                ('addError', [test, err], {'details': details}))
-        else:
-            self._filtered()
-
-    def addFailure(self, test, err=None, details=None):
-        if (self.filter_predicate(test, 'failure', err, details)):
-            self._buffered_calls.append(
-                ('addFailure', [test, err], {'details': details}))
-        else:
-            self._filtered()
-
-    def addSkip(self, test, reason=None, details=None):
-        if (self.filter_predicate(test, 'skip', reason, details)):
-            self._buffered_calls.append(
-                ('addSkip', [test, reason], {'details': details}))
-        else:
-            self._filtered()
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        if self.filter_predicate(test, 'expectedfailure', err, details):
-            self._buffered_calls.append(
-                ('addExpectedFailure', [test, err], {'details': details}))
-        else:
-            self._filtered()
-
-    def addUnexpectedSuccess(self, test, details=None):
-        self._buffered_calls.append(
-            ('addUnexpectedSuccess', [test], {'details': details}))
-
-    def addSuccess(self, test, details=None):
-        if (self.filter_predicate(test, 'success', None, details)):
-            self._buffered_calls.append(
-                ('addSuccess', [test], {'details': details}))
-        else:
-            self._filtered()
-
-    def _filtered(self):
-        self._current_test_filtered = True
-
-    def startTest(self, test):
-        """Start a test.
-
-        Not directly passed to the client, but used for handling of tags
-        correctly.
-        """
-        TagsMixin.startTest(self, test)
-        self._current_test = test
-        self._current_test_filtered = False
-        self._buffered_calls.append(('startTest', [test], {}))
-
-    def stopTest(self, test):
-        """Stop a test.
-
-        Not directly passed to the client, but used for handling of tags
-        correctly.
-        """
-        if not self._current_test_filtered:
-            for method, args, kwargs in self._buffered_calls:
-                getattr(self.decorated, method)(*args, **kwargs)
-            self.decorated.stopTest(test)
-        self._current_test = None
-        self._current_test_filtered = None
-        self._buffered_calls = []
-        TagsMixin.stopTest(self, test)
-
-    def tags(self, new_tags, gone_tags):
-        TagsMixin.tags(self, new_tags, gone_tags)
-        if self._current_test is not None:
-            self._buffered_calls.append(('tags', [new_tags, gone_tags], {}))
-        else:
-            return super(_PredicateFilter, self).tags(new_tags, gone_tags)
-
-    def time(self, a_time):
-        return self.decorated.time(a_time)
-
-    def id_to_orig_id(self, id):
-        if id.startswith("subunit.RemotedTestCase."):
-            return id[len("subunit.RemotedTestCase."):]
-        return id
-
-
-class TestResultFilter(TestResultDecorator):
-    """A pyunit TestResult interface implementation which filters tests.
-
-    Tests that pass the filter are handed on to another TestResult instance
-    for further processing/reporting. To obtain the filtered results,
-    the other instance must be interrogated.
-
-    :ivar result: The result that tests are passed to after filtering.
-    :ivar filter_predicate: The callback run to decide whether to pass
-        a result.
-    """
-
-    def __init__(self, result, filter_error=False, filter_failure=False,
-        filter_success=True, filter_skip=False, filter_xfail=False,
-        filter_predicate=None, fixup_expected_failures=None):
-        """Create a FilterResult object filtering to result.
-
-        :param filter_error: Filter out errors.
-        :param filter_failure: Filter out failures.
-        :param filter_success: Filter out successful tests.
-        :param filter_skip: Filter out skipped tests.
-        :param filter_xfail: Filter out expected failure tests.
-        :param filter_predicate: A callable taking (test, outcome, err,
-            details, tags) and returning True if the result should be passed
-            through.  err and details may be none if no error or extra
-            metadata is available. outcome is the name of the outcome such
-            as 'success' or 'failure'. tags is new in 0.0.8; 0.0.7 filters
-            are still supported but should be updated to accept the tags
-            parameter for efficiency.
-        :param fixup_expected_failures: Set of test ids to consider known
-            failing.
-        """
-        predicates = []
-        if filter_error:
-            predicates.append(
-                lambda t, outcome, e, d, tags: outcome != 'error')
-        if filter_failure:
-            predicates.append(
-                lambda t, outcome, e, d, tags: outcome != 'failure')
-        if filter_success:
-            predicates.append(
-                lambda t, outcome, e, d, tags: outcome != 'success')
-        if filter_skip:
-            predicates.append(
-                lambda t, outcome, e, d, tags: outcome != 'skip')
-        if filter_xfail:
-            predicates.append(
-                lambda t, outcome, e, d, tags: outcome != 'expectedfailure')
-        if filter_predicate is not None:
-            def compat(test, outcome, error, details, tags):
-                # 0.0.7 and earlier did not support the 'tags' parameter.
-                try:
-                    return filter_predicate(
-                        test, outcome, error, details, tags)
-                except TypeError:
-                    return filter_predicate(test, outcome, error, details)
-            predicates.append(compat)
-        predicate = and_predicates(predicates)
-        super(TestResultFilter, self).__init__(
-            _PredicateFilter(result, predicate))
-        if fixup_expected_failures is None:
-            self._fixup_expected_failures = frozenset()
-        else:
-            self._fixup_expected_failures = fixup_expected_failures
-
-    def addError(self, test, err=None, details=None):
-        if self._failure_expected(test):
-            self.addExpectedFailure(test, err=err, details=details)
-        else:
-            super(TestResultFilter, self).addError(
-                test, err=err, details=details)
-
-    def addFailure(self, test, err=None, details=None):
-        if self._failure_expected(test):
-            self.addExpectedFailure(test, err=err, details=details)
-        else:
-            super(TestResultFilter, self).addFailure(
-                test, err=err, details=details)
-
-    def addSuccess(self, test, details=None):
-        if self._failure_expected(test):
-            self.addUnexpectedSuccess(test, details=details)
-        else:
-            super(TestResultFilter, self).addSuccess(test, details=details)
-
-    def _failure_expected(self, test):
-        return (test.id() in self._fixup_expected_failures)
-
-
-class TestIdPrintingResult(testtools.TestResult):
-    """Print test ids to a stream.
-
-    Implements both TestResult and StreamResult, for compatibility.
-    """
-
-    def __init__(self, stream, show_times=False, show_exists=False):
-        """Create a FilterResult object outputting to stream."""
-        super(TestIdPrintingResult, self).__init__()
-        self._stream = stream
-        self.show_exists = show_exists
-        self.show_times = show_times
-
-    def startTestRun(self):
-        self.failed_tests = 0
-        self.__time = None
-        self._test = None
-        self._test_duration = 0
-        self._active_tests = {}
-
-    def addError(self, test, err):
-        self.failed_tests += 1
-        self._test = test
-
-    def addFailure(self, test, err):
-        self.failed_tests += 1
-        self._test = test
-
-    def addSuccess(self, test):
-        self._test = test
-
-    def addSkip(self, test, reason=None, details=None):
-        self._test = test
-
-    def addUnexpectedSuccess(self, test, details=None):
-        self.failed_tests += 1
-        self._test = test
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        self._test = test
-
-    def reportTest(self, test_id, duration):
-        if self.show_times:
-            seconds = duration.seconds
-            seconds += duration.days * 3600 * 24
-            seconds += duration.microseconds / 1000000.0
-            self._stream.write(test_id + ' %0.3f\n' % seconds)
-        else:
-            self._stream.write(test_id + '\n')
-
-    def startTest(self, test):
-        self._start_time = self._time()
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        if not test_id:
-            return
-        if timestamp is not None:
-            self.time(timestamp)
-        if test_status=='exists':
-            if self.show_exists:
-                self.reportTest(test_id, 0)
-        elif test_status in ('inprogress', None):
-            self._active_tests[test_id] = self._time()
-        else:
-            self._end_test(test_id)
-
-    def _end_test(self, test_id):
-        test_start = self._active_tests.pop(test_id, None)
-        if not test_start:
-            test_duration = 0
-        else:
-            test_duration = self._time() - test_start
-        self.reportTest(test_id, test_duration)
-
-    def stopTest(self, test):
-        test_duration = self._time() - self._start_time
-        self.reportTest(self._test.id(), test_duration)
-
-    def time(self, time):
-        self.__time = time
-
-    def _time(self):
-        return self.__time
-
-    def wasSuccessful(self):
-        "Tells whether or not this result was a success"
-        return self.failed_tests == 0
-
-    def stopTestRun(self):
-        for test_id in list(self._active_tests.keys()):
-            self._end_test(test_id)
-
-
-class TestByTestResult(testtools.TestResult):
-    """Call something every time a test completes."""
-
-# XXX: In testtools since lp:testtools r249.  Once that's released, just
-# import that.
-
-    def __init__(self, on_test):
-        """Construct a ``TestByTestResult``.
-
-        :param on_test: A callable that take a test case, a status (one of
-            "success", "failure", "error", "skip", or "xfail"), a start time
-            (a ``datetime`` with timezone), a stop time, an iterable of tags,
-            and a details dict. Is called at the end of each test (i.e. on
-            ``stopTest``) with the accumulated values for that test.
-        """
-        super(TestByTestResult, self).__init__()
-        self._on_test = on_test
-
-    def startTest(self, test):
-        super(TestByTestResult, self).startTest(test)
-        self._start_time = self._now()
-        # There's no supported (i.e. tested) behaviour that relies on these
-        # being set, but it makes me more comfortable all the same. -- jml
-        self._status = None
-        self._details = None
-        self._stop_time = None
-
-    def stopTest(self, test):
-        self._stop_time = self._now()
-        super(TestByTestResult, self).stopTest(test)
-        self._on_test(
-            test=test,
-            status=self._status,
-            start_time=self._start_time,
-            stop_time=self._stop_time,
-            # current_tags is new in testtools 0.9.13.
-            tags=getattr(self, 'current_tags', None),
-            details=self._details)
-
-    def _err_to_details(self, test, err, details):
-        if details:
-            return details
-        return {'traceback': TracebackContent(err, test)}
-
-    def addSuccess(self, test, details=None):
-        super(TestByTestResult, self).addSuccess(test)
-        self._status = 'success'
-        self._details = details
-
-    def addFailure(self, test, err=None, details=None):
-        super(TestByTestResult, self).addFailure(test, err, details)
-        self._status = 'failure'
-        self._details = self._err_to_details(test, err, details)
-
-    def addError(self, test, err=None, details=None):
-        super(TestByTestResult, self).addError(test, err, details)
-        self._status = 'error'
-        self._details = self._err_to_details(test, err, details)
-
-    def addSkip(self, test, reason=None, details=None):
-        super(TestByTestResult, self).addSkip(test, reason, details)
-        self._status = 'skip'
-        if details is None:
-            details = {'reason': text_content(reason)}
-        elif reason:
-            # XXX: What if details already has 'reason' key?
-            details['reason'] = text_content(reason)
-        self._details = details
-
-    def addExpectedFailure(self, test, err=None, details=None):
-        super(TestByTestResult, self).addExpectedFailure(test, err, details)
-        self._status = 'xfail'
-        self._details = self._err_to_details(test, err, details)
-
-    def addUnexpectedSuccess(self, test, details=None):
-        super(TestByTestResult, self).addUnexpectedSuccess(test, details)
-        self._status = 'success'
-        self._details = details
-
-
-class CsvResult(TestByTestResult):
-
-    def __init__(self, stream):
-        super(CsvResult, self).__init__(self._on_test)
-        self._write_row = csv.writer(stream).writerow
-
-    def _on_test(self, test, status, start_time, stop_time, tags, details):
-        self._write_row([test.id(), status, start_time, stop_time])
-
-    def startTestRun(self):
-        super(CsvResult, self).startTestRun()
-        self._write_row(['test', 'status', 'start_time', 'stop_time'])
-
-
-class CatFiles(StreamResult):
-    """Cat file attachments received to a stream."""
-
-    def __init__(self, byte_stream):
-        self.stream = subunit.make_stream_binary(byte_stream)
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        if file_name is not None:
-            self.stream.write(file_bytes)
-            self.stream.flush()
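Most of these classes are decorators meant to be stacked in front of an ordinary TestResult. As an illustration of the filtering path, TestResultFilter drops successes by default and only forwards the remaining outcomes, buffered per test, to the wrapped result. The _Sample test case below is made up for the example:

    import unittest

    from subunit.test_results import TestResultFilter


    class _Sample(unittest.TestCase):
        def test_passes(self):
            pass

        def test_fails(self):
            self.fail("boom")


    backing = unittest.TestResult()
    filtered = TestResultFilter(backing)    # filter_success=True by default
    suite = unittest.TestLoader().loadTestsFromTestCase(_Sample)

    filtered.startTestRun()
    suite.run(filtered)
    filtered.stopTestRun()

    # Only the failing test should reach the wrapped result.
    print("%d test(s), %d failure(s)" % (backing.testsRun, len(backing.failures)))
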
diff --git a/lib/subunit/python/subunit/tests/__init__.py b/lib/subunit/python/subunit/tests/__init__.py
deleted file mode 100644
index 29aed8d..0000000
--- a/lib/subunit/python/subunit/tests/__init__.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import sys
-from unittest import TestLoader
-
-from testscenarios import generate_scenarios
-
-
-# Before the test module imports to avoid circularity.
-# For testing: different pythons have different str() implementations.
-if sys.version_info > (3, 0):
-    _remote_exception_repr = "testtools.testresult.real._StringException"
-    _remote_exception_str = "Traceback (most recent call last):\ntesttools.testresult.real._StringException"
-    _remote_exception_str_chunked = "57\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
-else:
-    _remote_exception_repr = "_StringException"
-    _remote_exception_str = "Traceback (most recent call last):\n_StringException"
-    _remote_exception_str_chunked = "3D\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
-
-
-from subunit.tests import (
-    test_chunked,
-    test_details,
-    test_filters,
-    test_output_filter,
-    test_progress_model,
-    test_run,
-    test_subunit_filter,
-    test_subunit_stats,
-    test_subunit_tags,
-    test_tap2subunit,
-    test_test_protocol,
-    test_test_protocol2,
-    test_test_results,
-    )
-
-
-def test_suite():
-    loader = TestLoader()
-    result = loader.loadTestsFromModule(test_chunked)
-    result.addTest(loader.loadTestsFromModule(test_details))
-    result.addTest(loader.loadTestsFromModule(test_filters))
-    result.addTest(loader.loadTestsFromModule(test_progress_model))
-    result.addTest(loader.loadTestsFromModule(test_test_results))
-    result.addTest(loader.loadTestsFromModule(test_test_protocol))
-    result.addTest(loader.loadTestsFromModule(test_test_protocol2))
-    result.addTest(loader.loadTestsFromModule(test_tap2subunit))
-    result.addTest(loader.loadTestsFromModule(test_subunit_filter))
-    result.addTest(loader.loadTestsFromModule(test_subunit_tags))
-    result.addTest(loader.loadTestsFromModule(test_subunit_stats))
-    result.addTest(loader.loadTestsFromModule(test_run))
-    result.addTests(
-        generate_scenarios(loader.loadTestsFromModule(test_output_filter))
-    )
-    return result
diff --git a/lib/subunit/python/subunit/tests/sample-script.py b/lib/subunit/python/subunit/tests/sample-script.py
deleted file mode 100755
index 91838f6..0000000
--- a/lib/subunit/python/subunit/tests/sample-script.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-import sys
-if sys.platform == "win32":
-    import msvcrt, os
-    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
-if len(sys.argv) == 2:
-    # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args 
-    # uses this code path to be sure that the arguments were passed to
-    # sample-script.py
-    print("test fail")
-    print("error fail")
-    sys.exit(0)
-print("test old mcdonald")
-print("success old mcdonald")
-print("test bing crosby")
-print("failure bing crosby [")
-print("foo.c:53:ERROR invalid state")
-print("]")
-print("test an error")
-print("error an error")
-sys.exit(0)
diff --git a/lib/subunit/python/subunit/tests/sample-two-script.py b/lib/subunit/python/subunit/tests/sample-two-script.py
deleted file mode 100755
index fc73dfc..0000000
--- a/lib/subunit/python/subunit/tests/sample-two-script.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python
-import sys
-print("test old mcdonald")
-print("success old mcdonald")
-print("test bing crosby")
-print("success bing crosby")
-sys.exit(0)
diff --git a/lib/subunit/python/subunit/tests/test_chunked.py b/lib/subunit/python/subunit/tests/test_chunked.py
deleted file mode 100644
index 5100b32..0000000
--- a/lib/subunit/python/subunit/tests/test_chunked.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#  Copyright (C) 2011  Martin Pool <mbp at sourcefrog.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import unittest
-
-from testtools.compat import _b, BytesIO
-
-import subunit.chunked
-
-
-class TestDecode(unittest.TestCase):
-
-    def setUp(self):
-        unittest.TestCase.setUp(self)
-        self.output = BytesIO()
-        self.decoder = subunit.chunked.Decoder(self.output)
-
-    def test_close_read_length_short_errors(self):
-        self.assertRaises(ValueError, self.decoder.close)
-
-    def test_close_body_short_errors(self):
-        self.assertEqual(None, self.decoder.write(_b('2\r\na')))
-        self.assertRaises(ValueError, self.decoder.close)
-
-    def test_close_body_buffered_data_errors(self):
-        self.assertEqual(None, self.decoder.write(_b('2\r')))
-        self.assertRaises(ValueError, self.decoder.close)
-
-    def test_close_after_finished_stream_safe(self):
-        self.assertEqual(None, self.decoder.write(_b('2\r\nab')))
-        self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
-        self.decoder.close()
-
-    def test_decode_nothing(self):
-        self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
-        self.assertEqual(_b(''), self.output.getvalue())
-
-    def test_decode_serialised_form(self):
-        self.assertEqual(None, self.decoder.write(_b("F\r\n")))
-        self.assertEqual(None, self.decoder.write(_b("serialised\n")))
-        self.assertEqual(_b(''), self.decoder.write(_b("form0\r\n")))
-
-    def test_decode_short(self):
-        self.assertEqual(_b(''), self.decoder.write(_b('3\r\nabc0\r\n')))
-        self.assertEqual(_b('abc'), self.output.getvalue())
-
-    def test_decode_combines_short(self):
-        self.assertEqual(_b(''), self.decoder.write(_b('6\r\nabcdef0\r\n')))
-        self.assertEqual(_b('abcdef'), self.output.getvalue())
-
-    def test_decode_excess_bytes_from_write(self):
-        self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
-        self.assertEqual(_b('abc'), self.output.getvalue())
-
-    def test_decode_write_after_finished_errors(self):
-        self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
-        self.assertRaises(ValueError, self.decoder.write, _b(''))
-
-    def test_decode_hex(self):
-        self.assertEqual(_b(''), self.decoder.write(_b('A\r\n12345678900\r\n')))
-        self.assertEqual(_b('1234567890'), self.output.getvalue())
-
-    def test_decode_long_ranges(self):
-        self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
-        self.assertEqual(None, self.decoder.write(_b('1' * 65536)))
-        self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
-        self.assertEqual(None, self.decoder.write(_b('2' * 65536)))
-        self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
-        self.assertEqual(_b('1' * 65536 + '2' * 65536), self.output.getvalue())
-
-    def test_decode_newline_nonstrict(self):
-        """Tolerate chunk markers with no CR character."""
-        # From <http://pad.lv/505078>
-        self.decoder = subunit.chunked.Decoder(self.output, strict=False)
-        self.assertEqual(None, self.decoder.write(_b('a\n')))
-        self.assertEqual(None, self.decoder.write(_b('abcdeabcde')))
-        self.assertEqual(_b(''), self.decoder.write(_b('0\n')))
-        self.assertEqual(_b('abcdeabcde'), self.output.getvalue())
-
-    def test_decode_strict_newline_only(self):
-        """Reject chunk markers with no CR character in strict mode."""
-        # From <http://pad.lv/505078>
-        self.assertRaises(ValueError,
-            self.decoder.write, _b('a\n'))
-
-    def test_decode_strict_multiple_crs(self):
-        self.assertRaises(ValueError,
-            self.decoder.write, _b('a\r\r\n'))
-
-    def test_decode_short_header(self):
-        self.assertRaises(ValueError,
-            self.decoder.write, _b('\n'))
-
-
-class TestEncode(unittest.TestCase):
-
-    def setUp(self):
-        unittest.TestCase.setUp(self)
-        self.output = BytesIO()
-        self.encoder = subunit.chunked.Encoder(self.output)
-
-    def test_encode_nothing(self):
-        self.encoder.close()
-        self.assertEqual(_b('0\r\n'), self.output.getvalue())
-
-    def test_encode_empty(self):
-        self.encoder.write(_b(''))
-        self.encoder.close()
-        self.assertEqual(_b('0\r\n'), self.output.getvalue())
-
-    def test_encode_short(self):
-        self.encoder.write(_b('abc'))
-        self.encoder.close()
-        self.assertEqual(_b('3\r\nabc0\r\n'), self.output.getvalue())
-
-    def test_encode_combines_short(self):
-        self.encoder.write(_b('abc'))
-        self.encoder.write(_b('def'))
-        self.encoder.close()
-        self.assertEqual(_b('6\r\nabcdef0\r\n'), self.output.getvalue())
-
-    def test_encode_over_9_is_in_hex(self):
-        self.encoder.write(_b('1234567890'))
-        self.encoder.close()
-        self.assertEqual(_b('A\r\n12345678900\r\n'), self.output.getvalue())
-
-    def test_encode_long_ranges_not_combined(self):
-        self.encoder.write(_b('1' * 65536))
-        self.encoder.write(_b('2' * 65536))
-        self.encoder.close()
-        self.assertEqual(_b('10000\r\n' + '1' * 65536 + '10000\r\n' +
-            '2' * 65536 + '0\r\n'), self.output.getvalue())
diff --git a/lib/subunit/python/subunit/tests/test_details.py b/lib/subunit/python/subunit/tests/test_details.py
deleted file mode 100644
index 8605c5a..0000000
--- a/lib/subunit/python/subunit/tests/test_details.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import unittest
-
-from testtools.compat import _b, StringIO
-
-import subunit.tests
-from subunit import content, content_type, details
-
-
-class TestSimpleDetails(unittest.TestCase):
-
-    def test_lineReceived(self):
-        parser = details.SimpleDetailsParser(None)
-        parser.lineReceived(_b("foo\n"))
-        parser.lineReceived(_b("bar\n"))
-        self.assertEqual(_b("foo\nbar\n"), parser._message)
-
-    def test_lineReceived_escaped_bracket(self):
-        parser = details.SimpleDetailsParser(None)
-        parser.lineReceived(_b("foo\n"))
-        parser.lineReceived(_b(" ]are\n"))
-        parser.lineReceived(_b("bar\n"))
-        self.assertEqual(_b("foo\n]are\nbar\n"), parser._message)
-
-    def test_get_message(self):
-        parser = details.SimpleDetailsParser(None)
-        self.assertEqual(_b(""), parser.get_message())
-
-    def test_get_details(self):
-        parser = details.SimpleDetailsParser(None)
-        traceback = ""
-        expected = {}
-        expected['traceback'] = content.Content(
-            content_type.ContentType("text", "x-traceback",
-                {'charset': 'utf8'}),
-            lambda:[_b("")])
-        found = parser.get_details()
-        self.assertEqual(expected.keys(), found.keys())
-        self.assertEqual(expected['traceback'].content_type,
-            found['traceback'].content_type)
-        self.assertEqual(_b('').join(expected['traceback'].iter_bytes()),
-            _b('').join(found['traceback'].iter_bytes()))
-
-    def test_get_details_skip(self):
-        parser = details.SimpleDetailsParser(None)
-        traceback = ""
-        expected = {}
-        expected['reason'] = content.Content(
-            content_type.ContentType("text", "plain"),
-            lambda:[_b("")])
-        found = parser.get_details("skip")
-        self.assertEqual(expected, found)
-
-    def test_get_details_success(self):
-        parser = details.SimpleDetailsParser(None)
-        traceback = ""
-        expected = {}
-        expected['message'] = content.Content(
-            content_type.ContentType("text", "plain"),
-            lambda:[_b("")])
-        found = parser.get_details("success")
-        self.assertEqual(expected, found)
-
-
-class TestMultipartDetails(unittest.TestCase):
-
-    def test_get_message_is_None(self):
-        parser = details.MultipartDetailsParser(None)
-        self.assertEqual(None, parser.get_message())
-
-    def test_get_details(self):
-        parser = details.MultipartDetailsParser(None)
-        self.assertEqual({}, parser.get_details())
-
-    def test_parts(self):
-        parser = details.MultipartDetailsParser(None)
-        parser.lineReceived(_b("Content-Type: text/plain\n"))
-        parser.lineReceived(_b("something\n"))
-        parser.lineReceived(_b("F\r\n"))
-        parser.lineReceived(_b("serialised\n"))
-        parser.lineReceived(_b("form0\r\n"))
-        expected = {}
-        expected['something'] = content.Content(
-            content_type.ContentType("text", "plain"),
-            lambda:[_b("serialised\nform")])
-        found = parser.get_details()
-        self.assertEqual(expected.keys(), found.keys())
-        self.assertEqual(expected['something'].content_type,
-            found['something'].content_type)
-        self.assertEqual(_b('').join(expected['something'].iter_bytes()),
-            _b('').join(found['something'].iter_bytes()))
diff --git a/lib/subunit/python/subunit/tests/test_filters.py b/lib/subunit/python/subunit/tests/test_filters.py
deleted file mode 100644
index 0a5e7c7..0000000
--- a/lib/subunit/python/subunit/tests/test_filters.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import sys
-from tempfile import NamedTemporaryFile
-
-from testtools import TestCase
-
-from subunit.filters import find_stream
-
-
-class TestFindStream(TestCase):
-
-    def test_no_argv(self):
-        self.assertEqual('foo', find_stream('foo', []))
-
-    def test_opens_file(self):
-        f = NamedTemporaryFile()
-        f.write(b'foo')
-        f.flush()
-        stream = find_stream('bar', [f.name])
-        self.assertEqual(b'foo', stream.read())
diff --git a/lib/subunit/python/subunit/tests/test_output_filter.py b/lib/subunit/python/subunit/tests/test_output_filter.py
deleted file mode 100644
index 0f61ac5..0000000
--- a/lib/subunit/python/subunit/tests/test_output_filter.py
+++ /dev/null
@@ -1,596 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2013 Subunit Contributors
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import datetime
-from functools import partial
-from io import BytesIO, StringIO, TextIOWrapper
-import optparse
-import sys
-from tempfile import NamedTemporaryFile
-
-from contextlib import contextmanager
-from testtools import TestCase
-from testtools.compat import _u
-from testtools.matchers import (
-    Equals,
-    Matcher,
-    MatchesAny,
-    MatchesListwise,
-    Mismatch,
-    raises,
-)
-from testtools.testresult.doubles import StreamResult
-
-from subunit.iso8601 import UTC
-from subunit.v2 import StreamResultToBytes, ByteStreamToStreamResult
-from subunit._output import (
-    _ALL_ACTIONS,
-    _FINAL_ACTIONS,
-    generate_stream_results,
-    parse_arguments,
-)
-import subunit._output as _o
-
-
-class SafeOptionParser(optparse.OptionParser):
-    """An ArgumentParser class that doesn't call sys.exit."""
-
-    def exit(self, status=0, message=""):
-        raise RuntimeError(message)
-
-    def error(self, message):
-        raise RuntimeError(message)
-
-
-safe_parse_arguments = partial(parse_arguments, ParserClass=SafeOptionParser)
-
-
-class TestStatusArgParserTests(TestCase):
-
-    scenarios = [
-        (cmd, dict(command=cmd, option='--' + cmd)) for cmd in _ALL_ACTIONS
-    ]
-
-    def test_can_parse_all_commands_with_test_id(self):
-        test_id = self.getUniqueString()
-        args = safe_parse_arguments(args=[self.option, test_id])
-
-        self.assertThat(args.action, Equals(self.command))
-        self.assertThat(args.test_id, Equals(test_id))
-
-    def test_all_commands_parse_file_attachment(self):
-        with NamedTemporaryFile() as tmp_file:
-            args = safe_parse_arguments(
-                args=[self.option, 'foo', '--attach-file', tmp_file.name]
-            )
-            self.assertThat(args.attach_file.name, Equals(tmp_file.name))
-
-    def test_all_commands_accept_mimetype_argument(self):
-        with NamedTemporaryFile() as tmp_file:
-            args = safe_parse_arguments(
-                args=[self.option, 'foo', '--attach-file', tmp_file.name, '--mimetype', "text/plain"]
-            )
-            self.assertThat(args.mimetype, Equals("text/plain"))
-
-    def test_all_commands_accept_file_name_argument(self):
-        with NamedTemporaryFile() as tmp_file:
-            args = safe_parse_arguments(
-                args=[self.option, 'foo', '--attach-file', tmp_file.name, '--file-name', "foo"]
-            )
-            self.assertThat(args.file_name, Equals("foo"))
-
-    def test_all_commands_accept_tags_argument(self):
-        args = safe_parse_arguments(
-            args=[self.option, 'foo', '--tag', "foo", "--tag", "bar", "--tag", "baz"]
-        )
-        self.assertThat(args.tags, Equals(["foo", "bar", "baz"]))
-
-    def test_attach_file_with_hyphen_opens_stdin(self):
-        self.patch(_o.sys, 'stdin', TextIOWrapper(BytesIO(b"Hello")))
-        args = safe_parse_arguments(
-            args=[self.option, "foo", "--attach-file", "-"]
-        )
-
-        self.assertThat(args.attach_file.read(), Equals(b"Hello"))
-
-    def test_attach_file_with_hyphen_sets_filename_to_stdin(self):
-        args = safe_parse_arguments(
-            args=[self.option, "foo", "--attach-file", "-"]
-        )
-
-        self.assertThat(args.file_name, Equals("stdin"))
-
-    def test_can_override_stdin_filename(self):
-        args = safe_parse_arguments(
-            args=[self.option, "foo", "--attach-file", "-", '--file-name', 'foo']
-        )
-
-        self.assertThat(args.file_name, Equals("foo"))
-
-    def test_requires_test_id(self):
-        fn = lambda: safe_parse_arguments(args=[self.option])
-        self.assertThat(
-            fn,
-            raises(RuntimeError('argument %s: must specify a single TEST_ID.' % self.option))
-        )
-
-
-class ArgParserTests(TestCase):
-
-    def test_can_parse_attach_file_without_test_id(self):
-        with NamedTemporaryFile() as tmp_file:
-            args = safe_parse_arguments(
-                args=["--attach-file", tmp_file.name]
-            )
-            self.assertThat(args.attach_file.name, Equals(tmp_file.name))
-
-    def test_can_run_without_args(self):
-        args = safe_parse_arguments([])
-
-    def test_cannot_specify_more_than_one_status_command(self):
-        fn = lambda: safe_parse_arguments(['--fail', 'foo', '--skip', 'bar'])
-        self.assertThat(
-            fn,
-            raises(RuntimeError('argument --skip: Only one status may be specified at once.'))
-        )
-
-    def test_cannot_specify_mimetype_without_attach_file(self):
-        fn = lambda: safe_parse_arguments(['--mimetype', 'foo'])
-        self.assertThat(
-            fn,
-            raises(RuntimeError('Cannot specify --mimetype without --attach-file'))
-        )
-
-    def test_cannot_specify_filename_without_attach_file(self):
-        fn = lambda: safe_parse_arguments(['--file-name', 'foo'])
-        self.assertThat(
-            fn,
-            raises(RuntimeError('Cannot specify --file-name without --attach-file'))
-        )
-
-    def test_can_specify_tags_without_status_command(self):
-        args = safe_parse_arguments(['--tag', 'foo'])
-        self.assertEqual(['foo'], args.tags)
-
-    def test_must_specify_tags_with_tags_options(self):
-        fn = lambda: safe_parse_arguments(['--fail', 'foo', '--tag'])
-        self.assertThat(
-            fn,
-            MatchesAny(
-                raises(RuntimeError('--tag option requires 1 argument')),
-                raises(RuntimeError('--tag option requires an argument')),
-            )
-        )
-
-def get_result_for(commands):
-    """Get a result object from *commands.
-
-    Runs the 'generate_stream_results' function from subunit._output after
-    parsing *commands as if they were specified on the command line. The
-    resulting bytestream is then converted back into a result object and
-    returned.
-    """
-    result = StreamResult()
-    args = safe_parse_arguments(commands)
-    generate_stream_results(args, result)
-    return result
-
-
- at contextmanager
-def temp_file_contents(data):
-    """Create a temporary file on disk containing 'data'."""
-    with NamedTemporaryFile() as f:
-        f.write(data)
-        f.seek(0)
-        yield f
-
-
-class StatusStreamResultTests(TestCase):
-
-    scenarios = [
-        (s, dict(status=s, option='--' + s)) for s in _ALL_ACTIONS
-    ]
-
-    _dummy_timestamp = datetime.datetime(2013, 1, 1, 0, 0, 0, 0, UTC)
-
-    def setUp(self):
-        super(StatusStreamResultTests, self).setUp()
-        self.patch(_o, 'create_timestamp', lambda: self._dummy_timestamp)
-        self.test_id = self.getUniqueString()
-
-    def test_only_one_packet_is_generated(self):
-        result = get_result_for([self.option, self.test_id])
-        self.assertThat(
-            len(result._events),
-            Equals(3) # startTestRun and stopTestRun are also called, making 3 total.
-        )
-
-    def test_correct_status_is_generated(self):
-        result = get_result_for([self.option, self.test_id])
-
-        self.assertThat(
-            result._events[1],
-            MatchesStatusCall(test_status=self.status)
-        )
-
-    def test_all_commands_generate_tags(self):
-        result = get_result_for([self.option, self.test_id, '--tag', 'hello', '--tag', 'world'])
-        self.assertThat(
-            result._events[1],
-            MatchesStatusCall(test_tags=set(['hello', 'world']))
-        )
-
-    def test_all_commands_generate_timestamp(self):
-        result = get_result_for([self.option, self.test_id])
-
-        self.assertThat(
-            result._events[1],
-            MatchesStatusCall(timestamp=self._dummy_timestamp)
-        )
-
-    def test_all_commands_generate_correct_test_id(self):
-        result = get_result_for([self.option, self.test_id])
-
-        self.assertThat(
-            result._events[1],
-            MatchesStatusCall(test_id=self.test_id)
-        )
-
-    def test_file_is_sent_in_single_packet(self):
-        with temp_file_contents(b"Hello") as f:
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_bytes=b'Hello', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_can_read_binary_files(self):
-        with temp_file_contents(b"\xDE\xAD\xBE\xEF") as f:
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_bytes=b"\xDE\xAD\xBE\xEF", eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_can_read_empty_files(self):
-        with temp_file_contents(b"") as f:
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_bytes=b"", file_name=f.name, eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_can_read_stdin(self):
-        self.patch(_o.sys, 'stdin', TextIOWrapper(BytesIO(b"\xFE\xED\xFA\xCE")))
-        result = get_result_for([self.option, self.test_id, '--attach-file', '-'])
-
-        self.assertThat(
-            result._events,
-            MatchesListwise([
-                MatchesStatusCall(call='startTestRun'),
-                MatchesStatusCall(file_bytes=b"\xFE\xED\xFA\xCE", file_name='stdin', eof=True),
-                MatchesStatusCall(call='stopTestRun'),
-            ])
-        )
-
-    def test_file_is_sent_with_test_id(self):
-        with temp_file_contents(b"Hello") as f:
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'Hello', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_file_is_sent_with_test_status(self):
-        with temp_file_contents(b"Hello") as f:
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_status=self.status, file_bytes=b'Hello', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_file_chunk_size_is_honored(self):
-        with temp_file_contents(b"Hello") as f:
-            self.patch(_o, '_CHUNK_SIZE', 1)
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'H', eof=False),
-                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'e', eof=False),
-                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'l', eof=False),
-                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'l', eof=False),
-                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'o', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_file_mimetype_specified_once_only(self):
-        with temp_file_contents(b"Hi") as f:
-            self.patch(_o, '_CHUNK_SIZE', 1)
-            result = get_result_for([
-                self.option,
-                self.test_id,
-                '--attach-file',
-                f.name,
-                '--mimetype',
-                'text/plain',
-            ])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_id=self.test_id, mime_type='text/plain', file_bytes=b'H', eof=False),
-                    MatchesStatusCall(test_id=self.test_id, mime_type=None, file_bytes=b'i', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_tags_specified_once_only(self):
-        with temp_file_contents(b"Hi") as f:
-            self.patch(_o, '_CHUNK_SIZE', 1)
-            result = get_result_for([
-                self.option,
-                self.test_id,
-                '--attach-file',
-                f.name,
-                '--tag',
-                'foo',
-                '--tag',
-                'bar',
-            ])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_id=self.test_id, test_tags=set(['foo', 'bar'])),
-                    MatchesStatusCall(test_id=self.test_id, test_tags=None),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_timestamp_specified_once_only(self):
-        with temp_file_contents(b"Hi") as f:
-            self.patch(_o, '_CHUNK_SIZE', 1)
-            result = get_result_for([
-                self.option,
-                self.test_id,
-                '--attach-file',
-                f.name,
-            ])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_id=self.test_id, timestamp=self._dummy_timestamp),
-                    MatchesStatusCall(test_id=self.test_id, timestamp=None),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_test_status_specified_once_only(self):
-        with temp_file_contents(b"Hi") as f:
-            self.patch(_o, '_CHUNK_SIZE', 1)
-            result = get_result_for([
-                self.option,
-                self.test_id,
-                '--attach-file',
-                f.name,
-            ])
-
-            # 'inprogress' status should be on the first packet only, all other
-            # statuses should be on the last packet.
-            if self.status in _FINAL_ACTIONS:
-                first_call = MatchesStatusCall(test_id=self.test_id, test_status=None)
-                last_call = MatchesStatusCall(test_id=self.test_id, test_status=self.status)
-            else:
-                first_call = MatchesStatusCall(test_id=self.test_id, test_status=self.status)
-                last_call = MatchesStatusCall(test_id=self.test_id, test_status=None)
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    first_call,
-                    last_call,
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_filename_can_be_overridden(self):
-        with temp_file_contents(b"Hello") as f:
-            specified_file_name = self.getUniqueString()
-            result = get_result_for([
-                self.option,
-                self.test_id,
-                '--attach-file',
-                f.name,
-                '--file-name',
-                specified_file_name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_name=specified_file_name, file_bytes=b'Hello'),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_file_name_is_used_by_default(self):
-        with temp_file_contents(b"Hello") as f:
-            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_name=f.name, file_bytes=b'Hello', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-
-class FileDataTests(TestCase):
-
-    def test_can_attach_file_without_test_id(self):
-        with temp_file_contents(b"Hello") as f:
-            result = get_result_for(['--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(test_id=None, file_bytes=b'Hello', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_file_name_is_used_by_default(self):
-        with temp_file_contents(b"Hello") as f:
-            result = get_result_for(['--attach-file', f.name])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_name=f.name, file_bytes=b'Hello', eof=True),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_filename_can_be_overridden(self):
-        with temp_file_contents(b"Hello") as f:
-            specified_file_name = self.getUniqueString()
-            result = get_result_for([
-                '--attach-file',
-                f.name,
-                '--file-name',
-                specified_file_name
-            ])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_name=specified_file_name, file_bytes=b'Hello'),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_files_have_timestamp(self):
-        _dummy_timestamp = datetime.datetime(2013, 1, 1, 0, 0, 0, 0, UTC)
-        self.patch(_o, 'create_timestamp', lambda: _dummy_timestamp)
-
-        with temp_file_contents(b"Hello") as f:
-            specified_file_name = self.getUniqueString()
-            result = get_result_for([
-                '--attach-file',
-                f.name,
-            ])
-
-            self.assertThat(
-                result._events,
-                MatchesListwise([
-                    MatchesStatusCall(call='startTestRun'),
-                    MatchesStatusCall(file_bytes=b'Hello', timestamp=_dummy_timestamp),
-                    MatchesStatusCall(call='stopTestRun'),
-                ])
-            )
-
-    def test_can_specify_tags_without_test_status(self):
-        result = get_result_for([
-            '--tag',
-            'foo',
-        ])
-
-        self.assertThat(
-            result._events,
-            MatchesListwise([
-                MatchesStatusCall(call='startTestRun'),
-                MatchesStatusCall(test_tags=set(['foo'])),
-                MatchesStatusCall(call='stopTestRun'),
-            ])
-        )
-
-
-class MatchesStatusCall(Matcher):
-
-    _position_lookup = {
-        'call': 0,
-        'test_id': 1,
-        'test_status': 2,
-        'test_tags': 3,
-        'runnable': 4,
-        'file_name': 5,
-        'file_bytes': 6,
-        'eof': 7,
-        'mime_type': 8,
-        'route_code': 9,
-        'timestamp': 10,
-    }
-
-    def __init__(self, **kwargs):
-        unknown_kwargs = list(filter(
-            lambda k: k not in self._position_lookup,
-            kwargs
-        ))
-        if unknown_kwargs:
-            raise ValueError("Unknown keywords: %s" % ','.join(unknown_kwargs))
-        self._filters = kwargs
-
-    def match(self, call_tuple):
-        for k, v in self._filters.items():
-            try:
-                pos = self._position_lookup[k]
-                if call_tuple[pos] != v:
-                    return Mismatch(
-                        "Value for key is %r, not %r" % (call_tuple[pos], v)
-                    )
-            except IndexError:
-                return Mismatch("Key %s is not present." % k)
-
-    def __str__(self):
-        return "<MatchesStatusCall %r>" % self._filters
diff --git a/lib/subunit/python/subunit/tests/test_progress_model.py b/lib/subunit/python/subunit/tests/test_progress_model.py
deleted file mode 100644
index 2ca0888..0000000
--- a/lib/subunit/python/subunit/tests/test_progress_model.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import unittest
-
-import subunit
-from subunit.progress_model import ProgressModel
-
-
-class TestProgressModel(unittest.TestCase):
-
-    def assertProgressSummary(self, pos, total, progress):
-        """Assert that a progress model has reached a particular point."""
-        self.assertEqual(pos, progress.pos())
-        self.assertEqual(total, progress.width())
-
-    def test_new_progress_0_0(self):
-        progress = ProgressModel()
-        self.assertProgressSummary(0, 0, progress)
-
-    def test_advance_0_0(self):
-        progress = ProgressModel()
-        progress.advance()
-        self.assertProgressSummary(1, 0, progress)
-
-    def test_advance_1_0(self):
-        progress = ProgressModel()
-        progress.advance()
-        self.assertProgressSummary(1, 0, progress)
-
-    def test_set_width_absolute(self):
-        progress = ProgressModel()
-        progress.set_width(10)
-        self.assertProgressSummary(0, 10, progress)
-
-    def test_set_width_absolute_preserves_pos(self):
-        progress = ProgressModel()
-        progress.advance()
-        progress.set_width(2)
-        self.assertProgressSummary(1, 2, progress)
-
-    def test_adjust_width(self):
-        progress = ProgressModel()
-        progress.adjust_width(10)
-        self.assertProgressSummary(0, 10, progress)
-        progress.adjust_width(-10)
-        self.assertProgressSummary(0, 0, progress)
-
-    def test_adjust_width_preserves_pos(self):
-        progress = ProgressModel()
-        progress.advance()
-        progress.adjust_width(10)
-        self.assertProgressSummary(1, 10, progress)
-        progress.adjust_width(-10)
-        self.assertProgressSummary(1, 0, progress)
-
-    def test_push_preserves_progress(self):
-        progress = ProgressModel()
-        progress.adjust_width(3)
-        progress.advance()
-        progress.push()
-        self.assertProgressSummary(1, 3, progress)
-
-    def test_advance_advances_substack(self):
-        progress = ProgressModel()
-        progress.adjust_width(3)
-        progress.advance()
-        progress.push()
-        progress.adjust_width(1)
-        progress.advance()
-        self.assertProgressSummary(2, 3, progress)
-
-    def test_adjust_width_adjusts_substack(self):
-        progress = ProgressModel()
-        progress.adjust_width(3)
-        progress.advance()
-        progress.push()
-        progress.adjust_width(2)
-        progress.advance()
-        self.assertProgressSummary(3, 6, progress)
-
-    def test_set_width_adjusts_substack(self):
-        progress = ProgressModel()
-        progress.adjust_width(3)
-        progress.advance()
-        progress.push()
-        progress.set_width(2)
-        progress.advance()
-        self.assertProgressSummary(3, 6, progress)
-
-    def test_pop_restores_progress(self):
-        progress = ProgressModel()
-        progress.adjust_width(3)
-        progress.advance()
-        progress.push()
-        progress.adjust_width(1)
-        progress.advance()
-        progress.pop()
-        self.assertProgressSummary(1, 3, progress)
diff --git a/lib/subunit/python/subunit/tests/test_run.py b/lib/subunit/python/subunit/tests/test_run.py
deleted file mode 100644
index d92ed04..0000000
--- a/lib/subunit/python/subunit/tests/test_run.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2011  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import io
-import unittest
-
-from testtools import PlaceHolder, TestCase
-from testtools.compat import _b
-from testtools.matchers import StartsWith
-from testtools.testresult.doubles import StreamResult
-
-import subunit
-from subunit import run
-from subunit.run import SubunitTestRunner
-
-
-class TestSubunitTestRunner(TestCase):
-
-    def test_includes_timing_output(self):
-        bytestream = io.BytesIO()
-        runner = SubunitTestRunner(stream=bytestream)
-        test = PlaceHolder('name')
-        runner.run(test)
-        bytestream.seek(0)
-        eventstream = StreamResult()
-        subunit.ByteStreamToStreamResult(bytestream).run(eventstream)
-        timestamps = [event[-1] for event in eventstream._events
-            if event is not None]
-        self.assertNotEqual([], timestamps)
-
-    def test_enumerates_tests_before_run(self):
-        bytestream = io.BytesIO()
-        runner = SubunitTestRunner(stream=bytestream)
-        test1 = PlaceHolder('name1')
-        test2 = PlaceHolder('name2')
-        case = unittest.TestSuite([test1, test2])
-        runner.run(case)
-        bytestream.seek(0)
-        eventstream = StreamResult()
-        subunit.ByteStreamToStreamResult(bytestream).run(eventstream)
-        self.assertEqual([
-            ('status', 'name1', 'exists'),
-            ('status', 'name2', 'exists'),
-            ], [event[:3] for event in eventstream._events[:2]])
-
-    def test_list_errors_if_errors_from_list_test(self):
-        bytestream = io.BytesIO()
-        runner = SubunitTestRunner(stream=bytestream)
-        def list_test(test):
-            return [], ['failed import']
-        self.patch(run, 'list_test', list_test)
-        exc = self.assertRaises(SystemExit, runner.list, None)
-        self.assertEqual((2,), exc.args)
-
-    class FailingTest(TestCase):
-        def test_fail(self):
-            1/0
-
-    def test_exits_zero_when_tests_fail(self):
-        bytestream = io.BytesIO()
-        stream = io.TextIOWrapper(bytestream, encoding="utf8")
-        try:
-            self.assertEqual(None, run.main(
-                argv=["progName", "subunit.tests.test_run.TestSubunitTestRunner.FailingTest"],
-                stdout=stream))
-        except SystemExit:
-            self.fail("SystemExit raised")
-        self.assertThat(bytestream.getvalue(), StartsWith(_b('\xb3')))
-
-    def test_exits_nonzero_when_execution_errors(self):
-        bytestream = io.BytesIO()
-        stream = io.TextIOWrapper(bytestream, encoding="utf8")
-        exc = self.assertRaises(Exception, run.main,
-                argv=["progName", "subunit.tests.test_run.TestSubunitTestRunner.MissingTest"],
-                stdout=stream)
diff --git a/lib/subunit/python/subunit/tests/test_subunit_filter.py b/lib/subunit/python/subunit/tests/test_subunit_filter.py
deleted file mode 100644
index 5f34b3b..0000000
--- a/lib/subunit/python/subunit/tests/test_subunit_filter.py
+++ /dev/null
@@ -1,346 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Tests for subunit.TestResultFilter."""
-
-from datetime import datetime
-import os
-import subprocess
-import sys
-from subunit import iso8601
-import unittest
-
-from testtools import TestCase
-from testtools.compat import _b, BytesIO
-from testtools.testresult.doubles import ExtendedTestResult, StreamResult
-
-import subunit
-from subunit.test_results import make_tag_filter, TestResultFilter
-from subunit import ByteStreamToStreamResult, StreamResultToBytes
-
-
-class TestTestResultFilter(TestCase):
-    """Test for TestResultFilter, a TestResult object which filters tests."""
-
-    # While TestResultFilter works on python objects, using a subunit stream
-    # is an easy pithy way of getting a series of test objects to call into
-    # the TestResult, and as TestResultFilter is intended for use with subunit
-    # also has the benefit of detecting any interface skew issues.
-    example_subunit_stream = _b("""\
-tags: global
-test passed
-success passed
-test failed
-tags: local
-failure failed
-test error
-error error [
-error details
-]
-test skipped
-skip skipped
-test todo
-xfail todo
-""")
-
-    def run_tests(self, result_filter, input_stream=None):
-        """Run tests through the given filter.
-
-        :param result_filter: A filtering TestResult object.
-        :param input_stream: Bytes of subunit stream data. If not provided,
-            uses TestTestResultFilter.example_subunit_stream.
-        """
-        if input_stream is None:
-            input_stream = self.example_subunit_stream
-        test = subunit.ProtocolTestCase(BytesIO(input_stream))
-        test.run(result_filter)
-
-    def test_default(self):
-        """The default is to exclude success and include everything else."""
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result)
-        self.run_tests(result_filter)
-        # skips are seen as success by default python TestResult.
-        self.assertEqual(['error'],
-            [error[0].id() for error in filtered_result.errors])
-        self.assertEqual(['failed'],
-            [failure[0].id() for failure in
-            filtered_result.failures])
-        self.assertEqual(4, filtered_result.testsRun)
-
-    def test_tag_filter(self):
-        tag_filter = make_tag_filter(['global'], ['local'])
-        result = ExtendedTestResult()
-        result_filter = TestResultFilter(
-            result, filter_success=False, filter_predicate=tag_filter)
-        self.run_tests(result_filter)
-        tests_included = [
-            event[1] for event in result._events if event[0] == 'startTest']
-        tests_expected = list(map(
-            subunit.RemotedTestCase,
-            ['passed', 'error', 'skipped', 'todo']))
-        self.assertEquals(tests_expected, tests_included)
-
-    def test_tags_tracked_correctly(self):
-        tag_filter = make_tag_filter(['a'], [])
-        result = ExtendedTestResult()
-        result_filter = TestResultFilter(
-            result, filter_success=False, filter_predicate=tag_filter)
-        input_stream = _b(
-            "test: foo\n"
-            "tags: a\n"
-            "successful: foo\n"
-            "test: bar\n"
-            "successful: bar\n")
-        self.run_tests(result_filter, input_stream)
-        foo = subunit.RemotedTestCase('foo')
-        self.assertEquals(
-            [('startTest', foo),
-             ('tags', set(['a']), set()),
-             ('addSuccess', foo),
-             ('stopTest', foo),
-             ],
-            result._events)
-
-    def test_exclude_errors(self):
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result, filter_error=True)
-        self.run_tests(result_filter)
-        # skips are seen as errors by default python TestResult.
-        self.assertEqual([], filtered_result.errors)
-        self.assertEqual(['failed'],
-            [failure[0].id() for failure in
-            filtered_result.failures])
-        self.assertEqual(3, filtered_result.testsRun)
-
-    def test_fixup_expected_failures(self):
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result,
-            fixup_expected_failures=set(["failed"]))
-        self.run_tests(result_filter)
-        self.assertEqual(['failed', 'todo'],
-            [failure[0].id() for failure in filtered_result.expectedFailures])
-        self.assertEqual([], filtered_result.failures)
-        self.assertEqual(4, filtered_result.testsRun)
-
-    def test_fixup_expected_errors(self):
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result,
-            fixup_expected_failures=set(["error"]))
-        self.run_tests(result_filter)
-        self.assertEqual(['error', 'todo'],
-            [failure[0].id() for failure in filtered_result.expectedFailures])
-        self.assertEqual([], filtered_result.errors)
-        self.assertEqual(4, filtered_result.testsRun)
-
-    def test_fixup_unexpected_success(self):
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result, filter_success=False,
-            fixup_expected_failures=set(["passed"]))
-        self.run_tests(result_filter)
-        self.assertEqual(['passed'],
-            [passed.id() for passed in filtered_result.unexpectedSuccesses])
-        self.assertEqual(5, filtered_result.testsRun)
-
-    def test_exclude_failure(self):
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result, filter_failure=True)
-        self.run_tests(result_filter)
-        self.assertEqual(['error'],
-            [error[0].id() for error in filtered_result.errors])
-        self.assertEqual([],
-            [failure[0].id() for failure in
-            filtered_result.failures])
-        self.assertEqual(3, filtered_result.testsRun)
-
-    def test_exclude_skips(self):
-        filtered_result = subunit.TestResultStats(None)
-        result_filter = TestResultFilter(filtered_result, filter_skip=True)
-        self.run_tests(result_filter)
-        self.assertEqual(0, filtered_result.skipped_tests)
-        self.assertEqual(2, filtered_result.failed_tests)
-        self.assertEqual(3, filtered_result.testsRun)
-
-    def test_include_success(self):
-        """Successes can be included if requested."""
-        filtered_result = unittest.TestResult()
-        result_filter = TestResultFilter(filtered_result,
-            filter_success=False)
-        self.run_tests(result_filter)
-        self.assertEqual(['error'],
-            [error[0].id() for error in filtered_result.errors])
-        self.assertEqual(['failed'],
-            [failure[0].id() for failure in
-            filtered_result.failures])
-        self.assertEqual(5, filtered_result.testsRun)
-
-    def test_filter_predicate(self):
-        """You can filter by predicate callbacks"""
-        # 0.0.7 and earlier did not support the 'tags' parameter, so we need
-        # to test that we still support behaviour without it.
-        filtered_result = unittest.TestResult()
-        def filter_cb(test, outcome, err, details):
-            return outcome == 'success'
-        result_filter = TestResultFilter(filtered_result,
-            filter_predicate=filter_cb,
-            filter_success=False)
-        self.run_tests(result_filter)
-        # Only success should pass
-        self.assertEqual(1, filtered_result.testsRun)
-
-    def test_filter_predicate_with_tags(self):
-        """You can filter by predicate callbacks that accept tags"""
-        filtered_result = unittest.TestResult()
-        def filter_cb(test, outcome, err, details, tags):
-            return outcome == 'success'
-        result_filter = TestResultFilter(filtered_result,
-            filter_predicate=filter_cb,
-            filter_success=False)
-        self.run_tests(result_filter)
-        # Only success should pass
-        self.assertEqual(1, filtered_result.testsRun)
-
-    def test_time_ordering_preserved(self):
-        # Passing a subunit stream through TestResultFilter preserves the
-        # relative ordering of 'time' directives and any other subunit
-        # directives that are still included.
-        date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
-        date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
-        date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
-        subunit_stream = _b('\n'.join([
-            "time: %s",
-            "test: foo",
-            "time: %s",
-            "error: foo",
-            "time: %s",
-            ""]) % (date_a, date_b, date_c))
-        result = ExtendedTestResult()
-        result_filter = TestResultFilter(result)
-        self.run_tests(result_filter, subunit_stream)
-        foo = subunit.RemotedTestCase('foo')
-        self.maxDiff = None
-        self.assertEqual(
-            [('time', date_a),
-             ('time', date_b),
-             ('startTest', foo),
-             ('addError', foo, {}),
-             ('stopTest', foo),
-             ('time', date_c)], result._events)
-
-    def test_time_passes_through_filtered_tests(self):
-        # Passing a subunit stream through TestResultFilter preserves 'time'
-        # directives even if a specific test is filtered out.
-        date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
-        date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
-        date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
-        subunit_stream = _b('\n'.join([
-            "time: %s",
-            "test: foo",
-            "time: %s",
-            "success: foo",
-            "time: %s",
-            ""]) % (date_a, date_b, date_c))
-        result = ExtendedTestResult()
-        result_filter = TestResultFilter(result)
-        result_filter.startTestRun()
-        self.run_tests(result_filter, subunit_stream)
-        result_filter.stopTestRun()
-        foo = subunit.RemotedTestCase('foo')
-        self.maxDiff = None
-        self.assertEqual(
-            [('startTestRun',),
-             ('time', date_a),
-             ('time', date_c),
-             ('stopTestRun',),], result._events)
-
-    def test_skip_preserved(self):
-        subunit_stream = _b('\n'.join([
-            "test: foo",
-            "skip: foo",
-            ""]))
-        result = ExtendedTestResult()
-        result_filter = TestResultFilter(result)
-        self.run_tests(result_filter, subunit_stream)
-        foo = subunit.RemotedTestCase('foo')
-        self.assertEquals(
-            [('startTest', foo),
-             ('addSkip', foo, {}),
-             ('stopTest', foo), ], result._events)
-
-    if sys.version_info < (2, 7):
-        # These tests require Python >=2.7.
-        del test_fixup_expected_failures, test_fixup_expected_errors, test_fixup_unexpected_success
-
-
-class TestFilterCommand(TestCase):
-
-    def run_command(self, args, stream):
-        root = os.path.dirname(
-            os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-        script_path = os.path.join(root, 'filters', 'subunit-filter')
-        command = [sys.executable, script_path] + list(args)
-        ps = subprocess.Popen(
-            command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE)
-        out, err = ps.communicate(stream)
-        if ps.returncode != 0:
-            raise RuntimeError("%s failed: %s" % (command, err))
-        return out
-
-    def test_default(self):
-        byte_stream = BytesIO()
-        stream = StreamResultToBytes(byte_stream)
-        stream.status(test_id="foo", test_status="inprogress")
-        stream.status(test_id="foo", test_status="skip")
-        output = self.run_command([], byte_stream.getvalue())
-        events = StreamResult()
-        ByteStreamToStreamResult(BytesIO(output)).run(events)
-        ids = set(event[1] for event in events._events)
-        self.assertEqual([
-            ('status', 'foo', 'inprogress'),
-            ('status', 'foo', 'skip'),
-            ], [event[:3] for event in events._events])
-
-    def test_tags(self):
-        byte_stream = BytesIO()
-        stream = StreamResultToBytes(byte_stream)
-        stream.status(
-            test_id="foo", test_status="inprogress", test_tags=set(["a"]))
-        stream.status(
-            test_id="foo", test_status="success", test_tags=set(["a"]))
-        stream.status(test_id="bar", test_status="inprogress")
-        stream.status(test_id="bar", test_status="inprogress")
-        stream.status(
-            test_id="baz", test_status="inprogress", test_tags=set(["a"]))
-        stream.status(
-            test_id="baz", test_status="success", test_tags=set(["a"]))
-        output = self.run_command(
-            ['-s', '--with-tag', 'a'], byte_stream.getvalue())
-        events = StreamResult()
-        ByteStreamToStreamResult(BytesIO(output)).run(events)
-        ids = set(event[1] for event in events._events)
-        self.assertEqual(set(['foo', 'baz']), ids)
-
-    def test_no_passthrough(self):
-        output = self.run_command(['--no-passthrough'], b'hi thar')
-        self.assertEqual(b'', output)
-
-    def test_passthrough(self):
-        output = self.run_command([], b'hi thar')
-        byte_stream = BytesIO()
-        stream = StreamResultToBytes(byte_stream)
-        stream.status(file_name="stdout", file_bytes=b'hi thar')
-        self.assertEqual(byte_stream.getvalue(), output)
diff --git a/lib/subunit/python/subunit/tests/test_subunit_stats.py b/lib/subunit/python/subunit/tests/test_subunit_stats.py
deleted file mode 100644
index 7c5e42d..0000000
--- a/lib/subunit/python/subunit/tests/test_subunit_stats.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Tests for subunit.TestResultStats."""
-
-import unittest
-
-from testtools.compat import _b, BytesIO, StringIO
-
-import subunit
-
-
-class TestTestResultStats(unittest.TestCase):
-    """Test for TestResultStats, a TestResult object that generates stats."""
-
-    def setUp(self):
-        self.output = StringIO()
-        self.result = subunit.TestResultStats(self.output)
-        self.input_stream = BytesIO()
-        self.test = subunit.ProtocolTestCase(self.input_stream)
-
-    def test_stats_empty(self):
-        self.test.run(self.result)
-        self.assertEqual(0, self.result.total_tests)
-        self.assertEqual(0, self.result.passed_tests)
-        self.assertEqual(0, self.result.failed_tests)
-        self.assertEqual(set(), self.result.seen_tags)
-
-    def setUpUsedStream(self):
-        self.input_stream.write(_b("""tags: global
-test passed
-success passed
-test failed
-tags: local
-failure failed
-test error
-error error
-test skipped
-skip skipped
-test todo
-xfail todo
-"""))
-        self.input_stream.seek(0)
-        self.test.run(self.result)
-    
-    def test_stats_smoke_everything(self):
-        # Statistics are calculated usefully.
-        self.setUpUsedStream()
-        self.assertEqual(5, self.result.total_tests)
-        self.assertEqual(2, self.result.passed_tests)
-        self.assertEqual(2, self.result.failed_tests)
-        self.assertEqual(1, self.result.skipped_tests)
-        self.assertEqual(set(["global", "local"]), self.result.seen_tags)
-
-    def test_stat_formatting(self):
-        expected = ("""
-Total tests:       5
-Passed tests:      2
-Failed tests:      2
-Skipped tests:     1
-Seen tags: global, local
-""")[1:]
-        self.setUpUsedStream()
-        self.result.formatStats()
-        self.assertEqual(expected, self.output.getvalue())
diff --git a/lib/subunit/python/subunit/tests/test_subunit_tags.py b/lib/subunit/python/subunit/tests/test_subunit_tags.py
deleted file mode 100644
index a16edc1..0000000
--- a/lib/subunit/python/subunit/tests/test_subunit_tags.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Tests for subunit.tag_stream."""
-
-from io import BytesIO
-
-import testtools
-from testtools.matchers import Contains
-
-import subunit
-import subunit.test_results
-
-
-class TestSubUnitTags(testtools.TestCase):
-
-    def setUp(self):
-        super(TestSubUnitTags, self).setUp()
-        self.original = BytesIO()
-        self.filtered = BytesIO()
-
-    def test_add_tag(self):
-        # Literal values to avoid set sort-order dependencies. The Python code
-        # below shows the derivation.
-        # reference = BytesIO()
-        # stream = subunit.StreamResultToBytes(reference)
-        # stream.status(
-        #     test_id='test', test_status='inprogress', test_tags=set(['quux', 'foo']))
-        # stream.status(
-        #     test_id='test', test_status='success', test_tags=set(['bar', 'quux', 'foo']))
-        reference = [
-            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
-                b'\x83\x1b\x04test\x03\x03bar\x04quux\x03fooqn\xab)',
-            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
-                b'\x83\x1b\x04test\x03\x04quux\x03foo\x03bar\xaf\xbd\x9d\xd6',
-            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
-                b'\x83\x1b\x04test\x03\x04quux\x03bar\x03foo\x03\x04b\r',
-            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
-                b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
-            b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
-                b'\x83\x1b\x04test\x03\x03foo\x04quux\x03bar\x08\xc2X\x83',
-            b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
-                b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
-            b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
-                b'\x83\x1b\x04test\x03\x03foo\x03bar\x04quux:\x05e\x80',
-            ]
-        stream = subunit.StreamResultToBytes(self.original)
-        stream.status(
-            test_id='test', test_status='inprogress', test_tags=set(['foo']))
-        stream.status(
-            test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
-        self.original.seek(0)
-        self.assertEqual(
-            0, subunit.tag_stream(self.original, self.filtered, ["quux"]))
-        self.assertThat(reference, Contains(self.filtered.getvalue()))
-
-    def test_remove_tag(self):
-        reference = BytesIO()
-        stream = subunit.StreamResultToBytes(reference)
-        stream.status(
-            test_id='test', test_status='inprogress', test_tags=set(['foo']))
-        stream.status(
-            test_id='test', test_status='success', test_tags=set(['foo']))
-        stream = subunit.StreamResultToBytes(self.original)
-        stream.status(
-            test_id='test', test_status='inprogress', test_tags=set(['foo']))
-        stream.status(
-            test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
-        self.original.seek(0)
-        self.assertEqual(
-            0, subunit.tag_stream(self.original, self.filtered, ["-bar"]))
-        self.assertEqual(reference.getvalue(), self.filtered.getvalue())
diff --git a/lib/subunit/python/subunit/tests/test_tap2subunit.py b/lib/subunit/python/subunit/tests/test_tap2subunit.py
deleted file mode 100644
index 5b7c07a..0000000
--- a/lib/subunit/python/subunit/tests/test_tap2subunit.py
+++ /dev/null
@@ -1,387 +0,0 @@
-#
-#  subunit: extensions to python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-"""Tests for TAP2SubUnit."""
-
-from io import BytesIO, StringIO
-import unittest
-
-from testtools import TestCase
-from testtools.compat import _u
-from testtools.testresult.doubles import StreamResult
-
-import subunit
-
-UTF8_TEXT = 'text/plain; charset=UTF8'
-
-
-class TestTAP2SubUnit(TestCase):
-    """Tests for TAP2SubUnit.
-
-    These tests test TAP string data in, and subunit string data out.
-    This is ok because the subunit protocol is intended to be stable,
-    but it might be easier/pithier to write tests against TAP string in,
-    parsed subunit objects out (by hooking the subunit stream to a subunit
-    protocol server).
-    """
-
-    def setUp(self):
-        super(TestTAP2SubUnit, self).setUp()
-        self.tap = StringIO()
-        self.subunit = BytesIO()
-
-    def test_skip_entire_file(self):
-        # A file
-        # 1..0 # Skipped: comment
-        # results in a single skipped test.
-        self.tap.write(_u("1..0 # Skipped: entire file skipped\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'file skip', 'skip', None, True,
-            'tap comment', b'Skipped: entire file skipped', True, None, None,
-            None)])
-
-    def test_ok_test_pass(self):
-        # A file
-        # ok
-        # results in a passed test with name 'test 1' (a synthetic name as tap
-        # does not require named fixtures - it is the first test in the tap
-        # stream).
-        self.tap.write(_u("ok\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'success', None, False, None,
-            None, True, None, None, None)])
-
-    def test_ok_test_number_pass(self):
-        # A file
-        # ok 1
-        # results in a passed test with name 'test 1'
-        self.tap.write(_u("ok 1\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'success', None, False, None,
-            None, True, None, None, None)])
-
-    def test_ok_test_number_description_pass(self):
-        # A file
-        # ok 1 - There is a description
-        # results in a passed test with name 'test 1 - There is a description'
-        self.tap.write(_u("ok 1 - There is a description\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1 - There is a description',
-            'success', None, False, None, None, True, None, None, None)])
-
-    def test_ok_test_description_pass(self):
-        # A file
-        # ok There is a description
-        # results in a passed test with name 'test 1 There is a description'
-        self.tap.write(_u("ok There is a description\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1 There is a description',
-            'success', None, False, None, None, True, None, None, None)])
-
-    def test_ok_SKIP_skip(self):
-        # A file
-        # ok # SKIP
-        # results in a skip test with name 'test 1'
-        self.tap.write(_u("ok # SKIP\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'skip', None, False, None,
-            None, True, None, None, None)])
-
-    def test_ok_skip_number_comment_lowercase(self):
-        self.tap.write(_u("ok 1 # skip no samba environment available, skipping compilation\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'skip', None, False, 'tap comment',
-            b'no samba environment available, skipping compilation', True,
-            'text/plain; charset=UTF8', None, None)])
-
-    def test_ok_number_description_SKIP_skip_comment(self):
-        # A file
-        # ok 1 foo  # SKIP Not done yet
-        # results in a skip test with name 'test 1 foo' and a log of
-        # Not done yet
-        self.tap.write(_u("ok 1 foo  # SKIP Not done yet\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1 foo', 'skip', None, False,
-            'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
-            None, None)])
-
-    def test_ok_SKIP_skip_comment(self):
-        # A file
-        # ok # SKIP Not done yet
-        # results in a skip test with name 'test 1' and a log of Not done yet
-        self.tap.write(_u("ok # SKIP Not done yet\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'skip', None, False,
-            'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
-            None, None)])
-
-    def test_ok_TODO_xfail(self):
-        # A file
-        # ok # TODO
-        # results in a xfail test with name 'test 1'
-        self.tap.write(_u("ok # TODO\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'xfail', None, False, None,
-            None, True, None, None, None)])
-
-    def test_ok_TODO_xfail_comment(self):
-        # A file
-        # ok # TODO Not done yet
-        # results in a xfail test with name 'test 1' and a log of Not done yet
-        self.tap.write(_u("ok # TODO Not done yet\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([('status', 'test 1', 'xfail', None, False,
-            'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
-            None, None)])
-
-    def test_bail_out_errors(self):
-        # A file containing the line
-        # Bail out! COMMENT
-        # is treated as an error
-        self.tap.write(_u("ok 1 foo\n"))
-        self.tap.write(_u("Bail out! Lifejacket engaged\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 foo', 'success', None, False, None, None, True,
-             None, None, None),
-            ('status', 'Bail out! Lifejacket engaged', 'fail', None, False,
-             None, None, True, None, None, None)])
-
-    def test_missing_test_at_end_with_plan_adds_error(self):
-        # A file
-        # 1..3
-        # ok first test
-        # not ok second test
-        # results in three tests, with the third being created
-        self.tap.write(_u('1..3\n'))
-        self.tap.write(_u('ok first test\n'))
-        self.tap.write(_u('not ok second test\n'))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 first test', 'success', None, False, None,
-             None, True, None, None, None),
-            ('status', 'test 2 second test', 'fail', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 3', 'fail', None, False, 'tap meta',
-             b'test missing from TAP output', True, 'text/plain; charset=UTF8',
-             None, None)])
-
-    def test_missing_test_with_plan_adds_error(self):
-        # A file
-        # 1..3
-        # ok first test
-        # not ok 3 third test
-        # results in three tests, with the second being created
-        self.tap.write(_u('1..3\n'))
-        self.tap.write(_u('ok first test\n'))
-        self.tap.write(_u('not ok 3 third test\n'))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 first test', 'success', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 2', 'fail', None, False, 'tap meta',
-             b'test missing from TAP output', True, 'text/plain; charset=UTF8',
-             None, None),
-            ('status', 'test 3 third test', 'fail', None, False, None, None,
-             True, None, None, None)])
-
-    def test_missing_test_no_plan_adds_error(self):
-        # A file
-        # ok first test
-        # not ok 3 third test
-        # results in three tests, with the second being created
-        self.tap.write(_u('ok first test\n'))
-        self.tap.write(_u('not ok 3 third test\n'))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 first test', 'success', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 2', 'fail', None, False, 'tap meta',
-             b'test missing from TAP output', True, 'text/plain; charset=UTF8',
-             None, None),
-            ('status', 'test 3 third test', 'fail', None, False, None, None,
-             True, None, None, None)])
-
-    def test_four_tests_in_a_row_trailing_plan(self):
-        # A file
-        # ok 1 - first test in a script with trailing plan
-        # not ok 2 - second
-        # ok 3 - third
-        # not ok 4 - fourth
-        # 1..4
-        # results in four tests numbered and named
-        self.tap.write(_u('ok 1 - first test in a script with trailing plan\n'))
-        self.tap.write(_u('not ok 2 - second\n'))
-        self.tap.write(_u('ok 3 - third\n'))
-        self.tap.write(_u('not ok 4 - fourth\n'))
-        self.tap.write(_u('1..4\n'))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 - first test in a script with trailing plan',
-             'success', None, False, None, None, True, None, None, None),
-            ('status', 'test 2 - second', 'fail', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 3 - third', 'success', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 4 - fourth', 'fail', None, False, None, None,
-             True, None, None, None)])
-
-    def test_four_tests_in_a_row_with_plan(self):
-        # A file
-        # 1..4
-        # ok 1 - first test in a script with a plan
-        # not ok 2 - second
-        # ok 3 - third
-        # not ok 4 - fourth
-        # results in four tests numbered and named
-        self.tap.write(_u('1..4\n'))
-        self.tap.write(_u('ok 1 - first test in a script with a plan\n'))
-        self.tap.write(_u('not ok 2 - second\n'))
-        self.tap.write(_u('ok 3 - third\n'))
-        self.tap.write(_u('not ok 4 - fourth\n'))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 - first test in a script with a plan',
-             'success', None, False, None, None, True, None, None, None),
-            ('status', 'test 2 - second', 'fail', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 3 - third', 'success', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 4 - fourth', 'fail', None, False, None, None,
-             True, None, None, None)])
-
-    def test_four_tests_in_a_row_no_plan(self):
-        # A file
-        # ok 1 - first test in a script with no plan at all
-        # not ok 2 - second
-        # ok 3 - third
-        # not ok 4 - fourth
-        # results in four tests numbered and named
-        self.tap.write(_u('ok 1 - first test in a script with no plan at all\n'))
-        self.tap.write(_u('not ok 2 - second\n'))
-        self.tap.write(_u('ok 3 - third\n'))
-        self.tap.write(_u('not ok 4 - fourth\n'))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1 - first test in a script with no plan at all',
-             'success', None, False, None, None, True, None, None, None),
-            ('status', 'test 2 - second', 'fail', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 3 - third', 'success', None, False, None, None,
-             True, None, None, None),
-            ('status', 'test 4 - fourth', 'fail', None, False, None, None,
-             True, None, None, None)])
-
-    def test_todo_and_skip(self):
-        # A file
-        # not ok 1 - a fail but # TODO but is TODO
-        # not ok 2 - another fail # SKIP instead
-        # results in two tests, numbered and commented.
-        self.tap.write(_u("not ok 1 - a fail but # TODO but is TODO\n"))
-        self.tap.write(_u("not ok 2 - another fail # SKIP instead\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.subunit.seek(0)
-        events = StreamResult()
-        subunit.ByteStreamToStreamResult(self.subunit).run(events)
-        self.check_events([
-            ('status', 'test 1 - a fail but', 'xfail', None, False,
-             'tap comment', b'but is TODO', True, 'text/plain; charset=UTF8',
-             None, None),
-            ('status', 'test 2 - another fail', 'skip', None, False,
-             'tap comment', b'instead', True, 'text/plain; charset=UTF8',
-             None, None)])
-
-    def test_leading_comments_add_to_next_test_log(self):
-        # A file
-        # # comment
-        # ok 
-        # ok
-        # results in two tests, with the comment included
-        # in the first test and not the second.
-        self.tap.write(_u("# comment\n"))
-        self.tap.write(_u("ok\n"))
-        self.tap.write(_u("ok\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1', 'success', None, False, 'tap comment',
-             b'# comment', True, 'text/plain; charset=UTF8', None, None),
-            ('status', 'test 2', 'success', None, False, None, None, True,
-             None, None, None)])
-    
-    def test_trailing_comments_are_included_in_last_test_log(self):
-        # A file
-        # ok
-        # ok
-        # # comment
-        # results in two tests, with the second having the comment
-        # attached to its log.
-        self.tap.write(_u("ok\n"))
-        self.tap.write(_u("ok\n"))
-        self.tap.write(_u("# comment\n"))
-        self.tap.seek(0)
-        result = subunit.TAP2SubUnit(self.tap, self.subunit)
-        self.assertEqual(0, result)
-        self.check_events([
-            ('status', 'test 1', 'success', None, False, None, None, True,
-             None, None, None),
-            ('status', 'test 2', 'success', None, False, 'tap comment',
-             b'# comment', True, 'text/plain; charset=UTF8', None, None)])
-
-    def check_events(self, events):
-        self.subunit.seek(0)
-        eventstream = StreamResult()
-        subunit.ByteStreamToStreamResult(self.subunit).run(eventstream)
-        self.assertEqual(events, eventstream._events)
diff --git a/lib/subunit/python/subunit/tests/test_test_protocol.py b/lib/subunit/python/subunit/tests/test_test_protocol.py
deleted file mode 100644
index c6008f4..0000000
--- a/lib/subunit/python/subunit/tests/test_test_protocol.py
+++ /dev/null
@@ -1,1362 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import datetime
-import unittest
-import os
-
-from testtools import PlaceHolder, skipIf, TestCase, TestResult
-from testtools.compat import _b, _u, BytesIO
-from testtools.content import Content, TracebackContent, text_content
-from testtools.content_type import ContentType
-try:
-    from testtools.testresult.doubles import (
-        Python26TestResult,
-        Python27TestResult,
-        ExtendedTestResult,
-        )
-except ImportError:
-    from testtools.tests.helpers import (
-        Python26TestResult,
-        Python27TestResult,
-        ExtendedTestResult,
-        )
-from testtools.matchers import Contains
-
-import subunit
-from subunit.tests import (
-    _remote_exception_repr,
-    _remote_exception_str,
-    _remote_exception_str_chunked,
-    )
-import subunit.iso8601 as iso8601
-
-
-def details_to_str(details):
-    return TestResult()._err_details_to_string(None, details=details)
-
-
-class TestTestImports(unittest.TestCase):
-
-    def test_imports(self):
-        from subunit import DiscardStream
-        from subunit import TestProtocolServer
-        from subunit import RemotedTestCase
-        from subunit import RemoteError
-        from subunit import ExecTestCase
-        from subunit import IsolatedTestCase
-        from subunit import TestProtocolClient
-        from subunit import ProtocolTestCase
-
-
-class TestDiscardStream(unittest.TestCase):
-
-    def test_write(self):
-        subunit.DiscardStream().write("content")
-
-
-class TestProtocolServerForward(unittest.TestCase):
-
-    def test_story(self):
-        client = unittest.TestResult()
-        out = BytesIO()
-        protocol = subunit.TestProtocolServer(client, forward_stream=out)
-        pipe = BytesIO(_b("test old mcdonald\n"
-                        "success old mcdonald\n"))
-        protocol.readFrom(pipe)
-        self.assertEqual(client.testsRun, 1)
-        self.assertEqual(pipe.getvalue(), out.getvalue())
-
-    def test_not_command(self):
-        client = unittest.TestResult()
-        out = BytesIO()
-        protocol = subunit.TestProtocolServer(client,
-            stream=subunit.DiscardStream(), forward_stream=out)
-        pipe = BytesIO(_b("success old mcdonald\n"))
-        protocol.readFrom(pipe)
-        self.assertEqual(client.testsRun, 0)
-        self.assertEqual(_b(""), out.getvalue())
-
-
-class TestTestProtocolServerPipe(unittest.TestCase):
-
-    def test_story(self):
-        client = unittest.TestResult()
-        protocol = subunit.TestProtocolServer(client)
-        traceback = "foo.c:53:ERROR invalid state\n"
-        pipe = BytesIO(_b("test old mcdonald\n"
-                        "success old mcdonald\n"
-                        "test bing crosby\n"
-                        "failure bing crosby [\n"
-                        +  traceback +
-                        "]\n"
-                        "test an error\n"
-                        "error an error\n"))
-        protocol.readFrom(pipe)
-        bing = subunit.RemotedTestCase("bing crosby")
-        an_error = subunit.RemotedTestCase("an error")
-        self.assertEqual(client.errors,
-                         [(an_error, _remote_exception_repr + '\n')])
-        self.assertEqual(
-            client.failures,
-            [(bing, _remote_exception_repr + ": "
-              + details_to_str({'traceback': text_content(traceback)}) + "\n")])
-        self.assertEqual(client.testsRun, 3)
-
-    def test_non_test_characters_forwarded_immediately(self):
-        pass
-
-
-class TestTestProtocolServerStartTest(unittest.TestCase):
-
-    def setUp(self):
-        self.client = Python26TestResult()
-        self.stream = BytesIO()
-        self.protocol = subunit.TestProtocolServer(self.client, self.stream)
-
-    def test_start_test(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.assertEqual(self.client._events,
-            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
-    def test_start_testing(self):
-        self.protocol.lineReceived(_b("testing old mcdonald\n"))
-        self.assertEqual(self.client._events,
-            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
-    def test_start_test_colon(self):
-        self.protocol.lineReceived(_b("test: old mcdonald\n"))
-        self.assertEqual(self.client._events,
-            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
-    def test_indented_test_colon_ignored(self):
-        ignored_line = _b(" test: old mcdonald\n")
-        self.protocol.lineReceived(ignored_line)
-        self.assertEqual([], self.client._events)
-        self.assertEqual(self.stream.getvalue(), ignored_line)
-
-    def test_start_testing_colon(self):
-        self.protocol.lineReceived(_b("testing: old mcdonald\n"))
-        self.assertEqual(self.client._events,
-            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
-
-class TestTestProtocolServerPassThrough(unittest.TestCase):
-
-    def setUp(self):
-        self.stdout = BytesIO()
-        self.test = subunit.RemotedTestCase("old mcdonald")
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client, self.stdout)
-
-    def keywords_before_test(self):
-        self.protocol.lineReceived(_b("failure a\n"))
-        self.protocol.lineReceived(_b("failure: a\n"))
-        self.protocol.lineReceived(_b("error a\n"))
-        self.protocol.lineReceived(_b("error: a\n"))
-        self.protocol.lineReceived(_b("success a\n"))
-        self.protocol.lineReceived(_b("success: a\n"))
-        self.protocol.lineReceived(_b("successful a\n"))
-        self.protocol.lineReceived(_b("successful: a\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.assertEqual(self.stdout.getvalue(), _b("failure a\n"
-                                                 "failure: a\n"
-                                                 "error a\n"
-                                                 "error: a\n"
-                                                 "success a\n"
-                                                 "success: a\n"
-                                                 "successful a\n"
-                                                 "successful: a\n"
-                                                 "]\n"))
-
-    def test_keywords_before_test(self):
-        self.keywords_before_test()
-        self.assertEqual(self.client._events, [])
-
-    def test_keywords_after_error(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("error old mcdonald\n"))
-        self.keywords_before_test()
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, {}),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_keywords_after_failure(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("failure old mcdonald\n"))
-        self.keywords_before_test()
-        self.assertEqual(self.client._events, [
-            ('startTest', self.test),
-            ('addFailure', self.test, {}),
-            ('stopTest', self.test),
-            ])
-
-    def test_keywords_after_success(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("success old mcdonald\n"))
-        self.keywords_before_test()
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addSuccess', self.test),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_keywords_after_test(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("failure a\n"))
-        self.protocol.lineReceived(_b("failure: a\n"))
-        self.protocol.lineReceived(_b("error a\n"))
-        self.protocol.lineReceived(_b("error: a\n"))
-        self.protocol.lineReceived(_b("success a\n"))
-        self.protocol.lineReceived(_b("success: a\n"))
-        self.protocol.lineReceived(_b("successful a\n"))
-        self.protocol.lineReceived(_b("successful: a\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.protocol.lineReceived(_b("failure old mcdonald\n"))
-        self.assertEqual(self.stdout.getvalue(), _b("test old mcdonald\n"
-                                                 "failure a\n"
-                                                 "failure: a\n"
-                                                 "error a\n"
-                                                 "error: a\n"
-                                                 "success a\n"
-                                                 "success: a\n"
-                                                 "successful a\n"
-                                                 "successful: a\n"
-                                                 "]\n"))
-        self.assertEqual(self.client._events, [
-            ('startTest', self.test),
-            ('addFailure', self.test, {}),
-            ('stopTest', self.test),
-            ])
-
-    def test_keywords_during_failure(self):
-        # A smoke test to make sure that the details parsers have control
-        # appropriately.
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("failure: old mcdonald [\n"))
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("failure a\n"))
-        self.protocol.lineReceived(_b("failure: a\n"))
-        self.protocol.lineReceived(_b("error a\n"))
-        self.protocol.lineReceived(_b("error: a\n"))
-        self.protocol.lineReceived(_b("success a\n"))
-        self.protocol.lineReceived(_b("success: a\n"))
-        self.protocol.lineReceived(_b("successful a\n"))
-        self.protocol.lineReceived(_b("successful: a\n"))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.assertEqual(self.stdout.getvalue(), _b(""))
-        details = {}
-        details['traceback'] = Content(ContentType("text", "x-traceback",
-            {'charset': 'utf8'}),
-            lambda:[_b(
-            "test old mcdonald\n"
-            "failure a\n"
-            "failure: a\n"
-            "error a\n"
-            "error: a\n"
-            "success a\n"
-            "success: a\n"
-            "successful a\n"
-            "successful: a\n"
-            "]\n")])
-        self.assertEqual(self.client._events, [
-            ('startTest', self.test),
-            ('addFailure', self.test, details),
-            ('stopTest', self.test),
-            ])
-
-    def test_stdout_passthrough(self):
-        """Lines received which cannot be interpreted as any protocol action
-        should be passed through to sys.stdout.
-        """
-        bytes = _b("randombytes\n")
-        self.protocol.lineReceived(bytes)
-        self.assertEqual(self.stdout.getvalue(), bytes)
-
-
-class TestTestProtocolServerLostConnection(unittest.TestCase):
-
-    def setUp(self):
-        self.client = Python26TestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.test = subunit.RemotedTestCase("old mcdonald")
-
-    def test_lost_connection_no_input(self):
-        self.protocol.lostConnection()
-        self.assertEqual([], self.client._events)
-
-    def test_lost_connection_after_start(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lostConnection()
-        failure = subunit.RemoteError(
-            _u("lost connection during test 'old mcdonald'"))
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, failure),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_lost_connected_after_error(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("error old mcdonald\n"))
-        self.protocol.lostConnection()
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, subunit.RemoteError(_u(""))),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def do_connection_lost(self, outcome, opening):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("%s old mcdonald %s" % (outcome, opening)))
-        self.protocol.lostConnection()
-        failure = subunit.RemoteError(
-            _u("lost connection during %s report of test 'old mcdonald'") %
-            outcome)
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, failure),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_lost_connection_during_error(self):
-        self.do_connection_lost("error", "[\n")
-
-    def test_lost_connection_during_error_details(self):
-        self.do_connection_lost("error", "[ multipart\n")
-
-    def test_lost_connected_after_failure(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("failure old mcdonald\n"))
-        self.protocol.lostConnection()
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addFailure', self.test, subunit.RemoteError(_u(""))),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_lost_connection_during_failure(self):
-        self.do_connection_lost("failure", "[\n")
-
-    def test_lost_connection_during_failure_details(self):
-        self.do_connection_lost("failure", "[ multipart\n")
-
-    def test_lost_connection_after_success(self):
-        self.protocol.lineReceived(_b("test old mcdonald\n"))
-        self.protocol.lineReceived(_b("success old mcdonald\n"))
-        self.protocol.lostConnection()
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addSuccess', self.test),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_lost_connection_during_success(self):
-        self.do_connection_lost("success", "[\n")
-
-    def test_lost_connection_during_success_details(self):
-        self.do_connection_lost("success", "[ multipart\n")
-
-    def test_lost_connection_during_skip(self):
-        self.do_connection_lost("skip", "[\n")
-
-    def test_lost_connection_during_skip_details(self):
-        self.do_connection_lost("skip", "[ multipart\n")
-
-    def test_lost_connection_during_xfail(self):
-        self.do_connection_lost("xfail", "[\n")
-
-    def test_lost_connection_during_xfail_details(self):
-        self.do_connection_lost("xfail", "[ multipart\n")
-
-    def test_lost_connection_during_uxsuccess(self):
-        self.do_connection_lost("uxsuccess", "[\n")
-
-    def test_lost_connection_during_uxsuccess_details(self):
-        self.do_connection_lost("uxsuccess", "[ multipart\n")
-
-
-class TestInTestMultipart(unittest.TestCase):
-
-    def setUp(self):
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = subunit.RemotedTestCase(_u("mcdonalds farm"))
-
-    def test__outcome_sets_details_parser(self):
-        self.protocol._reading_success_details.details_parser = None
-        self.protocol._state._outcome(0, _b("mcdonalds farm [ multipart\n"),
-            None, self.protocol._reading_success_details)
-        parser = self.protocol._reading_success_details.details_parser
-        self.assertNotEqual(None, parser)
-        self.assertTrue(isinstance(parser,
-            subunit.details.MultipartDetailsParser))
-
-
-class TestTestProtocolServerAddError(unittest.TestCase):
-
-    def setUp(self):
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = subunit.RemotedTestCase("mcdonalds farm")
-
-    def simple_error_keyword(self, keyword):
-        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
-        details = {}
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, details),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_simple_error(self):
-        self.simple_error_keyword("error")
-
-    def test_simple_error_colon(self):
-        self.simple_error_keyword("error:")
-
-    def test_error_empty_message(self):
-        self.protocol.lineReceived(_b("error mcdonalds farm [\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        details = {}
-        details['traceback'] = Content(ContentType("text", "x-traceback",
-            {'charset': 'utf8'}), lambda:[_b("")])
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, details),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def error_quoted_bracket(self, keyword):
-        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        details = {}
-        details['traceback'] = Content(ContentType("text", "x-traceback",
-            {'charset': 'utf8'}), lambda:[_b("]\n")])
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addError', self.test, details),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_error_quoted_bracket(self):
-        self.error_quoted_bracket("error")
-
-    def test_error_colon_quoted_bracket(self):
-        self.error_quoted_bracket("error:")
-
-
-class TestTestProtocolServerAddFailure(unittest.TestCase):
-
-    def setUp(self):
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = subunit.RemotedTestCase("mcdonalds farm")
-
-    def assertFailure(self, details):
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addFailure', self.test, details),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def simple_failure_keyword(self, keyword):
-        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
-        details = {}
-        self.assertFailure(details)
-
-    def test_simple_failure(self):
-        self.simple_failure_keyword("failure")
-
-    def test_simple_failure_colon(self):
-        self.simple_failure_keyword("failure:")
-
-    def test_failure_empty_message(self):
-        self.protocol.lineReceived(_b("failure mcdonalds farm [\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        details = {}
-        details['traceback'] = Content(ContentType("text", "x-traceback",
-            {'charset': 'utf8'}), lambda:[_b("")])
-        self.assertFailure(details)
-
-    def failure_quoted_bracket(self, keyword):
-        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        details = {}
-        details['traceback'] = Content(ContentType("text", "x-traceback",
-            {'charset': 'utf8'}), lambda:[_b("]\n")])
-        self.assertFailure(details)
-
-    def test_failure_quoted_bracket(self):
-        self.failure_quoted_bracket("failure")
-
-    def test_failure_colon_quoted_bracket(self):
-        self.failure_quoted_bracket("failure:")
-
-
-class TestTestProtocolServerAddxFail(unittest.TestCase):
-    """Tests for the xfail keyword.
-
-    In Python this can thunk through to Success due to stdlib limitations (see
-    README).
-    """
-
-    def capture_expected_failure(self, test, err):
-        self._events.append((test, err))
-
-    def setup_python26(self):
-        """Setup a test object ready to be xfailed and thunk to success."""
-        self.client = Python26TestResult()
-        self.setup_protocol()
-
-    def setup_python27(self):
-        """Setup a test object ready to be xfailed."""
-        self.client = Python27TestResult()
-        self.setup_protocol()
-
-    def setup_python_ex(self):
-        """Setup a test object ready to be xfailed with details."""
-        self.client = ExtendedTestResult()
-        self.setup_protocol()
-
-    def setup_protocol(self):
-        """Setup the protocol based on self.client."""
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = self.client._events[-1][-1]
-
-    def simple_xfail_keyword(self, keyword, as_success):
-        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
-        self.check_success_or_xfail(as_success)
-
-    def check_success_or_xfail(self, as_success, error_message=None):
-        if as_success:
-            self.assertEqual([
-                ('startTest', self.test),
-                ('addSuccess', self.test),
-                ('stopTest', self.test),
-                ], self.client._events)
-        else:
-            details = {}
-            if error_message is not None:
-                details['traceback'] = Content(
-                    ContentType("text", "x-traceback", {'charset': 'utf8'}),
-                    lambda:[_b(error_message)])
-            if isinstance(self.client, ExtendedTestResult):
-                value = details
-            else:
-                if error_message is not None:
-                    value = subunit.RemoteError(details_to_str(details))
-                else:
-                    value = subunit.RemoteError()
-            self.assertEqual([
-                ('startTest', self.test),
-                ('addExpectedFailure', self.test, value),
-                ('stopTest', self.test),
-                ], self.client._events)
-
-    def test_simple_xfail(self):
-        self.setup_python26()
-        self.simple_xfail_keyword("xfail", True)
-        self.setup_python27()
-        self.simple_xfail_keyword("xfail",  False)
-        self.setup_python_ex()
-        self.simple_xfail_keyword("xfail",  False)
-
-    def test_simple_xfail_colon(self):
-        self.setup_python26()
-        self.simple_xfail_keyword("xfail:", True)
-        self.setup_python27()
-        self.simple_xfail_keyword("xfail:", False)
-        self.setup_python_ex()
-        self.simple_xfail_keyword("xfail:", False)
-
-    def test_xfail_empty_message(self):
-        self.setup_python26()
-        self.empty_message(True)
-        self.setup_python27()
-        self.empty_message(False)
-        self.setup_python_ex()
-        self.empty_message(False, error_message="")
-
-    def empty_message(self, as_success, error_message="\n"):
-        self.protocol.lineReceived(_b("xfail mcdonalds farm [\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.check_success_or_xfail(as_success, error_message)
-
-    def xfail_quoted_bracket(self, keyword, as_success):
-        # This tests that it is accepted, but cannot test that it is used
-        # today, because there is no way to expose it in Python so far.
-        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.check_success_or_xfail(as_success, "]\n")
-
-    def test_xfail_quoted_bracket(self):
-        self.setup_python26()
-        self.xfail_quoted_bracket("xfail", True)
-        self.setup_python27()
-        self.xfail_quoted_bracket("xfail", False)
-        self.setup_python_ex()
-        self.xfail_quoted_bracket("xfail", False)
-
-    def test_xfail_colon_quoted_bracket(self):
-        self.setup_python26()
-        self.xfail_quoted_bracket("xfail:", True)
-        self.setup_python27()
-        self.xfail_quoted_bracket("xfail:", False)
-        self.setup_python_ex()
-        self.xfail_quoted_bracket("xfail:", False)
-
-
-class TestTestProtocolServerAddunexpectedSuccess(TestCase):
-    """Tests for the uxsuccess keyword."""
-
-    def capture_expected_failure(self, test, err):
-        self._events.append((test, err))
-
-    def setup_python26(self):
-        """Setup a test object ready to be xfailed and thunk to success."""
-        self.client = Python26TestResult()
-        self.setup_protocol()
-
-    def setup_python27(self):
-        """Setup a test object ready to be xfailed."""
-        self.client = Python27TestResult()
-        self.setup_protocol()
-
-    def setup_python_ex(self):
-        """Setup a test object ready to be xfailed with details."""
-        self.client = ExtendedTestResult()
-        self.setup_protocol()
-
-    def setup_protocol(self):
-        """Setup the protocol based on self.client."""
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = self.client._events[-1][-1]
-
-    def simple_uxsuccess_keyword(self, keyword, as_fail):
-        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
-        self.check_fail_or_uxsuccess(as_fail)
-
-    def check_fail_or_uxsuccess(self, as_fail, error_message=None):
-        details = {}
-        if error_message is not None:
-            details['traceback'] = Content(
-                ContentType("text", "x-traceback", {'charset': 'utf8'}),
-                lambda:[_b(error_message)])
-        if isinstance(self.client, ExtendedTestResult):
-            value = details
-        else:
-            value = None
-        if as_fail:
-            self.client._events[1] = self.client._events[1][:2]
-            # The value is generated within the ExtendedToOriginalDecorator:
-            # TODO: use the testtools matcher to check on this.
-            self.assertEqual([
-                ('startTest', self.test),
-                ('addFailure', self.test),
-                ('stopTest', self.test),
-                ], self.client._events)
-        elif value:
-            self.assertEqual([
-                ('startTest', self.test),
-                ('addUnexpectedSuccess', self.test, value),
-                ('stopTest', self.test),
-                ], self.client._events)
-        else:
-            self.assertEqual([
-                ('startTest', self.test),
-                ('addUnexpectedSuccess', self.test),
-                ('stopTest', self.test),
-                ], self.client._events)
-
-    def test_simple_uxsuccess(self):
-        self.setup_python26()
-        self.simple_uxsuccess_keyword("uxsuccess", True)
-        self.setup_python27()
-        self.simple_uxsuccess_keyword("uxsuccess",  False)
-        self.setup_python_ex()
-        self.simple_uxsuccess_keyword("uxsuccess",  False)
-
-    def test_simple_uxsuccess_colon(self):
-        self.setup_python26()
-        self.simple_uxsuccess_keyword("uxsuccess:", True)
-        self.setup_python27()
-        self.simple_uxsuccess_keyword("uxsuccess:", False)
-        self.setup_python_ex()
-        self.simple_uxsuccess_keyword("uxsuccess:", False)
-
-    def test_uxsuccess_empty_message(self):
-        self.setup_python26()
-        self.empty_message(True)
-        self.setup_python27()
-        self.empty_message(False)
-        self.setup_python_ex()
-        self.empty_message(False, error_message="")
-
-    def empty_message(self, as_fail, error_message="\n"):
-        self.protocol.lineReceived(_b("uxsuccess mcdonalds farm [\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.check_fail_or_uxsuccess(as_fail, error_message)
-
-    def uxsuccess_quoted_bracket(self, keyword, as_fail):
-        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.check_fail_or_uxsuccess(as_fail, "]\n")
-
-    def test_uxsuccess_quoted_bracket(self):
-        self.setup_python26()
-        self.uxsuccess_quoted_bracket("uxsuccess", True)
-        self.setup_python27()
-        self.uxsuccess_quoted_bracket("uxsuccess", False)
-        self.setup_python_ex()
-        self.uxsuccess_quoted_bracket("uxsuccess", False)
-
-    def test_uxsuccess_colon_quoted_bracket(self):
-        self.setup_python26()
-        self.uxsuccess_quoted_bracket("uxsuccess:", True)
-        self.setup_python27()
-        self.uxsuccess_quoted_bracket("uxsuccess:", False)
-        self.setup_python_ex()
-        self.uxsuccess_quoted_bracket("uxsuccess:", False)
-
-
-class TestTestProtocolServerAddSkip(unittest.TestCase):
-    """Tests for the skip keyword.
-
-    In Python this meets the testtools extended TestResult contract.
-    (See https://launchpad.net/testtools).
-    """
-
-    def setUp(self):
-        """Setup a test object ready to be skipped."""
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = self.client._events[-1][-1]
-
-    def assertSkip(self, reason):
-        details = {}
-        if reason is not None:
-            details['reason'] = Content(
-                ContentType("text", "plain"), lambda:[reason])
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addSkip', self.test, details),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def simple_skip_keyword(self, keyword):
-        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
-        self.assertSkip(None)
-
-    def test_simple_skip(self):
-        self.simple_skip_keyword("skip")
-
-    def test_simple_skip_colon(self):
-        self.simple_skip_keyword("skip:")
-
-    def test_skip_empty_message(self):
-        self.protocol.lineReceived(_b("skip mcdonalds farm [\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.assertSkip(_b(""))
-
-    def skip_quoted_bracket(self, keyword):
-        # This tests that it is accepted, but cannot test that it is used
-        # today, because there is no way to expose it in Python so far.
-        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        self.assertSkip(_b("]\n"))
-
-    def test_skip_quoted_bracket(self):
-        self.skip_quoted_bracket("skip")
-
-    def test_skip_colon_quoted_bracket(self):
-        self.skip_quoted_bracket("skip:")
-
-
-class TestTestProtocolServerAddSuccess(unittest.TestCase):
-
-    def setUp(self):
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        self.test = subunit.RemotedTestCase("mcdonalds farm")
-
-    def simple_success_keyword(self, keyword):
-        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addSuccess', self.test),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_simple_success(self):
-        self.simple_success_keyword("successful")
-
-    def test_simple_success_colon(self):
-        self.simple_success_keyword("successful:")
-
-    def assertSuccess(self, details):
-        self.assertEqual([
-            ('startTest', self.test),
-            ('addSuccess', self.test, details),
-            ('stopTest', self.test),
-            ], self.client._events)
-
-    def test_success_empty_message(self):
-        self.protocol.lineReceived(_b("success mcdonalds farm [\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        details = {}
-        details['message'] = Content(ContentType("text", "plain"),
-            lambda:[_b("")])
-        self.assertSuccess(details)
-
-    def success_quoted_bracket(self, keyword):
-        # This tests that it is accepted, but cannot test that it is used
-        # today, because there is no way to expose it in Python so far.
-        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
-        self.protocol.lineReceived(_b(" ]\n"))
-        self.protocol.lineReceived(_b("]\n"))
-        details = {}
-        details['message'] = Content(ContentType("text", "plain"),
-            lambda:[_b("]\n")])
-        self.assertSuccess(details)
-
-    def test_success_quoted_bracket(self):
-        self.success_quoted_bracket("success")
-
-    def test_success_colon_quoted_bracket(self):
-        self.success_quoted_bracket("success:")
-
-
-class TestTestProtocolServerProgress(unittest.TestCase):
-    """Test receipt of progress: directives."""
-
-    def test_progress_accepted_stdlib(self):
-        self.result = Python26TestResult()
-        self.stream = BytesIO()
-        self.protocol = subunit.TestProtocolServer(self.result,
-            stream=self.stream)
-        self.protocol.lineReceived(_b("progress: 23"))
-        self.protocol.lineReceived(_b("progress: -2"))
-        self.protocol.lineReceived(_b("progress: +4"))
-        self.assertEqual(_b(""), self.stream.getvalue())
-
-    def test_progress_accepted_extended(self):
-        # With a progress capable TestResult, progress events are emitted.
-        self.result = ExtendedTestResult()
-        self.stream = BytesIO()
-        self.protocol = subunit.TestProtocolServer(self.result,
-            stream=self.stream)
-        self.protocol.lineReceived(_b("progress: 23"))
-        self.protocol.lineReceived(_b("progress: push"))
-        self.protocol.lineReceived(_b("progress: -2"))
-        self.protocol.lineReceived(_b("progress: pop"))
-        self.protocol.lineReceived(_b("progress: +4"))
-        self.assertEqual(_b(""), self.stream.getvalue())
-        self.assertEqual([
-            ('progress', 23, subunit.PROGRESS_SET),
-            ('progress', None, subunit.PROGRESS_PUSH),
-            ('progress', -2, subunit.PROGRESS_CUR),
-            ('progress', None, subunit.PROGRESS_POP),
-            ('progress', 4, subunit.PROGRESS_CUR),
-            ], self.result._events)
-
-
-class TestTestProtocolServerStreamTags(unittest.TestCase):
-    """Test managing tags on the protocol level."""
-
-    def setUp(self):
-        self.client = ExtendedTestResult()
-        self.protocol = subunit.TestProtocolServer(self.client)
-
-    def test_initial_tags(self):
-        self.protocol.lineReceived(_b("tags: foo bar:baz  quux\n"))
-        self.assertEqual([
-            ('tags', set(["foo", "bar:baz", "quux"]), set()),
-            ], self.client._events)
-
-    def test_minus_removes_tags(self):
-        self.protocol.lineReceived(_b("tags: -bar quux\n"))
-        self.assertEqual([
-            ('tags', set(["quux"]), set(["bar"])),
-            ], self.client._events)
-
-    def test_tags_do_not_get_set_on_test(self):
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        test = self.client._events[0][-1]
-        self.assertEqual(None, getattr(test, 'tags', None))
-
-    def test_tags_do_not_get_set_on_global_tags(self):
-        self.protocol.lineReceived(_b("tags: foo bar\n"))
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        test = self.client._events[-1][-1]
-        self.assertEqual(None, getattr(test, 'tags', None))
-
-    def test_tags_get_set_on_test_tags(self):
-        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
-        test = self.client._events[-1][-1]
-        self.protocol.lineReceived(_b("tags: foo bar\n"))
-        self.protocol.lineReceived(_b("success mcdonalds farm\n"))
-        self.assertEqual(None, getattr(test, 'tags', None))
-
-
-class TestTestProtocolServerStreamTime(unittest.TestCase):
-    """Test managing time information at the protocol level."""
-
-    def test_time_accepted_stdlib(self):
-        self.result = Python26TestResult()
-        self.stream = BytesIO()
-        self.protocol = subunit.TestProtocolServer(self.result,
-            stream=self.stream)
-        self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
-        self.assertEqual(_b(""), self.stream.getvalue())
-
-    def test_time_accepted_extended(self):
-        self.result = ExtendedTestResult()
-        self.stream = BytesIO()
-        self.protocol = subunit.TestProtocolServer(self.result,
-            stream=self.stream)
-        self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
-        self.assertEqual(_b(""), self.stream.getvalue())
-        self.assertEqual([
-            ('time', datetime.datetime(2001, 12, 12, 12, 59, 59, 0,
-            iso8601.Utc()))
-            ], self.result._events)
-
-
-class TestRemotedTestCase(unittest.TestCase):
-
-    def test_simple(self):
-        test = subunit.RemotedTestCase("A test description")
-        self.assertRaises(NotImplementedError, test.setUp)
-        self.assertRaises(NotImplementedError, test.tearDown)
-        self.assertEqual("A test description",
-                         test.shortDescription())
-        self.assertEqual("A test description",
-                         test.id())
-        self.assertEqual("A test description (subunit.RemotedTestCase)", "%s" % test)
-        self.assertEqual("<subunit.RemotedTestCase description="
-                         "'A test description'>", "%r" % test)
-        result = unittest.TestResult()
-        test.run(result)
-        self.assertEqual([(test, _remote_exception_repr + ": "
-                                 "Cannot run RemotedTestCases.\n\n")],
-                         result.errors)
-        self.assertEqual(1, result.testsRun)
-        another_test = subunit.RemotedTestCase("A test description")
-        self.assertEqual(test, another_test)
-        different_test = subunit.RemotedTestCase("ofo")
-        self.assertNotEqual(test, different_test)
-        self.assertNotEqual(another_test, different_test)
-
-
-class TestRemoteError(unittest.TestCase):
-
-    def test_eq(self):
-        error = subunit.RemoteError(_u("Something went wrong"))
-        another_error = subunit.RemoteError(_u("Something went wrong"))
-        different_error = subunit.RemoteError(_u("boo!"))
-        self.assertEqual(error, another_error)
-        self.assertNotEqual(error, different_error)
-        self.assertNotEqual(different_error, another_error)
-
-    def test_empty_constructor(self):
-        self.assertEqual(subunit.RemoteError(), subunit.RemoteError(_u("")))
-
-
-class TestExecTestCase(unittest.TestCase):
-
-    class SampleExecTestCase(subunit.ExecTestCase):
-
-        def test_sample_method(self):
-            """sample-script.py"""
-            # The sample script runs three tests: one that fails, one that
-            # errors and one that succeeds.
-
-        def test_sample_method_args(self):
-            """sample-script.py foo"""
-            # sample that will run just one test.
-
-    def test_construct(self):
-        test = self.SampleExecTestCase("test_sample_method")
-        self.assertEqual(test.script,
-                         subunit.join_dir(__file__, 'sample-script.py'))
-
-    def test_args(self):
-        result = unittest.TestResult()
-        test = self.SampleExecTestCase("test_sample_method_args")
-        test.run(result)
-        self.assertEqual(1, result.testsRun)
-
-    def test_run(self):
-        result = ExtendedTestResult()
-        test = self.SampleExecTestCase("test_sample_method")
-        test.run(result)
-        mcdonald = subunit.RemotedTestCase("old mcdonald")
-        bing = subunit.RemotedTestCase("bing crosby")
-        bing_details = {}
-        bing_details['traceback'] = Content(ContentType("text", "x-traceback",
-            {'charset': 'utf8'}), lambda:[_b("foo.c:53:ERROR invalid state\n")])
-        an_error = subunit.RemotedTestCase("an error")
-        error_details = {}
-        self.assertEqual([
-            ('startTest', mcdonald),
-            ('addSuccess', mcdonald),
-            ('stopTest', mcdonald),
-            ('startTest', bing),
-            ('addFailure', bing, bing_details),
-            ('stopTest', bing),
-            ('startTest', an_error),
-            ('addError', an_error, error_details),
-            ('stopTest', an_error),
-            ], result._events)
-
-    def test_debug(self):
-        test = self.SampleExecTestCase("test_sample_method")
-        test.debug()
-
-    def test_count_test_cases(self):
-        """TODO run the child process and count responses to determine the count."""
-
-    def test_join_dir(self):
-        sibling = subunit.join_dir(__file__, 'foo')
-        filedir = os.path.abspath(os.path.dirname(__file__))
-        expected = os.path.join(filedir, 'foo')
-        self.assertEqual(sibling, expected)
-
-
-class DoExecTestCase(subunit.ExecTestCase):
-
-    def test_working_script(self):
-        """sample-two-script.py"""
-
-
-class TestIsolatedTestCase(TestCase):
-
-    class SampleIsolatedTestCase(subunit.IsolatedTestCase):
-
-        SETUP = False
-        TEARDOWN = False
-        TEST = False
-
-        def setUp(self):
-            TestIsolatedTestCase.SampleIsolatedTestCase.SETUP = True
-
-        def tearDown(self):
-            TestIsolatedTestCase.SampleIsolatedTestCase.TEARDOWN = True
-
-        def test_sets_global_state(self):
-            TestIsolatedTestCase.SampleIsolatedTestCase.TEST = True
-
-
-    def test_construct(self):
-        self.SampleIsolatedTestCase("test_sets_global_state")
-
-    @skipIf(os.name != "posix", "Need a posix system for forking tests")
-    def test_run(self):
-        result = unittest.TestResult()
-        test = self.SampleIsolatedTestCase("test_sets_global_state")
-        test.run(result)
-        self.assertEqual(result.testsRun, 1)
-        self.assertEqual(self.SampleIsolatedTestCase.SETUP, False)
-        self.assertEqual(self.SampleIsolatedTestCase.TEARDOWN, False)
-        self.assertEqual(self.SampleIsolatedTestCase.TEST, False)
-
-    def test_debug(self):
-        pass
-        #test = self.SampleExecTestCase("test_sample_method")
-        #test.debug()
-
-
-class TestIsolatedTestSuite(TestCase):
-
-    class SampleTestToIsolate(unittest.TestCase):
-
-        SETUP = False
-        TEARDOWN = False
-        TEST = False
-
-        def setUp(self):
-            TestIsolatedTestSuite.SampleTestToIsolate.SETUP = True
-
-        def tearDown(self):
-            TestIsolatedTestSuite.SampleTestToIsolate.TEARDOWN = True
-
-        def test_sets_global_state(self):
-            TestIsolatedTestSuite.SampleTestToIsolate.TEST = True
-
-
-    def test_construct(self):
-        subunit.IsolatedTestSuite()
-
-    @skipIf(os.name != "posix", "Need a posix system for forking tests")
-    def test_run(self):
-        result = unittest.TestResult()
-        suite = subunit.IsolatedTestSuite()
-        sub_suite = unittest.TestSuite()
-        sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
-        sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
-        suite.addTest(sub_suite)
-        suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
-        suite.run(result)
-        self.assertEqual(result.testsRun, 3)
-        self.assertEqual(self.SampleTestToIsolate.SETUP, False)
-        self.assertEqual(self.SampleTestToIsolate.TEARDOWN, False)
-        self.assertEqual(self.SampleTestToIsolate.TEST, False)
-
-
-class TestTestProtocolClient(TestCase):
-
-    def setUp(self):
-        super(TestTestProtocolClient, self).setUp()
-        self.io = BytesIO()
-        self.protocol = subunit.TestProtocolClient(self.io)
-        self.unicode_test = PlaceHolder(_u('\u2603'))
-        self.test = TestTestProtocolClient("test_start_test")
-        self.sample_details = {'something':Content(
-            ContentType('text', 'plain'), lambda:[_b('serialised\nform')])}
-        self.sample_tb_details = dict(self.sample_details)
-        self.sample_tb_details['traceback'] = TracebackContent(
-            subunit.RemoteError(_u("boo qux")), self.test)
-
-    def test_start_test(self):
-        """Test startTest on a TestProtocolClient."""
-        self.protocol.startTest(self.test)
-        self.assertEqual(self.io.getvalue(), _b("test: %s\n" % self.test.id()))
-
-    def test_start_test_unicode_id(self):
-        """Test startTest on a TestProtocolClient."""
-        self.protocol.startTest(self.unicode_test)
-        expected = _b("test: ") + _u('\u2603').encode('utf8') + _b("\n")
-        self.assertEqual(expected, self.io.getvalue())
-
-    def test_stop_test(self):
-        # stopTest doesn't output anything.
-        self.protocol.stopTest(self.test)
-        self.assertEqual(self.io.getvalue(), _b(""))
-
-    def test_add_success(self):
-        """Test addSuccess on a TestProtocolClient."""
-        self.protocol.addSuccess(self.test)
-        self.assertEqual(
-            self.io.getvalue(), _b("successful: %s\n" % self.test.id()))
-
-    def test_add_outcome_unicode_id(self):
-        """Test addSuccess on a TestProtocolClient."""
-        self.protocol.addSuccess(self.unicode_test)
-        expected = _b("successful: ") + _u('\u2603').encode('utf8') + _b("\n")
-        self.assertEqual(expected, self.io.getvalue())
-
-    def test_add_success_details(self):
-        """Test addSuccess on a TestProtocolClient with details."""
-        self.protocol.addSuccess(self.test, details=self.sample_details)
-        self.assertEqual(
-            self.io.getvalue(), _b("successful: %s [ multipart\n"
-                "Content-Type: text/plain\n"
-                "something\n"
-                "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
-
-    def test_add_failure(self):
-        """Test addFailure on a TestProtocolClient."""
-        self.protocol.addFailure(
-            self.test, subunit.RemoteError(_u("boo qux")))
-        self.assertEqual(
-            self.io.getvalue(),
-            _b(('failure: %s [\n' + _remote_exception_str + ': boo qux\n]\n')
-            % self.test.id()))
-
-    def test_add_failure_details(self):
-        """Test addFailure on a TestProtocolClient with details."""
-        self.protocol.addFailure(
-            self.test, details=self.sample_tb_details)
-        self.assertThat([
-            _b(("failure: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "something\n"
-            "F\r\nserialised\nform0\r\n"
-            "Content-Type: text/x-traceback;charset=utf8,language=python\n"
-            "traceback\n" + _remote_exception_str_chunked +
-            "]\n") % self.test.id()),
-            _b(("failure: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "something\n"
-            "F\r\nserialised\nform0\r\n"
-            "Content-Type: text/x-traceback;language=python,charset=utf8\n"
-            "traceback\n" + _remote_exception_str_chunked +
-            "]\n") % self.test.id()),
-            ],
-            Contains(self.io.getvalue()))
-
-    def test_add_error(self):
-        """Test stopTest on a TestProtocolClient."""
-        self.protocol.addError(
-            self.test, subunit.RemoteError(_u("phwoar crikey")))
-        self.assertEqual(
-            self.io.getvalue(),
-            _b(('error: %s [\n' +
-            _remote_exception_str + ": phwoar crikey\n"
-            "]\n") % self.test.id()))
-
-    def test_add_error_details(self):
-        """Test stopTest on a TestProtocolClient with details."""
-        self.protocol.addError(
-            self.test, details=self.sample_tb_details)
-        self.assertThat([
-            _b(("error: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "something\n"
-            "F\r\nserialised\nform0\r\n"
-            "Content-Type: text/x-traceback;charset=utf8,language=python\n"
-            "traceback\n" + _remote_exception_str_chunked +
-            "]\n") % self.test.id()),
-            _b(("error: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "something\n"
-            "F\r\nserialised\nform0\r\n"
-            "Content-Type: text/x-traceback;language=python,charset=utf8\n"
-            "traceback\n" + _remote_exception_str_chunked +
-            "]\n") % self.test.id()),
-            ],
-            Contains(self.io.getvalue()))
-
-    def test_add_expected_failure(self):
-        """Test addExpectedFailure on a TestProtocolClient."""
-        self.protocol.addExpectedFailure(
-            self.test, subunit.RemoteError(_u("phwoar crikey")))
-        self.assertEqual(
-            self.io.getvalue(),
-            _b(('xfail: %s [\n' +
-            _remote_exception_str + ": phwoar crikey\n"
-            "]\n") % self.test.id()))
-
-    def test_add_expected_failure_details(self):
-        """Test addExpectedFailure on a TestProtocolClient with details."""
-        self.protocol.addExpectedFailure(
-            self.test, details=self.sample_tb_details)
-        self.assertThat([
-            _b(("xfail: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "something\n"
-            "F\r\nserialised\nform0\r\n"
-            "Content-Type: text/x-traceback;charset=utf8,language=python\n"
-            "traceback\n" + _remote_exception_str_chunked +
-            "]\n") % self.test.id()),
-            _b(("xfail: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "something\n"
-            "F\r\nserialised\nform0\r\n"
-            "Content-Type: text/x-traceback;language=python,charset=utf8\n"
-            "traceback\n" + _remote_exception_str_chunked +
-            "]\n") % self.test.id()),
-            ],
-            Contains(self.io.getvalue()))
-
-    def test_add_skip(self):
-        """Test addSkip on a TestProtocolClient."""
-        self.protocol.addSkip(
-            self.test, "Has it really?")
-        self.assertEqual(
-            self.io.getvalue(),
-            _b('skip: %s [\nHas it really?\n]\n' % self.test.id()))
-
-    def test_add_skip_details(self):
-        """Test addSkip on a TestProtocolClient with details."""
-        details = {'reason':Content(
-            ContentType('text', 'plain'), lambda:[_b('Has it really?')])}
-        self.protocol.addSkip(self.test, details=details)
-        self.assertEqual(
-            self.io.getvalue(),
-            _b("skip: %s [ multipart\n"
-            "Content-Type: text/plain\n"
-            "reason\n"
-            "E\r\nHas it really?0\r\n"
-            "]\n" % self.test.id()))
-
-    def test_progress_set(self):
-        self.protocol.progress(23, subunit.PROGRESS_SET)
-        self.assertEqual(self.io.getvalue(), _b('progress: 23\n'))
-
-    def test_progress_neg_cur(self):
-        self.protocol.progress(-23, subunit.PROGRESS_CUR)
-        self.assertEqual(self.io.getvalue(), _b('progress: -23\n'))
-
-    def test_progress_pos_cur(self):
-        self.protocol.progress(23, subunit.PROGRESS_CUR)
-        self.assertEqual(self.io.getvalue(), _b('progress: +23\n'))
-
-    def test_progress_pop(self):
-        self.protocol.progress(1234, subunit.PROGRESS_POP)
-        self.assertEqual(self.io.getvalue(), _b('progress: pop\n'))
-
-    def test_progress_push(self):
-        self.protocol.progress(1234, subunit.PROGRESS_PUSH)
-        self.assertEqual(self.io.getvalue(), _b('progress: push\n'))
-
-    def test_time(self):
-        # Calling time() outputs a time signal immediately.
-        self.protocol.time(
-            datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc()))
-        self.assertEqual(
-            _b("time: 2009-10-11 12:13:14.000015Z\n"),
-            self.io.getvalue())
-
-    def test_add_unexpected_success(self):
-        """Test addUnexpectedSuccess on a TestProtocolClient."""
-        self.protocol.addUnexpectedSuccess(self.test)
-        self.assertEqual(
-            self.io.getvalue(), _b("uxsuccess: %s\n" % self.test.id()))
-
-    def test_add_unexpected_success_details(self):
-        """Test addUnexpectedSuccess on a TestProtocolClient with details."""
-        self.protocol.addUnexpectedSuccess(self.test, details=self.sample_details)
-        self.assertEqual(
-            self.io.getvalue(), _b("uxsuccess: %s [ multipart\n"
-                "Content-Type: text/plain\n"
-                "something\n"
-                "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
-
-    def test_tags_empty(self):
-        self.protocol.tags(set(), set())
-        self.assertEqual(_b(""), self.io.getvalue())
-
-    def test_tags_add(self):
-        self.protocol.tags(set(['foo']), set())
-        self.assertEqual(_b("tags: foo\n"), self.io.getvalue())
-
-    def test_tags_both(self):
-        self.protocol.tags(set(['quux']), set(['bar']))
-        self.assertThat(
-            [b"tags: quux -bar\n", b"tags: -bar quux\n"],
-            Contains(self.io.getvalue()))
-
-    def test_tags_gone(self):
-        self.protocol.tags(set(), set(['bar']))
-        self.assertEqual(_b("tags: -bar\n"), self.io.getvalue())
diff --git a/lib/subunit/python/subunit/tests/test_test_protocol2.py b/lib/subunit/python/subunit/tests/test_test_protocol2.py
deleted file mode 100644
index c21392c..0000000
--- a/lib/subunit/python/subunit/tests/test_test_protocol2.py
+++ /dev/null
@@ -1,436 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-from io import BytesIO
-import datetime
-
-from testtools import TestCase
-from testtools.matchers import Contains, HasLength
-from testtools.tests.test_testresult import TestStreamResultContract
-from testtools.testresult.doubles import StreamResult
-
-import subunit
-import subunit.iso8601 as iso8601
-
-CONSTANT_ENUM = b'\xb3)\x01\x0c\x03foo\x08U_\x1b'
-CONSTANT_INPROGRESS = b'\xb3)\x02\x0c\x03foo\x8e\xc1-\xb5'
-CONSTANT_SUCCESS = b'\xb3)\x03\x0c\x03fooE\x9d\xfe\x10'
-CONSTANT_UXSUCCESS = b'\xb3)\x04\x0c\x03fooX\x98\xce\xa8'
-CONSTANT_SKIP = b'\xb3)\x05\x0c\x03foo\x93\xc4\x1d\r'
-CONSTANT_FAIL = b'\xb3)\x06\x0c\x03foo\x15Po\xa3'
-CONSTANT_XFAIL = b'\xb3)\x07\x0c\x03foo\xde\x0c\xbc\x06'
-CONSTANT_EOF = b'\xb3!\x10\x08S\x15\x88\xdc'
-CONSTANT_FILE_CONTENT = b'\xb3!@\x13\x06barney\x03wooA5\xe3\x8c'
-CONSTANT_MIME = b'\xb3! #\x1aapplication/foo; charset=1x3Q\x15'
-CONSTANT_TIMESTAMP = b'\xb3+\x03\x13<\x17T\xcf\x80\xaf\xc8\x03barI\x96>-'
-CONSTANT_ROUTE_CODE = b'\xb3-\x03\x13\x03bar\x06source\x9cY9\x19'
-CONSTANT_RUNNABLE = b'\xb3(\x03\x0c\x03foo\xe3\xea\xf5\xa4'
-CONSTANT_TAGS = [
-    b'\xb3)\x80\x15\x03bar\x02\x03foo\x03barTHn\xb4',
-    b'\xb3)\x80\x15\x03bar\x02\x03bar\x03foo\xf8\xf1\x91o',
-    ]
-
-
-class TestStreamResultToBytesContract(TestCase, TestStreamResultContract):
-    """Check that StreamResult behaves as testtools expects."""
-
-    def _make_result(self):
-        return subunit.StreamResultToBytes(BytesIO())
-
-
-class TestStreamResultToBytes(TestCase):
-
-    def _make_result(self):
-        output = BytesIO()
-        return subunit.StreamResultToBytes(output), output
-
-    def test_numbers(self):
-        result = subunit.StreamResultToBytes(BytesIO())
-        packet = []
-        self.assertRaises(Exception, result._write_number, -1, packet)
-        self.assertEqual([], packet)
-        result._write_number(0, packet)
-        self.assertEqual([b'\x00'], packet)
-        del packet[:]
-        result._write_number(63, packet)
-        self.assertEqual([b'\x3f'], packet)
-        del packet[:]
-        result._write_number(64, packet)
-        self.assertEqual([b'\x40\x40'], packet)
-        del packet[:]
-        result._write_number(16383, packet)
-        self.assertEqual([b'\x7f\xff'], packet)
-        del packet[:]
-        result._write_number(16384, packet)
-        self.assertEqual([b'\x80\x40', b'\x00'], packet)
-        del packet[:]
-        result._write_number(4194303, packet)
-        self.assertEqual([b'\xbf\xff', b'\xff'], packet)
-        del packet[:]
-        result._write_number(4194304, packet)
-        self.assertEqual([b'\xc0\x40\x00\x00'], packet)
-        del packet[:]
-        result._write_number(1073741823, packet)
-        self.assertEqual([b'\xff\xff\xff\xff'], packet)
-        del packet[:]
-        self.assertRaises(Exception, result._write_number, 1073741824, packet)
-        self.assertEqual([], packet)
-
-    def test_volatile_length(self):
-        # If the length of the packet data (before the length field itself is
-        # considered) is right on the boundary of the length field's variable
-        # length encoding, it is easy to get the length wrong by not
-        # accounting for the length field itself. That is, the encoder has to
-        # ensure that length == length_of_rest + length_of_length.
-        result, output = self._make_result()
-        # 1 byte short:
-        result.status(file_name="", file_bytes=b'\xff'*0)
-        self.assertThat(output.getvalue(), HasLength(10))
-        self.assertEqual(b'\x0a', output.getvalue()[3:4])
-        output.seek(0)
-        output.truncate()
-        # 1 byte long:
-        result.status(file_name="", file_bytes=b'\xff'*53)
-        self.assertThat(output.getvalue(), HasLength(63))
-        self.assertEqual(b'\x3f', output.getvalue()[3:4])
-        output.seek(0)
-        output.truncate()
-        # 2 bytes short
-        result.status(file_name="", file_bytes=b'\xff'*54)
-        self.assertThat(output.getvalue(), HasLength(65))
-        self.assertEqual(b'\x40\x41', output.getvalue()[3:5])
-        output.seek(0)
-        output.truncate()
-        # 2 bytes long
-        result.status(file_name="", file_bytes=b'\xff'*16371)
-        self.assertThat(output.getvalue(), HasLength(16383))
-        self.assertEqual(b'\x7f\xff', output.getvalue()[3:5])
-        output.seek(0)
-        output.truncate()
-        # 3 bytes short
-        result.status(file_name="", file_bytes=b'\xff'*16372)
-        self.assertThat(output.getvalue(), HasLength(16385))
-        self.assertEqual(b'\x80\x40\x01', output.getvalue()[3:6])
-        output.seek(0)
-        output.truncate()
-        # 3 bytes long
-        result.status(file_name="", file_bytes=b'\xff'*4194289)
-        self.assertThat(output.getvalue(), HasLength(4194303))
-        self.assertEqual(b'\xbf\xff\xff', output.getvalue()[3:6])
-        output.seek(0)
-        output.truncate()
-        self.assertRaises(Exception, result.status, file_name="",
-            file_bytes=b'\xff'*4194290)
-
-    def test_trivial_enumeration(self):
-        result, output = self._make_result()
-        result.status("foo", 'exists')
-        self.assertEqual(CONSTANT_ENUM, output.getvalue())
-
-    def test_inprogress(self):
-        result, output = self._make_result()
-        result.status("foo", 'inprogress')
-        self.assertEqual(CONSTANT_INPROGRESS, output.getvalue())
-
-    def test_success(self):
-        result, output = self._make_result()
-        result.status("foo", 'success')
-        self.assertEqual(CONSTANT_SUCCESS, output.getvalue())
-
-    def test_uxsuccess(self):
-        result, output = self._make_result()
-        result.status("foo", 'uxsuccess')
-        self.assertEqual(CONSTANT_UXSUCCESS, output.getvalue())
-
-    def test_skip(self):
-        result, output = self._make_result()
-        result.status("foo", 'skip')
-        self.assertEqual(CONSTANT_SKIP, output.getvalue())
-
-    def test_fail(self):
-        result, output = self._make_result()
-        result.status("foo", 'fail')
-        self.assertEqual(CONSTANT_FAIL, output.getvalue())
-
-    def test_xfail(self):
-        result, output = self._make_result()
-        result.status("foo", 'xfail')
-        self.assertEqual(CONSTANT_XFAIL, output.getvalue())
-
-    def test_unknown_status(self):
-        result, output = self._make_result()
-        self.assertRaises(Exception, result.status, "foo", 'boo')
-        self.assertEqual(b'', output.getvalue())
-
-    def test_eof(self):
-        result, output = self._make_result()
-        result.status(eof=True)
-        self.assertEqual(CONSTANT_EOF, output.getvalue())
-
-    def test_file_content(self):
-        result, output = self._make_result()
-        result.status(file_name="barney", file_bytes=b"woo")
-        self.assertEqual(CONSTANT_FILE_CONTENT, output.getvalue())
-
-    def test_mime(self):
-        result, output = self._make_result()
-        result.status(mime_type="application/foo; charset=1")
-        self.assertEqual(CONSTANT_MIME, output.getvalue())
-
-    def test_route_code(self):
-        result, output = self._make_result()
-        result.status(test_id="bar", test_status='success',
-            route_code="source")
-        self.assertEqual(CONSTANT_ROUTE_CODE, output.getvalue())
-
-    def test_runnable(self):
-        result, output = self._make_result()
-        result.status("foo", 'success', runnable=False)
-        self.assertEqual(CONSTANT_RUNNABLE, output.getvalue())
-
-    def test_tags(self):
-        result, output = self._make_result()
-        result.status(test_id="bar", test_tags=set(['foo', 'bar']))
-        self.assertThat(CONSTANT_TAGS, Contains(output.getvalue()))
-
-    def test_timestamp(self):
-        timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
-            iso8601.Utc())
-        result, output = self._make_result()
-        result.status(test_id="bar", test_status='success', timestamp=timestamp)
-        self.assertEqual(CONSTANT_TIMESTAMP, output.getvalue())
-
-
-class TestByteStreamToStreamResult(TestCase):
-
-    def test_non_subunit_encapsulated(self):
-        source = BytesIO(b"foo\nbar\n")
-        result = StreamResult()
-        subunit.ByteStreamToStreamResult(
-            source, non_subunit_name="stdout").run(result)
-        self.assertEqual([
-            ('status', None, None, None, True, 'stdout', b'f', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'b', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'a', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'r', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
-            ], result._events)
-        self.assertEqual(b'', source.read())
-
-    def test_signature_middle_utf8_char(self):
-        utf8_bytes = b'\xe3\xb3\x8a'
-        source = BytesIO(utf8_bytes)
-        # Should be treated as one character (it is u'\u3cca') and wrapped
-        result = StreamResult()
-        subunit.ByteStreamToStreamResult(
-            source, non_subunit_name="stdout").run(
-            result)
-        self.assertEqual([
-            ('status', None, None, None, True, 'stdout', b'\xe3', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'\xb3', False, None, None, None),
-            ('status', None, None, None, True, 'stdout', b'\x8a', False, None, None, None),
-            ], result._events)
-
-    def test_non_subunit_disabled_raises(self):
-        source = BytesIO(b"foo\nbar\n")
-        result = StreamResult()
-        case = subunit.ByteStreamToStreamResult(source)
-        e = self.assertRaises(Exception, case.run, result)
-        self.assertEqual(b'f', e.args[1])
-        self.assertEqual(b'oo\nbar\n', source.read())
-        self.assertEqual([], result._events)
-
-    def test_trivial_enumeration(self):
-        source = BytesIO(CONSTANT_ENUM)
-        result = StreamResult()
-        subunit.ByteStreamToStreamResult(
-            source, non_subunit_name="stdout").run(result)
-        self.assertEqual(b'', source.read())
-        self.assertEqual([
-            ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
-            ], result._events)
-
-    def test_multiple_events(self):
-        source = BytesIO(CONSTANT_ENUM + CONSTANT_ENUM)
-        result = StreamResult()
-        subunit.ByteStreamToStreamResult(
-            source, non_subunit_name="stdout").run(result)
-        self.assertEqual(b'', source.read())
-        self.assertEqual([
-            ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
-            ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
-            ], result._events)
-
-    def test_inprogress(self):
-        self.check_event(CONSTANT_INPROGRESS, 'inprogress')
-
-    def test_success(self):
-        self.check_event(CONSTANT_SUCCESS, 'success')
-
-    def test_uxsuccess(self):
-        self.check_event(CONSTANT_UXSUCCESS, 'uxsuccess')
-
-    def test_skip(self):
-        self.check_event(CONSTANT_SKIP, 'skip')
-
-    def test_fail(self):
-        self.check_event(CONSTANT_FAIL, 'fail')
-
-    def test_xfail(self):
-        self.check_event(CONSTANT_XFAIL, 'xfail')
-
-    def check_events(self, source_bytes, events):
-        source = BytesIO(source_bytes)
-        result = StreamResult()
-        subunit.ByteStreamToStreamResult(
-            source, non_subunit_name="stdout").run(result)
-        self.assertEqual(b'', source.read())
-        self.assertEqual(events, result._events)
-        # Any file attachments should be byte contents, as users assume that.
-        for event in result._events:
-            if event[5] is not None:
-                self.assertIsInstance(event[6], bytes)
-
-    def check_event(self, source_bytes, test_status=None, test_id="foo",
-        route_code=None, timestamp=None, tags=None, mime_type=None,
-        file_name=None, file_bytes=None, eof=False, runnable=True):
-        event = self._event(test_id=test_id, test_status=test_status,
-            tags=tags, runnable=runnable, file_name=file_name,
-            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
-            route_code=route_code, timestamp=timestamp)
-        self.check_events(source_bytes, [event])
-
-    def _event(self, test_status=None, test_id=None, route_code=None,
-        timestamp=None, tags=None, mime_type=None, file_name=None,
-        file_bytes=None, eof=False, runnable=True):
-        return ('status', test_id, test_status, tags, runnable, file_name,
-            file_bytes, eof, mime_type, route_code, timestamp)
-
-    def test_eof(self):
-        self.check_event(CONSTANT_EOF, test_id=None, eof=True)
-
-    def test_file_content(self):
-        self.check_event(CONSTANT_FILE_CONTENT,
-            test_id=None, file_name="barney", file_bytes=b"woo")
-
-    def test_file_content_length_into_checksum(self):
-        # A bad file content length which creeps into the checksum.
-        bad_file_length_content = b'\xb3!@\x13\x06barney\x04woo\xdc\xe2\xdb\x35'
-        self.check_events(bad_file_length_content, [
-            self._event(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=bad_file_length_content,
-                mime_type="application/octet-stream"),
-            self._event(test_id="subunit.parser", test_status="fail", eof=True,
-                file_name="Parser Error",
-                file_bytes=b"File content extends past end of packet: claimed 4 bytes, 3 available",
-                mime_type="text/plain;charset=utf8"),
-            ])
-
-    def test_packet_length_4_word_varint(self):
-        packet_data = b'\xb3!@\xc0\x00\x11'
-        self.check_events(packet_data, [
-            self._event(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=packet_data,
-                mime_type="application/octet-stream"),
-            self._event(test_id="subunit.parser", test_status="fail", eof=True,
-                file_name="Parser Error",
-                file_bytes=b"3 byte maximum given but 4 byte value found.",
-                mime_type="text/plain;charset=utf8"),
-            ])
-
-    def test_mime(self):
-        self.check_event(CONSTANT_MIME,
-            test_id=None, mime_type='application/foo; charset=1')
-
-    def test_route_code(self):
-        self.check_event(CONSTANT_ROUTE_CODE,
-            'success', route_code="source", test_id="bar")
-
-    def test_runnable(self):
-        self.check_event(CONSTANT_RUNNABLE,
-            test_status='success', runnable=False)
-
-    def test_tags(self):
-        self.check_event(CONSTANT_TAGS[0],
-            None, tags=set(['foo', 'bar']), test_id="bar")
-
-    def test_timestamp(self):
-        timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
-            iso8601.Utc())
-        self.check_event(CONSTANT_TIMESTAMP,
-            'success', test_id='bar', timestamp=timestamp)
-
-    def test_bad_crc_errors_via_status(self):
-        file_bytes = CONSTANT_MIME[:-1] + b'\x00'
-        self.check_events( file_bytes, [
-            self._event(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=file_bytes,
-                mime_type="application/octet-stream"),
-            self._event(test_id="subunit.parser", test_status="fail", eof=True,
-                file_name="Parser Error",
-                file_bytes=b'Bad checksum - calculated (0x78335115), '
-                    b'stored (0x78335100)',
-                mime_type="text/plain;charset=utf8"),
-            ])
-
-    def test_not_utf8_in_string(self):
-        file_bytes = CONSTANT_ROUTE_CODE[:5] + b'\xb4' + CONSTANT_ROUTE_CODE[6:-4] + b'\xce\x56\xc6\x17'
-        self.check_events(file_bytes, [
-            self._event(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=file_bytes,
-                mime_type="application/octet-stream"),
-            self._event(test_id="subunit.parser", test_status="fail", eof=True,
-                file_name="Parser Error",
-                file_bytes=b'UTF8 string at offset 2 is not UTF8',
-                mime_type="text/plain;charset=utf8"),
-            ])
-
-    def test_NULL_in_string(self):
-        file_bytes = CONSTANT_ROUTE_CODE[:6] + b'\x00' + CONSTANT_ROUTE_CODE[7:-4] + b'\xd7\x41\xac\xfe'
-        self.check_events(file_bytes, [
-            self._event(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=file_bytes,
-                mime_type="application/octet-stream"),
-            self._event(test_id="subunit.parser", test_status="fail", eof=True,
-                file_name="Parser Error",
-                file_bytes=b'UTF8 string at offset 2 contains NUL byte',
-                mime_type="text/plain;charset=utf8"),
-            ])
-
-    def test_bad_utf8_stringlength(self):
-        file_bytes = CONSTANT_ROUTE_CODE[:4] + b'\x3f' + CONSTANT_ROUTE_CODE[5:-4] + b'\xbe\x29\xe0\xc2'
-        self.check_events(file_bytes, [
-            self._event(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=file_bytes,
-                mime_type="application/octet-stream"),
-            self._event(test_id="subunit.parser", test_status="fail", eof=True,
-                file_name="Parser Error",
-                file_bytes=b'UTF8 string at offset 2 extends past end of '
-                    b'packet: claimed 63 bytes, 10 available',
-                mime_type="text/plain;charset=utf8"),
-            ])
-
-    def test_route_code_and_file_content(self):
-        content = BytesIO()
-        subunit.StreamResultToBytes(content).status(
-            route_code='0', mime_type='text/plain', file_name='bar',
-            file_bytes=b'foo')
-        self.check_event(content.getvalue(), test_id=None, file_name='bar',
-            route_code='0', mime_type='text/plain', file_bytes=b'foo')
diff --git a/lib/subunit/python/subunit/tests/test_test_results.py b/lib/subunit/python/subunit/tests/test_test_results.py
deleted file mode 100644
index 44f95b3..0000000
--- a/lib/subunit/python/subunit/tests/test_test_results.py
+++ /dev/null
@@ -1,566 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import csv
-import datetime
-import sys
-import unittest
-
-from testtools import TestCase
-from testtools.compat import StringIO
-from testtools.content import (
-    text_content,
-    TracebackContent,
-    )
-from testtools.testresult.doubles import ExtendedTestResult
-
-import subunit
-import subunit.iso8601 as iso8601
-import subunit.test_results
-
-import testtools
-
-
-class LoggingDecorator(subunit.test_results.HookedTestResultDecorator):
-
-    def __init__(self, decorated):
-        self._calls = 0
-        super(LoggingDecorator, self).__init__(decorated)
-
-    def _before_event(self):
-        self._calls += 1
-
-
-class AssertBeforeTestResult(LoggingDecorator):
-    """A TestResult for checking preconditions."""
-
-    def __init__(self, decorated, test):
-        self.test = test
-        super(AssertBeforeTestResult, self).__init__(decorated)
-
-    def _before_event(self):
-        self.test.assertEqual(1, self.earlier._calls)
-        super(AssertBeforeTestResult, self)._before_event()
-
-
-class TimeCapturingResult(unittest.TestResult):
-
-    def __init__(self):
-        super(TimeCapturingResult, self).__init__()
-        self._calls = []
-        self.failfast = False
-
-    def time(self, a_datetime):
-        self._calls.append(a_datetime)
-
-
-class TestHookedTestResultDecorator(unittest.TestCase):
-
-    def setUp(self):
-        # An end to the chain
-        terminal = unittest.TestResult()
-        # Asserts that the call was made to self.result before asserter was
-        # called.
-        asserter = AssertBeforeTestResult(terminal, self)
-        # The result object we call, which must increase its call count.
-        self.result = LoggingDecorator(asserter)
-        asserter.earlier = self.result
-        self.decorated = asserter
-
-    def tearDown(self):
-        # The hook in self.result must have been called
-        self.assertEqual(1, self.result._calls)
-        # The hook in asserter must have been called too, otherwise the
-        # assertion about ordering won't have completed.
-        self.assertEqual(1, self.decorated._calls)
-
-    def test_startTest(self):
-        self.result.startTest(self)
-
-    def test_startTestRun(self):
-        self.result.startTestRun()
-
-    def test_stopTest(self):
-        self.result.stopTest(self)
-
-    def test_stopTestRun(self):
-        self.result.stopTestRun()
-
-    def test_addError(self):
-        self.result.addError(self, subunit.RemoteError())
-
-    def test_addError_details(self):
-        self.result.addError(self, details={})
-
-    def test_addFailure(self):
-        self.result.addFailure(self, subunit.RemoteError())
-
-    def test_addFailure_details(self):
-        self.result.addFailure(self, details={})
-
-    def test_addSuccess(self):
-        self.result.addSuccess(self)
-
-    def test_addSuccess_details(self):
-        self.result.addSuccess(self, details={})
-
-    def test_addSkip(self):
-        self.result.addSkip(self, "foo")
-
-    def test_addSkip_details(self):
-        self.result.addSkip(self, details={})
-
-    def test_addExpectedFailure(self):
-        self.result.addExpectedFailure(self, subunit.RemoteError())
-
-    def test_addExpectedFailure_details(self):
-        self.result.addExpectedFailure(self, details={})
-
-    def test_addUnexpectedSuccess(self):
-        self.result.addUnexpectedSuccess(self)
-
-    def test_addUnexpectedSuccess_details(self):
-        self.result.addUnexpectedSuccess(self, details={})
-
-    def test_progress(self):
-        self.result.progress(1, subunit.PROGRESS_SET)
-
-    def test_wasSuccessful(self):
-        self.result.wasSuccessful()
-
-    def test_shouldStop(self):
-        self.result.shouldStop
-
-    def test_stop(self):
-        self.result.stop()
-
-    def test_time(self):
-        self.result.time(None)
-
-
-class TestAutoTimingTestResultDecorator(unittest.TestCase):
-
-    def setUp(self):
-        # An end to the chain, which captures time events.
-        terminal = TimeCapturingResult()
-        # The result object under test.
-        self.result = subunit.test_results.AutoTimingTestResultDecorator(
-            terminal)
-        self.decorated = terminal
-
-    def test_without_time_calls_time_is_called_and_not_None(self):
-        self.result.startTest(self)
-        self.assertEqual(1, len(self.decorated._calls))
-        self.assertNotEqual(None, self.decorated._calls[0])
-
-    def test_no_time_from_progress(self):
-        self.result.progress(1, subunit.PROGRESS_CUR)
-        self.assertEqual(0, len(self.decorated._calls))
-
-    def test_no_time_from_shouldStop(self):
-        self.decorated.stop()
-        self.result.shouldStop
-        self.assertEqual(0, len(self.decorated._calls))
-
-    def test_calling_time_inhibits_automatic_time(self):
-        # Calling time() outputs a time signal immediately and prevents
-        # automatically adding one when other methods are called.
-        time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
-        self.result.time(time)
-        self.result.startTest(self)
-        self.result.stopTest(self)
-        self.assertEqual(1, len(self.decorated._calls))
-        self.assertEqual(time, self.decorated._calls[0])
-
-    def test_calling_time_None_enables_automatic_time(self):
-        time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
-        self.result.time(time)
-        self.assertEqual(1, len(self.decorated._calls))
-        self.assertEqual(time, self.decorated._calls[0])
-        # Calling None passes the None through, in case other results care.
-        self.result.time(None)
-        self.assertEqual(2, len(self.decorated._calls))
-        self.assertEqual(None, self.decorated._calls[1])
-        # Calling other methods doesn't generate an automatic time event.
-        self.result.startTest(self)
-        self.assertEqual(3, len(self.decorated._calls))
-        self.assertNotEqual(None, self.decorated._calls[2])
-
-    def test_set_failfast_True(self):
-        self.assertFalse(self.decorated.failfast)
-        self.result.failfast = True
-        self.assertTrue(self.decorated.failfast)
-
-
-class TestTagCollapsingDecorator(TestCase):
-
-    def test_tags_collapsed_outside_of_tests(self):
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
-        tag_collapser.tags(set(['a']), set())
-        tag_collapser.tags(set(['b']), set())
-        tag_collapser.startTest(self)
-        self.assertEquals(
-            [('tags', set(['a', 'b']), set([])),
-             ('startTest', self),
-             ], result._events)
-
-    def test_tags_collapsed_outside_of_tests_are_flushed(self):
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
-        tag_collapser.startTestRun()
-        tag_collapser.tags(set(['a']), set())
-        tag_collapser.tags(set(['b']), set())
-        tag_collapser.startTest(self)
-        tag_collapser.addSuccess(self)
-        tag_collapser.stopTest(self)
-        tag_collapser.stopTestRun()
-        self.assertEquals(
-            [('startTestRun',),
-             ('tags', set(['a', 'b']), set([])),
-             ('startTest', self),
-             ('addSuccess', self),
-             ('stopTest', self),
-             ('stopTestRun',),
-             ], result._events)
-
-    def test_tags_forwarded_after_tests(self):
-        test = subunit.RemotedTestCase('foo')
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
-        tag_collapser.startTestRun()
-        tag_collapser.startTest(test)
-        tag_collapser.addSuccess(test)
-        tag_collapser.stopTest(test)
-        tag_collapser.tags(set(['a']), set(['b']))
-        tag_collapser.stopTestRun()
-        self.assertEqual(
-            [('startTestRun',),
-             ('startTest', test),
-             ('addSuccess', test),
-             ('stopTest', test),
-             ('tags', set(['a']), set(['b'])),
-             ('stopTestRun',),
-             ],
-            result._events)
-
-    def test_tags_collapsed_inside_of_tests(self):
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
-        test = subunit.RemotedTestCase('foo')
-        tag_collapser.startTest(test)
-        tag_collapser.tags(set(['a']), set())
-        tag_collapser.tags(set(['b']), set(['a']))
-        tag_collapser.tags(set(['c']), set())
-        tag_collapser.stopTest(test)
-        self.assertEquals(
-            [('startTest', test),
-             ('tags', set(['b', 'c']), set(['a'])),
-             ('stopTest', test)],
-            result._events)
-
-    def test_tags_collapsed_inside_of_tests_different_ordering(self):
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
-        test = subunit.RemotedTestCase('foo')
-        tag_collapser.startTest(test)
-        tag_collapser.tags(set(), set(['a']))
-        tag_collapser.tags(set(['a', 'b']), set())
-        tag_collapser.tags(set(['c']), set())
-        tag_collapser.stopTest(test)
-        self.assertEquals(
-            [('startTest', test),
-             ('tags', set(['a', 'b', 'c']), set()),
-             ('stopTest', test)],
-            result._events)
-
-    def test_tags_sent_before_result(self):
-        # Because addSuccess and friends tend to send subunit output
-        # immediately, and because 'tags:' before a result line means
-        # something different to 'tags:' after a result line, we need to be
-        # sure that tags are emitted before 'addSuccess' (or whatever).
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
-        test = subunit.RemotedTestCase('foo')
-        tag_collapser.startTest(test)
-        tag_collapser.tags(set(['a']), set())
-        tag_collapser.addSuccess(test)
-        tag_collapser.stopTest(test)
-        self.assertEquals(
-            [('startTest', test),
-             ('tags', set(['a']), set()),
-             ('addSuccess', test),
-             ('stopTest', test)],
-            result._events)
-
-
-class TestTimeCollapsingDecorator(TestCase):
-
-    def make_time(self):
-        # Heh heh.
-        return datetime.datetime(
-            2000, 1, self.getUniqueInteger(), tzinfo=iso8601.UTC)
-
-    def test_initial_time_forwarded(self):
-        # We always forward the first time event we see.
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
-        a_time = self.make_time()
-        tag_collapser.time(a_time)
-        self.assertEquals([('time', a_time)], result._events)
-
-    def test_time_collapsed_to_first_and_last(self):
-        # If there are many consecutive time events, only the first and last
-        # are sent through.
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
-        times = [self.make_time() for i in range(5)]
-        for a_time in times:
-            tag_collapser.time(a_time)
-        tag_collapser.startTest(subunit.RemotedTestCase('foo'))
-        self.assertEquals(
-            [('time', times[0]), ('time', times[-1])], result._events[:-1])
-
-    def test_only_one_time_sent(self):
-        # If we receive a single time event followed by a non-time event, we
-        # send exactly one time event.
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
-        a_time = self.make_time()
-        tag_collapser.time(a_time)
-        tag_collapser.startTest(subunit.RemotedTestCase('foo'))
-        self.assertEquals([('time', a_time)], result._events[:-1])
-
-    def test_duplicate_times_not_sent(self):
-        # Many time events with the exact same time are collapsed into one
-        # time event.
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
-        a_time = self.make_time()
-        for i in range(5):
-            tag_collapser.time(a_time)
-        tag_collapser.startTest(subunit.RemotedTestCase('foo'))
-        self.assertEquals([('time', a_time)], result._events[:-1])
-
-    def test_no_times_inserted(self):
-        result = ExtendedTestResult()
-        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
-        a_time = self.make_time()
-        tag_collapser.time(a_time)
-        foo = subunit.RemotedTestCase('foo')
-        tag_collapser.startTest(foo)
-        tag_collapser.addSuccess(foo)
-        tag_collapser.stopTest(foo)
-        self.assertEquals(
-            [('time', a_time),
-             ('startTest', foo),
-             ('addSuccess', foo),
-             ('stopTest', foo)], result._events)
-
-
-class TestByTestResultTests(testtools.TestCase):
-
-    def setUp(self):
-        super(TestByTestResultTests, self).setUp()
-        self.log = []
-        self.result = subunit.test_results.TestByTestResult(self.on_test)
-        if sys.version_info >= (3, 0):
-            self.result._now = iter(range(5)).__next__
-        else:
-            self.result._now = iter(range(5)).next
-
-    def assertCalled(self, **kwargs):
-        defaults = {
-            'test': self,
-            'tags': set(),
-            'details': None,
-            'start_time': 0,
-            'stop_time': 1,
-            }
-        defaults.update(kwargs)
-        self.assertEqual([defaults], self.log)
-
-    def on_test(self, **kwargs):
-        self.log.append(kwargs)
-
-    def test_no_tests_nothing_reported(self):
-        self.result.startTestRun()
-        self.result.stopTestRun()
-        self.assertEqual([], self.log)
-
-    def test_add_success(self):
-        self.result.startTest(self)
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertCalled(status='success')
-
-    def test_add_success_details(self):
-        self.result.startTest(self)
-        details = {'foo': 'bar'}
-        self.result.addSuccess(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', details=details)
-
-    def test_tags(self):
-        if not getattr(self.result, 'tags', None):
-            self.skipTest("No tags in testtools")
-        self.result.tags(['foo'], [])
-        self.result.startTest(self)
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', tags=set(['foo']))
-
-    def test_add_error(self):
-        self.result.startTest(self)
-        try:
-            1/0
-        except ZeroDivisionError:
-            error = sys.exc_info()
-        self.result.addError(self, error)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='error',
-            details={'traceback': TracebackContent(error, self)})
-
-    def test_add_error_details(self):
-        self.result.startTest(self)
-        details = {"foo": text_content("bar")}
-        self.result.addError(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='error', details=details)
-
-    def test_add_failure(self):
-        self.result.startTest(self)
-        try:
-            self.fail("intentional failure")
-        except self.failureException:
-            failure = sys.exc_info()
-        self.result.addFailure(self, failure)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='failure',
-            details={'traceback': TracebackContent(failure, self)})
-
-    def test_add_failure_details(self):
-        self.result.startTest(self)
-        details = {"foo": text_content("bar")}
-        self.result.addFailure(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='failure', details=details)
-
-    def test_add_xfail(self):
-        self.result.startTest(self)
-        try:
-            1/0
-        except ZeroDivisionError:
-            error = sys.exc_info()
-        self.result.addExpectedFailure(self, error)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='xfail',
-            details={'traceback': TracebackContent(error, self)})
-
-    def test_add_xfail_details(self):
-        self.result.startTest(self)
-        details = {"foo": text_content("bar")}
-        self.result.addExpectedFailure(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='xfail', details=details)
-
-    def test_add_unexpected_success(self):
-        self.result.startTest(self)
-        details = {'foo': 'bar'}
-        self.result.addUnexpectedSuccess(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='success', details=details)
-
-    def test_add_skip_reason(self):
-        self.result.startTest(self)
-        reason = self.getUniqueString()
-        self.result.addSkip(self, reason)
-        self.result.stopTest(self)
-        self.assertCalled(
-            status='skip', details={'reason': text_content(reason)})
-
-    def test_add_skip_details(self):
-        self.result.startTest(self)
-        details = {'foo': 'bar'}
-        self.result.addSkip(self, details=details)
-        self.result.stopTest(self)
-        self.assertCalled(status='skip', details=details)
-
-    def test_twice(self):
-        self.result.startTest(self)
-        self.result.addSuccess(self, details={'foo': 'bar'})
-        self.result.stopTest(self)
-        self.result.startTest(self)
-        self.result.addSuccess(self)
-        self.result.stopTest(self)
-        self.assertEqual(
-            [{'test': self,
-              'status': 'success',
-              'start_time': 0,
-              'stop_time': 1,
-              'tags': set(),
-              'details': {'foo': 'bar'}},
-             {'test': self,
-              'status': 'success',
-              'start_time': 2,
-              'stop_time': 3,
-              'tags': set(),
-              'details': None},
-             ],
-            self.log)
-
-
-class TestCsvResult(testtools.TestCase):
-
-    def parse_stream(self, stream):
-        stream.seek(0)
-        reader = csv.reader(stream)
-        return list(reader)
-
-    def test_csv_output(self):
-        stream = StringIO()
-        result = subunit.test_results.CsvResult(stream)
-        if sys.version_info >= (3, 0):
-            result._now = iter(range(5)).__next__
-        else:
-            result._now = iter(range(5)).next
-        result.startTestRun()
-        result.startTest(self)
-        result.addSuccess(self)
-        result.stopTest(self)
-        result.stopTestRun()
-        self.assertEqual(
-            [['test', 'status', 'start_time', 'stop_time'],
-             [self.id(), 'success', '0', '1'],
-             ],
-            self.parse_stream(stream))
-
-    def test_just_header_when_no_tests(self):
-        stream = StringIO()
-        result = subunit.test_results.CsvResult(stream)
-        result.startTestRun()
-        result.stopTestRun()
-        self.assertEqual(
-            [['test', 'status', 'start_time', 'stop_time']],
-            self.parse_stream(stream))
-
-    def test_no_output_before_events(self):
-        stream = StringIO()
-        subunit.test_results.CsvResult(stream)
-        self.assertEqual([], self.parse_stream(stream))
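(For context on the tests above: TestByTestResult drives a callback once per completed test, passing test, status, start_time, stop_time, tags and details as keyword arguments. A minimal usage sketch, assuming python-subunit and testtools are importable; the Sample case and on_test callback are illustrative only:

    import unittest
    from subunit.test_results import TestByTestResult

    class Sample(unittest.TestCase):
        def test_ok(self):
            pass

    def on_test(test, status, start_time, stop_time, tags, details):
        # Called once per finished test; times come from the result's clock.
        print(test.id(), status, stop_time - start_time)

    result = TestByTestResult(on_test)
    result.startTestRun()
    Sample('test_ok').run(result)
    result.stopTestRun()
)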
diff --git a/lib/subunit/python/subunit/v2.py b/lib/subunit/python/subunit/v2.py
deleted file mode 100644
index b1d508d..0000000
--- a/lib/subunit/python/subunit/v2.py
+++ /dev/null
@@ -1,494 +0,0 @@
-#
-#  subunit: extensions to Python unittest to get test results from subprocesses.
-#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-import codecs
-utf_8_decode = codecs.utf_8_decode
-import datetime
-from io import UnsupportedOperation
-import os
-import select
-import struct
-import zlib
-
-from extras import safe_hasattr, try_imports
-builtins = try_imports(['__builtin__', 'builtins'])
-
-import subunit
-import subunit.iso8601 as iso8601
-
-__all__ = [
-    'ByteStreamToStreamResult',
-    'StreamResultToBytes',
-    ]
-
-SIGNATURE = b'\xb3'
-FMT_8  = '>B'
-FMT_16 = '>H'
-FMT_24 = '>HB'
-FMT_32 = '>I'
-FMT_TIMESTAMP = '>II'
-FLAG_TEST_ID = 0x0800
-FLAG_ROUTE_CODE = 0x0400
-FLAG_TIMESTAMP = 0x0200
-FLAG_RUNNABLE = 0x0100
-FLAG_TAGS = 0x0080
-FLAG_MIME_TYPE = 0x0020
-FLAG_EOF = 0x0010
-FLAG_FILE_CONTENT = 0x0040
-EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=iso8601.Utc())
-NUL_ELEMENT = b'\0'[0]
-# Contains True for types for which 'nul in thing' falsely returns false.
-_nul_test_broken = {}
-
-
-def has_nul(buffer_or_bytes):
-    """Return True if a null byte is present in buffer_or_bytes."""
-    # Simple "if NUL_ELEMENT in utf8_bytes:" fails on Python 3.1 and 3.2 with
-    # memoryviews. See https://bugs.launchpad.net/subunit/+bug/1216246
-    buffer_type = type(buffer_or_bytes)
-    broken = _nul_test_broken.get(buffer_type)
-    if broken is None:
-        reference = buffer_type(b'\0')
-        broken = not NUL_ELEMENT in reference
-        _nul_test_broken[buffer_type] = broken
-    if broken:
-        return b'\0' in buffer_or_bytes
-    else:
-        return NUL_ELEMENT in buffer_or_bytes
-
-
-class ParseError(Exception):
-    """Used to pass error messages within the parser."""
-
-
-class StreamResultToBytes(object):
-    """Convert StreamResult API calls to bytes.
-
-    The StreamResult API is defined by testtools.StreamResult.
-    """
-
-    status_mask = {
-        None: 0,
-        'exists': 0x1,
-        'inprogress': 0x2,
-        'success': 0x3,
-        'uxsuccess': 0x4,
-        'skip': 0x5,
-        'fail': 0x6,
-        'xfail': 0x7,
-        }
-
-    zero_b = b'\0'[0]
-
-    def __init__(self, output_stream):
-        """Create a StreamResultToBytes with output written to output_stream.
-
-        :param output_stream: A file-like object. Must support write(bytes)
-            and flush() methods. Flush will be called after each write.
-            The stream will be passed through subunit.make_stream_binary,
-            to handle regular cases such as stdout.
-        """
-        self.output_stream = subunit.make_stream_binary(output_stream)
-
-    def startTestRun(self):
-        pass
-
-    def stopTestRun(self):
-        pass
-
-    def status(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        self._write_packet(test_id=test_id, test_status=test_status,
-            test_tags=test_tags, runnable=runnable, file_name=file_name,
-            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
-            route_code=route_code, timestamp=timestamp)
-
-    def _write_utf8(self, a_string, packet):
-        utf8 = a_string.encode('utf-8')
-        self._write_number(len(utf8), packet)
-        packet.append(utf8)
-
-    def _write_len16(self, length, packet):
-        assert length < 65536
-        packet.append(struct.pack(FMT_16, length))
-
-    def _write_number(self, value, packet):
-        packet.extend(self._encode_number(value))
-
-    def _encode_number(self, value):
-        assert value >= 0
-        if value < 64:
-            return [struct.pack(FMT_8, value)]
-        elif value < 16384:
-            value = value | 0x4000
-            return [struct.pack(FMT_16, value)]
-        elif value < 4194304:
-            value = value | 0x800000
-            return [struct.pack(FMT_16, value >> 8),
-                    struct.pack(FMT_8, value & 0xff)]
-        elif value < 1073741824:
-            value = value | 0xc0000000
-            return [struct.pack(FMT_32, value)]
-        else:
-            raise ValueError('value too large to encode: %r' % (value,))
-
-    def _write_packet(self, test_id=None, test_status=None, test_tags=None,
-        runnable=True, file_name=None, file_bytes=None, eof=False,
-        mime_type=None, route_code=None, timestamp=None):
-        packet = [SIGNATURE]
-        packet.append(b'FF') # placeholder for flags
-        # placeholder for length, but see below as length is variable.
-        packet.append(b'')
-        flags = 0x2000 # Version 0x2
-        if timestamp is not None:
-            flags = flags | FLAG_TIMESTAMP
-            since_epoch = timestamp - EPOCH
-            nanoseconds = since_epoch.microseconds * 1000
-            seconds = (since_epoch.seconds + since_epoch.days * 24 * 3600)
-            packet.append(struct.pack(FMT_32, seconds))
-            self._write_number(nanoseconds, packet)
-        if test_id is not None:
-            flags = flags | FLAG_TEST_ID
-            self._write_utf8(test_id, packet)
-        if test_tags:
-            flags = flags | FLAG_TAGS
-            self._write_number(len(test_tags), packet)
-            for tag in test_tags:
-                self._write_utf8(tag, packet)
-        if runnable:
-            flags = flags | FLAG_RUNNABLE
-        if mime_type:
-            flags = flags | FLAG_MIME_TYPE
-            self._write_utf8(mime_type, packet)
-        if file_name is not None:
-            flags = flags | FLAG_FILE_CONTENT
-            self._write_utf8(file_name, packet)
-            self._write_number(len(file_bytes), packet)
-            packet.append(file_bytes)
-        if eof:
-           flags = flags | FLAG_EOF
-        if route_code is not None:
-            flags = flags | FLAG_ROUTE_CODE
-            self._write_utf8(route_code, packet)
-        # 0x0008 - not used in v2.
-        flags = flags | self.status_mask[test_status]
-        packet[1] = struct.pack(FMT_16, flags)
-        base_length = sum(map(len, packet)) + 4
-        if base_length <= 62:
-            # one byte to encode length, 62+1 = 63
-            length_length = 1
-        elif base_length <= 16381:
-            # two bytes to encode length, 16381+2 = 16383
-            length_length = 2
-        elif base_length <= 4194300:
-            # three bytes to encode length, 4194300+3=4194303
-            length_length = 3
-        else:
-            # Longer than policy:
-            # TODO: chunk the packet automatically?
-            # - strip all but file data
-            # - do 4M chunks of that till done
-            # - include original data in final chunk.
-            raise ValueError("Length too long: %r" % base_length)
-        packet[2:3] = self._encode_number(base_length + length_length)
-        # We could either do a partial application of crc32 over each chunk
-        # or a single join to a temp variable then a final join
-        # or two writes (that python might then split).
-        # For now, simplest code: join, crc32, join, output
-        content = b''.join(packet)
-        self.output_stream.write(content + struct.pack(
-            FMT_32, zlib.crc32(content) & 0xffffffff))
-        self.output_stream.flush()
-
-
-class ByteStreamToStreamResult(object):
-    """Parse a subunit byte stream.
-
-    Mixed streams that contain non-subunit content are supported when a
-    non_subunit_name is passed to the constructor. The default is to raise an
-    error containing the non-subunit byte after it has been read from the
-    stream.
-
-    Typical use:
-
-       >>> case = ByteStreamToStreamResult(sys.stdin.buffer)
-       >>> result = StreamResult()
-       >>> result.startTestRun()
-       >>> case.run(result)
-       >>> result.stopTestRun()
-    """
-
-    status_lookup = {
-        0x0: None,
-        0x1: 'exists',
-        0x2: 'inprogress',
-        0x3: 'success',
-        0x4: 'uxsuccess',
-        0x5: 'skip',
-        0x6: 'fail',
-        0x7: 'xfail',
-        }
-
-    def __init__(self, source, non_subunit_name=None):
-        """Create a ByteStreamToStreamResult.
-
-        :param source: A file like object to read bytes from. Must support
-            read(<count>) and return bytes. The file is not closed by
-            ByteStreamToStreamResult. subunit.make_stream_binary() is
-            called on the stream to get it into bytes mode.
-        :param non_subunit_name: If set to non-None, non subunit content
-            encountered in the stream will be converted into file packets
-            labelled with this name.
-        """
-        self.non_subunit_name = non_subunit_name
-        self.source = subunit.make_stream_binary(source)
-        self.codec = codecs.lookup('utf8').incrementaldecoder()
-
-    def run(self, result):
-        """Parse source and emit events to result.
-
-        This is a blocking call: it will run until EOF is detected on source.
-        """
-        self.codec.reset()
-        mid_character = False
-        while True:
-            # We're in blocking mode; read one char
-            content = self.source.read(1)
-            if not content:
-                # EOF
-                return
-            if not mid_character and content[0] == SIGNATURE[0]:
-                self._parse_packet(result)
-                continue
-            if self.non_subunit_name is None:
-                raise Exception("Non subunit content", content)
-            try:
-                if self.codec.decode(content):
-                    # End of a character
-                    mid_character = False
-                else:
-                    mid_character = True
-            except UnicodeDecodeError:
-                # Bad unicode, not our concern.
-                mid_character = False
-            # Aggregate all content that is not subunit until either
-            # 1MiB is accumulated or 50ms has passed with no input.
-            # Both are arbitrary amounts intended to give a simple
-            # balance between efficiency (avoiding death by a thousand
-            # one-byte packets), buffering (avoiding overlarge state
-            # being hidden on intermediary nodes) and interactivity
-            # (when driving a debugger, slow response to typing is
-            # annoying).
-            buffered = [content]
-            while len(buffered[-1]):
-                try:
-                    self.source.fileno()
-                except:
-                    # Won't be able to select, fallback to
-                    # one-byte-at-a-time.
-                    break
-                # Note: this has a very low timeout because with stdin, the
-                # BufferedIO layer typically has all the content available
-                # from the stream when e.g. pdb is dropped into, leading to
-                # select always timing out when in fact we could have read
-                # (from the buffer layer) - we typically fail to aggregate
-                # any content on 3.x Pythons.
-                readable = select.select([self.source], [], [], 0.000001)[0]
-                if readable:
-                    content = self.source.read(1)
-                    if not len(content):
-                        # EOF - break and emit buffered.
-                        break
-                    if not mid_character and content[0] == SIGNATURE[0]:
-                        # New packet, break, emit buffered, then parse.
-                        break
-                    buffered.append(content)
-                    # Feed into the codec.
-                    try:
-                        if self.codec.decode(content):
-                            # End of a character
-                            mid_character = False
-                        else:
-                            mid_character = True
-                    except UnicodeDecodeError:
-                        # Bad unicode, not our concern.
-                        mid_character = False
-                if not readable or len(buffered) >= 1048576:
-                    # timeout or too much data, emit what we have.
-                    break
-            result.status(
-                file_name=self.non_subunit_name,
-                file_bytes=b''.join(buffered))
-            if mid_character or not len(content) or content[0] != SIGNATURE[0]:
-                continue
-            # Otherwise, parse a data packet.
-            self._parse_packet(result)
-
-    def _parse_packet(self, result):
-        try:
-            packet = [SIGNATURE]
-            self._parse(packet, result)
-        except ParseError as error:
-            result.status(test_id="subunit.parser", eof=True,
-                file_name="Packet data", file_bytes=b''.join(packet),
-                mime_type="application/octet-stream")
-            result.status(test_id="subunit.parser", test_status='fail',
-                eof=True, file_name="Parser Error",
-                file_bytes=(error.args[0]).encode('utf8'),
-                mime_type="text/plain;charset=utf8")
-
-    def _to_bytes(self, data, pos, length):
-        """Return a slice of data from pos for length as bytes."""
-        # memoryview in 2.7.3 and 3.2 isn't directly usable with struct :(.
-        # see https://bugs.launchpad.net/subunit/+bug/1216163
-        result = data[pos:pos+length]
-        if type(result) is not bytes:
-            return result.tobytes()
-        return result
-
-    def _parse_varint(self, data, pos, max_3_bytes=False):
-        # because the only incremental IO we do is at the start, and the 32 bit
-        # CRC means we can always safely read enough to cover any varint, we
-        # can be sure that there should be enough data - and if not it is an
-        # error not a normal situation.
-        data_0 = struct.unpack(FMT_8, self._to_bytes(data, pos, 1))[0]
-        typeenum = data_0 & 0xc0
-        value_0 = data_0 & 0x3f
-        if typeenum == 0x00:
-            return value_0, 1
-        elif typeenum == 0x40:
-            data_1 = struct.unpack(FMT_8, self._to_bytes(data, pos+1, 1))[0]
-            return (value_0 << 8) | data_1, 2
-        elif typeenum == 0x80:
-            data_1 = struct.unpack(FMT_16, self._to_bytes(data, pos+1, 2))[0]
-            return (value_0 << 16) | data_1, 3
-        else:
-            if max_3_bytes:
-                raise ParseError('3 byte maximum given but 4 byte value found.')
-            data_1, data_2 = struct.unpack(FMT_24, self._to_bytes(data, pos+1, 3))
-            result = (value_0 << 24) | data_1 << 8 | data_2
-            return result, 4
-
-    def _parse(self, packet, result):
-            # 2 bytes flags, at most 3 bytes length.
-            packet.append(self.source.read(5))
-            flags = struct.unpack(FMT_16, packet[-1][:2])[0]
-            length, consumed = self._parse_varint(
-                packet[-1], 2, max_3_bytes=True)
-            remainder = self.source.read(length - 6)
-            if len(remainder) != length - 6:
-                raise ParseError(
-                    'Short read - got %d bytes, wanted %d bytes' % (
-                    len(remainder), length - 6))
-            if consumed != 3:
-                # Avoid having to parse torn values
-                packet[-1] += remainder
-                pos = 2 + consumed
-            else:
-                # Avoid copying potentially lots of data.
-                packet.append(remainder)
-                pos = 0
-            crc = zlib.crc32(packet[0])
-            for fragment in packet[1:-1]:
-                crc = zlib.crc32(fragment, crc)
-            crc = zlib.crc32(packet[-1][:-4], crc) & 0xffffffff
-            packet_crc = struct.unpack(FMT_32, packet[-1][-4:])[0]
-            if crc != packet_crc:
-                # Bad CRC, report it and stop parsing the packet.
-                raise ParseError(
-                    'Bad checksum - calculated (0x%x), stored (0x%x)'
-                        % (crc, packet_crc))
-            if safe_hasattr(builtins, 'memoryview'):
-                body = memoryview(packet[-1])
-            else:
-                body = packet[-1]
-            # Discard CRC-32
-            body = body[:-4]
-            # One packet could have both file and status data; the Python API
-            # presents these separately (perhaps it shouldn't?)
-            if flags & FLAG_TIMESTAMP:
-                seconds = struct.unpack(FMT_32, self._to_bytes(body, pos, 4))[0]
-                nanoseconds, consumed = self._parse_varint(body, pos+4)
-                pos = pos + 4 + consumed
-                timestamp = EPOCH + datetime.timedelta(
-                    seconds=seconds, microseconds=nanoseconds/1000)
-            else:
-                timestamp = None
-            if flags & FLAG_TEST_ID:
-                test_id, pos = self._read_utf8(body, pos)
-            else:
-                test_id = None
-            if flags & FLAG_TAGS:
-                tag_count, consumed = self._parse_varint(body, pos)
-                pos += consumed
-                test_tags = set()
-                for _ in range(tag_count):
-                    tag, pos = self._read_utf8(body, pos)
-                    test_tags.add(tag)
-            else:
-                test_tags = None
-            if flags & FLAG_MIME_TYPE:
-                mime_type, pos = self._read_utf8(body, pos)
-            else:
-                mime_type = None
-            if flags & FLAG_FILE_CONTENT:
-                file_name, pos = self._read_utf8(body, pos)
-                content_length, consumed = self._parse_varint(body, pos)
-                pos += consumed
-                file_bytes = self._to_bytes(body, pos, content_length)
-                if len(file_bytes) != content_length:
-                    raise ParseError('File content extends past end of packet: '
-                        'claimed %d bytes, %d available' % (
-                        content_length, len(file_bytes)))
-                pos += content_length
-            else:
-                file_name = None
-                file_bytes = None
-            if flags & FLAG_ROUTE_CODE:
-                route_code, pos = self._read_utf8(body, pos)
-            else:
-                route_code = None
-            runnable = bool(flags & FLAG_RUNNABLE)
-            eof = bool(flags & FLAG_EOF)
-            test_status = self.status_lookup[flags & 0x0007]
-            result.status(test_id=test_id, test_status=test_status,
-                test_tags=test_tags, runnable=runnable, mime_type=mime_type,
-                eof=eof, file_name=file_name, file_bytes=file_bytes,
-                route_code=route_code, timestamp=timestamp)
-    __call__ = run
-
-    def _read_utf8(self, buf, pos):
-        length, consumed = self._parse_varint(buf, pos)
-        pos += consumed
-        utf8_bytes = buf[pos:pos+length]
-        if length != len(utf8_bytes):
-            raise ParseError(
-                'UTF8 string at offset %d extends past end of packet: '
-                'claimed %d bytes, %d available' % (pos - 2, length,
-                len(utf8_bytes)))
-        if has_nul(utf8_bytes):
-            raise ParseError('UTF8 string at offset %d contains NUL byte' % (
-                pos-2,))
-        try:
-            utf8, decoded_bytes = utf_8_decode(utf8_bytes)
-            if decoded_bytes != length:
-                raise ParseError("Invalid (partially decodable) string at "
-                    "offset %d, %d undecoded bytes" % (
-                    pos-2, length - decoded_bytes))
-            return utf8, length+pos
-        except UnicodeDecodeError:
-            raise ParseError('UTF8 string at offset %d is not UTF8' % (pos-2,))
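(For reference, the v2.py module deleted above implements the subunit v2 binary protocol: each packet starts with the 0xB3 signature byte, then 16 bits of flags, a variable-length total length, the optional fields selected by the flags, and a trailing CRC-32. A minimal round-trip sketch, assuming python-subunit and testtools are on the path; the Printer class is illustrative only:

    from io import BytesIO
    from testtools import StreamResult
    from subunit.v2 import ByteStreamToStreamResult, StreamResultToBytes

    buf = BytesIO()
    writer = StreamResultToBytes(buf)
    writer.status(test_id='sample.test', test_status='inprogress')
    writer.status(test_id='sample.test', test_status='success')

    class Printer(StreamResult):
        # StreamResult's status() is a no-op; just echo the decoded events.
        def status(self, **kwargs):
            print(kwargs.get('test_id'), kwargs.get('test_status'))

    buf.seek(0)
    ByteStreamToStreamResult(buf).run(Printer())
)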
diff --git a/lib/subunit/setup.py b/lib/subunit/setup.py
deleted file mode 100755
index d42d3d7..0000000
--- a/lib/subunit/setup.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-import os.path
-try:
-    # If the user has setuptools / distribute installed, use it
-    from setuptools import setup
-except ImportError:
-    # Otherwise, fall back to distutils.
-    from distutils.core import setup
-    extra = {}
-else:
-    extra = {
-        'install_requires': [
-            'extras',
-            'testtools>=0.9.34',
-        ],
-        'tests_require': [
-            'testscenarios',
-        ],
-    }
-
-
-def _get_version_from_file(filename, start_of_line, split_marker):
-    """Extract version from file, giving last matching value or None"""
-    try:
-        return [x for x in open(filename)
-            if x.startswith(start_of_line)][-1].split(split_marker)[1].strip()
-    except (IOError, IndexError):
-        return None
-
-
-VERSION = (
-    # Assume we are in a distribution, which has PKG-INFO
-    _get_version_from_file('PKG-INFO', 'Version:', ':')
-    # Must be a development checkout, so use the Makefile
-    or _get_version_from_file('Makefile', 'VERSION', '=')
-    or "0.0")
-
-
-relpath = os.path.dirname(__file__)
-if relpath:
-    os.chdir(relpath)
-setup(
-    name='python-subunit',
-    version=VERSION,
-    description=('Python implementation of subunit test streaming protocol'),
-    long_description=open('README').read(),
-    classifiers=[
-        'Intended Audience :: Developers',
-        'Programming Language :: Python :: 3',
-        'Programming Language :: Python',
-        'Topic :: Software Development :: Testing',
-    ],
-    keywords='python test streaming',
-    author='Robert Collins',
-    author_email='subunit-dev at lists.launchpad.net',
-    url='http://launchpad.net/subunit',
-    packages=['subunit', 'subunit.tests'],
-    package_dir={'subunit': 'python/subunit'},
-    scripts = [
-        'filters/subunit-1to2',
-        'filters/subunit-2to1',
-        'filters/subunit-filter',
-        'filters/subunit-ls',
-        'filters/subunit-notify',
-        'filters/subunit-output',
-        'filters/subunit-stats',
-        'filters/subunit-tags',
-        'filters/subunit2csv',
-        'filters/subunit2gtk',
-        'filters/subunit2junitxml',
-        'filters/subunit2pyunit',
-        'filters/tap2subunit',
-    ],
-    **extra
-)
diff --git a/lib/subunit/shell/README b/lib/subunit/shell/README
deleted file mode 100644
index af894a2..0000000
--- a/lib/subunit/shell/README
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-#  subunit shell bindings.
-#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-This tree contains shell bindings to the subunit protocol. They are written
-entirely in shell, and unit tested in shell. See the tests/ directory for the
-test scripts. You can use `make check` to run the tests. There is a trivial
-python test_shell.py which uses the pyunit gui to expose the test results in a
-compact form.
-
-The shell bindings consist of four functions which you can use to output test
-metadata trivially. See share/subunit.sh for the functions and comments.
-
-However, this is not a full test environment; it is support code for reporting to
-subunit. You can look at ShUnit (http://shunit.sourceforge.net) for 'proper'
-shell based xUnit functionality. There is a patch for ShUnit 1.3
-(subunit-ui.patch) in the subunit source tree. I hope to have that integrated
-upstream in the near future. I will delete the copy of the patch in the subunit
-tree a release or two later.
-
-If you are a test environment maintainer - either homegrown, or ShUnit or some
-such, you will need to see how the subunit calls should be used. Here is what
-a manually written test using the bindings might look like:
-
-
-subunit_start_test "test name"
-# determine if test passes or fails
-result=$(something)
-if [ $result == 0 ]; then
-  subunit_pass_test "test name"
-else
-  subunit_fail_test "test name" <<END
-Something went wrong running something:
-exited with result: '$func_status'
-END
-fi
-
-Which when run with a subunit test runner will generate something like:
-test name ... ok
-
-on success, and:
-
-test name ... FAIL
-
-======================================================================
-FAIL: test name
-----------------------------------------------------------------------
-RemoteError:
-Something went wrong running something:
-exited with result: '1'
diff --git a/lib/subunit/shell/share/subunit.sh b/lib/subunit/shell/share/subunit.sh
deleted file mode 100644
index a532388..0000000
--- a/lib/subunit/shell/share/subunit.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-#  subunit.sh: shell functions to report test status via the subunit protocol.
-#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-subunit_start_test () {
-  # emit the current protocol start-marker for test $1
-  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
-  echo "test: $1"
-}
-
-
-subunit_pass_test () {
-  # emit the current protocol test passed marker for test $1
-  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
-  echo "success: $1"
-}
-
-
-subunit_fail_test () {
-  # emit the current protocol fail-marker for test $1, and emit stdin as
-  # the error text.
-  # we use stdin because the failure message can be arbitrarily long, and this
-  # makes it convenient to write in scripts (using <<END syntax).
-  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
-  echo "failure: $1 ["
-  cat -
-  echo "]"
-}
-
-
-subunit_error_test () {
-  # emit the current protocol error-marker for test $1, and emit stdin as
-  # the error text.
-  # we use stdin because the failure message can be arbitrarily long, and this
-  # makes it convenient to write in scripts (using <<END syntax).
-  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
-  echo "error: $1 ["
-  cat -
-  echo "]"
-}
-
-
-subunit_skip_test () {
-  # emit the current protocol test skipped marker for test $1
-  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
-  echo "skip: $1"
-}
-
-
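(The shell functions above emit the line-based subunit v1 protocol: 'test:', 'success:', 'failure: ... [' blocks and so on. A minimal sketch of consuming that output from Python, assuming the v1 python-subunit bindings are importable; the sample stream below is illustrative only:

    import io
    import unittest
    import subunit

    stream = io.BytesIO(
        b"test: foo bar\n"
        b"success: foo bar\n"
    )
    result = unittest.TestResult()
    suite = subunit.ProtocolTestCase(stream)
    suite.run(result)
    print(result.testsRun, len(result.failures), len(result.errors))
)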
diff --git a/lib/subunit/shell/tests/test_function_output.sh b/lib/subunit/shell/tests/test_function_output.sh
deleted file mode 100755
index 00b0844..0000000
--- a/lib/subunit/shell/tests/test_function_output.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-#  subunit shell bindings.
-#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-
-# this script tests the output of the methods. As each is tested we start using
-# it.
-# So the first test manually implements the entire protocol, the next uses the
-# start method and so on.
-# it is assumed that we are running from the 'shell' tree root in the source
-# of subunit, and that the library sourcing tests have all passed - if they 
-# have not, this test script may well fail strangely.
-
-# import the library.
-. ${SHELL_SHARE}subunit.sh
-
-echo 'test: subunit_start_test output'
-func_output=$(subunit_start_test "foo bar"|grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xtest: foo bar" ]; then
-  echo 'success: subunit_start_test output'
-else
-  echo 'failure: subunit_start_test output ['
-  echo 'got an error code or incorrect output:'
-  echo "exit: $func_status"
-  echo "output: '$func_output'"
-  echo ']' ;
-fi
-
-subunit_start_test "subunit_pass_test output"
-func_output=$(subunit_pass_test "foo bar"|grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xsuccess: foo bar" ]; then
-  subunit_pass_test "subunit_pass_test output"
-else
-  echo 'failure: subunit_pass_test output ['
-  echo 'got an error code or incorrect output:'
-  echo "exit: $func_status"
-  echo "output: '$func_output'"
-  echo ']' ;
-fi
-
-subunit_start_test "subunit_fail_test output"
-func_output=$((subunit_fail_test "foo bar" <<END
-something
-  wrong
-here
-END
-)|grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xfailure: foo bar [
-something
-  wrong
-here
-]" ]; then
-  subunit_pass_test "subunit_fail_test output"
-else
-  echo 'failure: subunit_fail_test output ['
-  echo 'got an error code or incorrect output:'
-  echo "exit: $func_status"
-  echo "output: '$func_output'"
-  echo ']' ;
-fi
-
-subunit_start_test "subunit_error_test output"
-func_output=$((subunit_error_test "foo bar" <<END
-something
-  died
-here
-END
-)| grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xerror: foo bar [
-something
-  died
-here
-]" ]; then
-  subunit_pass_test "subunit_error_test output"
-else
-  subunit_fail_test "subunit_error_test output" <<END
-got an error code or incorrect output:
-exit: $func_status
-output: '$func_output'
-END
-fi
diff --git a/lib/subunit/shell/tests/test_source_library.sh b/lib/subunit/shell/tests/test_source_library.sh
deleted file mode 100755
index 699f128..0000000
--- a/lib/subunit/shell/tests/test_source_library.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-#  subunit shell bindings.
-#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
-#
-#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-#  license at the users choice. A copy of both licenses are available in the
-#  project source as Apache-2.0 and BSD. You may not use this file except in
-#  compliance with one of these two licences.
-#  
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
-#  license you chose for the specific language governing permissions and
-#  limitations under that license.
-#
-
-
-# this script tests that we can source the subunit shell bindings successfully.
-# It manually implements the control protocol so that it does not depend on the
-# bindings being complete yet.
-
-# we expect to be run from the tree root.
-
-echo 'test: shell bindings can be sourced'
-# if any output occurs, this has failed to source cleanly
-source_output=$(. ${SHELL_SHARE}subunit.sh 2>&1)
-if [ $? == 0 -a "x$source_output" = "x" ]; then
-  echo 'success: shell bindings can be sourced'
-else
-  echo 'failure: shell bindings can be sourced ['
-  echo 'got an error code or output during sourcing.:'
-  echo $source_output
-  echo ']' ;
-fi
-
-# now source it for real
-. ${SHELL_SHARE}subunit.sh
-
-# we should have a start_test function
-echo 'test: subunit_start_test exists'
-found_type=$(type -t subunit_start_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
-  echo 'success: subunit_start_test exists'
-else
-  echo 'failure: subunit_start_test exists ['
-  echo 'subunit_start_test is not a function:'
-  echo "type -t status: $status"
-  echo "output: $found_type"
-  echo ']' ;
-fi
-
-# we should have a pass_test function
-echo 'test: subunit_pass_test exists'
-found_type=$(type -t subunit_pass_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
-  echo 'success: subunit_pass_test exists'
-else
-  echo 'failure: subunit_pass_test exists ['
-  echo 'subunit_pass_test is not a function:'
-  echo "type -t status: $status"
-  echo "output: $found_type"
-  echo ']' ;
-fi
-
-# we should have a fail_test function
-echo 'test: subunit_fail_test exists'
-found_type=$(type -t subunit_fail_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
-  echo 'success: subunit_fail_test exists'
-else
-  echo 'failure: subunit_fail_test exists ['
-  echo 'subunit_fail_test is not a function:'
-  echo "type -t status: $status"
-  echo "output: $found_type"
-  echo ']' ;
-fi
-
-# we should have an error_test function
-echo 'test: subunit_error_test exists'
-found_type=$(type -t subunit_error_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
-  echo 'success: subunit_error_test exists'
-else
-  echo 'failure: subunit_error_test exists ['
-  echo 'subunit_error_test is not a function:'
-  echo "type -t status: $status"
-  echo "output: $found_type"
-  echo ']' ;
-fi
-
-# we should have a skip_test function
-echo 'test: subunit_skip_test exists'
-found_type=$(type -t subunit_skip_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
-  echo 'success: subunit_skip_test exists'
-else
-  echo 'failure: subunit_skip_test exists ['
-  echo 'subunit_skip_test is not a function:'
-  echo "type -t status: $status"
-  echo "output: $found_type"
-  echo ']' ;
-fi
-
diff --git a/lib/update-external.sh b/lib/update-external.sh
index efb4e67..e3d83d5 100755
--- a/lib/update-external.sh
+++ b/lib/update-external.sh
@@ -13,8 +13,8 @@ echo "Updating subunit..."
 git clone git://github.com/testing-cabal/subunit "$WORKDIR/subunit"
 rm -rf "$WORKDIR/subunit/.git"
 # Preserve wscript file
-cp "$LIBDIR/subunit/c/wscript" "$WORKDIR/subunit/c/wscript"
-rsync -avz --delete "$WORKDIR/subunit/" "$LIBDIR/subunit/"
+cp "$THIRD_PARTY_DIR/subunit/c/wscript" "$WORKDIR/subunit/c/wscript"
+rsync -avz --delete "$WORKDIR/subunit/" "$THIRD_PARTY_DIR/subunit/"
 
 echo "Updating testtools..."
 git clone git://github.com/testing-cabal/testtools "$WORKDIR/testtools"
@@ -24,7 +24,7 @@ rsync -avz --delete "$WORKDIR/testtools/" "$THIRD_PARTY_DIR/testtools/"
 echo "Updating dnspython..."
 git clone git://www.dnspython.org/dnspython.git "$WORKDIR/dnspython"
 rm -rf "$WORKDIR/dnspython/.git"
-rsync -avz --delete "$WORKDIR/dnspython/" "$LIBDIR/dnspython/"
+rsync -avz --delete "$WORKDIR/dnspython/" "$THIRD_PARTY_DIR/dnspython/"
 
 echo "Updating pep8..."
 git clone git://github.com/jcrocholl/pep8 "$WORKDIR/pep8"
diff --git a/lib/wscript_build b/lib/wscript_build
index e7751fd..2432df5 100644
--- a/lib/wscript_build
+++ b/lib/wscript_build
@@ -4,7 +4,6 @@ import os, Options
 
 # work out what python external libraries we need to install
 external_libs = {
-    "subunit": "subunit/python/subunit",
     }
 
 list = []
diff --git a/python/samba/tests/subunitrun.py b/python/samba/tests/subunitrun.py
index eaecd46..ec54db4 100755
--- a/python/samba/tests/subunitrun.py
+++ b/python/samba/tests/subunitrun.py
@@ -45,7 +45,7 @@ def check_subunit(mod):
         return False
     else:
         return True
-samba.ensure_external_module("subunit", "subunit/python")
+samba.ensure_third_party_module("subunit", "subunit/python", check_subunit)
 import subunit.run
 
 
diff --git a/selftest/selftesthelpers.py b/selftest/selftesthelpers.py
index 2c6f87f..4271d1e 100644
--- a/selftest/selftesthelpers.py
+++ b/selftest/selftesthelpers.py
@@ -68,8 +68,8 @@ else:
 python = os.getenv("PYTHON", "python")
 
 # Set a default value, overridden if we find a working one on the system
-tap2subunit = "PYTHONPATH=%s/lib/subunit/python:%s/lib/testtools:%s/third_party/python-extras:%s/third_party/mimeparse %s %s/lib/subunit/filters/tap2subunit" % (srcdir(), srcdir(), srcdir(), srcdir(), python, srcdir())
-subunit2to1 = "PYTHONPATH=%s/lib/subunit/python:%s/lib/testtools:%s/third_party/python-extras:%s/third_party/mimeparse %s %s/lib/subunit/filters/subunit-2to1" % (srcdir(), srcdir(), srcdir(), srcdir(), python, srcdir())
+tap2subunit = "PYTHONPATH=%s/third_party/subunit/python:%s/third_party/testtools:%s/third_party/python-extras:%s/third_party/mimeparse %s %s/third_party/subunit/filters/tap2subunit" % (srcdir(), srcdir(), srcdir(), srcdir(), python, srcdir())
+subunit2to1 = "PYTHONPATH=%s/third_party/subunit/python:%s/third_party/testtools:%s/third_party/python-extras:%s/third_party/mimeparse %s %s/third_party/subunit/filters/subunit-2to1" % (srcdir(), srcdir(), srcdir(), srcdir(), python, srcdir())
 tap2subunit_version = 2
 
 sub = subprocess.Popen("tap2subunit", stdin=subprocess.PIPE,
@@ -184,8 +184,8 @@ def planpythontestsuite(env, module, name=None, extra_path=[]):
     pypath = list(extra_path)
     if not has_system_subunit_run:
         pypath.extend([
-            "%s/lib/subunit/python" % srcdir(),
-            "%s/lib/testtools" % srcdir(),
+            "%s/third_party/subunit/python" % srcdir(),
+            "%s/third_party/testtools" % srcdir(),
             "%s/third_party/python-extras" % srcdir(),
             "%s/third_party/mimeparse" % srcdir()])
     args = [python, "-m", "subunit.run", "$LISTOPT", "$LOADLIST", module]
diff --git a/selftest/subunithelper.py b/selftest/subunithelper.py
index 220f903..2ebb217 100644
--- a/selftest/subunithelper.py
+++ b/selftest/subunithelper.py
@@ -28,7 +28,7 @@ def check_subunit(mod):
         return False
     else:
         return True
-samba.ensure_external_module("subunit", "subunit/python", check_subunit)
+samba.ensure_third_party_module("subunit", "subunit/python", check_subunit)
 
 import re
 import sys
diff --git a/source4/selftest/test_samba3dump.sh b/source4/selftest/test_samba3dump.sh
index 4db142b..f5980a2 100755
--- a/source4/selftest/test_samba3dump.sh
+++ b/source4/selftest/test_samba3dump.sh
@@ -1,7 +1,7 @@
 #!/bin/sh
 # Verify that samba3dump completes.
 
-. lib/subunit/shell/share/subunit.sh
+. third_party/subunit/shell/share/subunit.sh
 
 subunit_start_test samba3dump
 
diff --git a/third_party/subunit/.gitignore b/third_party/subunit/.gitignore
new file mode 100644
index 0000000..30f733a
--- /dev/null
+++ b/third_party/subunit/.gitignore
@@ -0,0 +1,56 @@
+/c/lib/child.os
+/c/tests/test_child
+.sconsign
+.sconsign.dblite
+/m4/**
+aclocal.m4
+compile
+config.guess
+config.h.in
+config.sub
+configure
+depcomp
+install-sh
+ltmain.sh
+missing
+autom4te.cache
+Makefile.in
+py-compile
+.deps
+.dirstamp
+.libs
+*.lo
+libsubunit.la
+libcppunit_subunit.la
+libtool
+stamp-h1
+libsubunit.pc
+libcppunit_subunit.pc
+config.log
+config.status
+Makefile
+config.h
+debian/files
+debian/libsubunit0
+debian/libsubunit-dev
+debian/subunit
+debian/python-subunit
+debian/*.log
+debian/*.debhelper
+debian/tmp
+debian/*.substvars
+/perl/blib
+/perl/pm_to_blib
+subunit-*.tar.gz
+subunit-*.tar.gz.asc
+perl/Makefile.PL
+/.testrepository
+__pycache__
+perl/MYMETA.yml
+/build/
+/dist/
+/*.egg-info/
+*.pyc
+*~
+.*.swp
+.*.swo
diff --git a/third_party/subunit/Apache-2.0 b/third_party/subunit/Apache-2.0
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/third_party/subunit/Apache-2.0
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/third_party/subunit/BSD b/third_party/subunit/BSD
new file mode 100644
index 0000000..fa130cd
--- /dev/null
+++ b/third_party/subunit/BSD
@@ -0,0 +1,26 @@
+Copyright (c) Robert Collins and Subunit contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. Neither the name of Robert Collins nor the names of Subunit contributors
+   may be used to endorse or promote products derived from this software
+   without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/third_party/subunit/COPYING b/third_party/subunit/COPYING
new file mode 100644
index 0000000..d298640
--- /dev/null
+++ b/third_party/subunit/COPYING
@@ -0,0 +1,36 @@
+Subunit is licensed under two licenses, the Apache License, Version 2.0 or the
+3-clause BSD License. You may use this project under either of these licenses
+- choose the one that works best for you.
+
+We require contributions to be licensed under both licenses. The primary
+difference between them is that the Apache license takes care of potential
+issues with Patents and other intellectual property concerns. This is
+important to Subunit as Subunit wants to be license compatible in a very
+broad manner to allow reuse and incorporation into other projects.
+
+Generally every source file in Subunit needs a license grant under both these
+licenses.  As the code is shipped as a single unit, a brief form is used:
+----
+Copyright (c) [yyyy][,yyyy]* [name or 'Subunit Contributors']
+
+Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+license at the users choice. A copy of both licenses are available in the
+project source as Apache-2.0 and BSD. You may not use this file except in
+compliance with one of these two licences.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+license you chose for the specific language governing permissions and
+limitations under that license.
+----
+
+Code that has been incorporated into Subunit from other projects will
+naturally be under its own license, and will retain that license.
+
+A known list of such code is maintained here:
+* The python/iso8601 module by Michael Twomey, distributed under an MIT style
+  licence - see python/iso8601/LICENSE for details.
+* The runtests.py and python/subunit/tests/TestUtil.py modules are GPL test
+  support modules. They are not installed by Subunit - they are only ever
+  used on the build machine.  Copyright 2004 Canonical Limited.
diff --git a/third_party/subunit/INSTALL b/third_party/subunit/INSTALL
new file mode 100644
index 0000000..29052eb
--- /dev/null
+++ b/third_party/subunit/INSTALL
@@ -0,0 +1,35 @@
+To install subunit
+------------------
+
+Bootstrap::
+  autoreconf -vi
+Configure::
+  ./configure
+Install::
+  make install
+
+Dependencies
+------------
+
+* Python for the filters
+* 'testtools' (On Debian and Ubuntu systems the 'python-testtools' package,
+  the testtools package on pypi, or https://launchpad.net/testtools) for
+  the extended test API which permits attachments. Version 0.9.30 or newer is
+  required. Of particular note, http://testtools.python-hosting.com/ is not
+  the testtools you want.
+* 'testscenarios' (On Debian and Ubuntu systems the 'python-testscenarios'
+  package, the 'testscenarios' package on pypi, or
+  https://launchpad.net/testscenarios) for running some of the python unit tests.
+* A C compiler for the C bindings
+* Perl for the Perl tools (including subunit-diff)
+* Check to run the subunit test suite.
+* python-gtk2 if you wish to use subunit2gtk
+* python-junitxml if you wish to use subunit2junitxml
+* pkg-config for configure detection of supporting libraries.
+
+Binary packages
+---------------
+
+A number of distributions now include subunit; check your package manager
+first. The authors maintain a personal package archive on Launchpad::
+  https://launchpad.net/~testing-cabal/+archive/archive
diff --git a/third_party/subunit/MANIFEST.in b/third_party/subunit/MANIFEST.in
new file mode 100644
index 0000000..4f521dc
--- /dev/null
+++ b/third_party/subunit/MANIFEST.in
@@ -0,0 +1,20 @@
+exclude .gitignore
+exclude aclocal.m4
+prune autom4te.cache
+prune c
+prune c++
+prune compile
+exclude configure*
+exclude depcomp
+exclude INSTALL
+exclude install-sh
+exclude lib*
+exclude ltmain.sh
+prune m4
+exclude Makefile*
+exclude missing
+prune perl
+exclude py-compile
+prune shell
+exclude stamp-h1
+include NEWS
diff --git a/third_party/subunit/Makefile.am b/third_party/subunit/Makefile.am
new file mode 100644
index 0000000..e8f018e
--- /dev/null
+++ b/third_party/subunit/Makefile.am
@@ -0,0 +1,147 @@
+EXTRA_DIST =  \
+	.bzrignore \
+	Apache-2.0 \
+	BSD \
+	INSTALL \
+	Makefile.am \
+	NEWS \
+	README \
+	all_tests.py \
+	c++/README \
+	c/README \
+	c/check-subunit-0.9.3.patch \
+	c/check-subunit-0.9.5.patch \
+	c/check-subunit-0.9.6.patch \
+	perl/Makefile.PL.in \
+	perl/lib/Subunit.pm \
+	perl/lib/Subunit/Diff.pm \
+	perl/subunit-diff \
+	python/iso8601/LICENSE \
+	python/iso8601/README \
+	python/iso8601/README.subunit \
+	python/iso8601/setup.py \
+	python/iso8601/test_iso8601.py \
+	python/subunit/tests/__init__.py \
+	python/subunit/tests/sample-script.py \
+	python/subunit/tests/sample-two-script.py \
+	python/subunit/tests/test_chunked.py \
+	python/subunit/tests/test_details.py \
+	python/subunit/tests/test_filters.py \
+	python/subunit/tests/test_output_filter.py \
+	python/subunit/tests/test_progress_model.py \
+	python/subunit/tests/test_run.py \
+	python/subunit/tests/test_subunit_filter.py \
+	python/subunit/tests/test_subunit_stats.py \
+	python/subunit/tests/test_subunit_tags.py \
+	python/subunit/tests/test_tap2subunit.py \
+	python/subunit/tests/test_test_protocol.py \
+	python/subunit/tests/test_test_protocol2.py \
+	python/subunit/tests/test_test_results.py \
+	setup.py \
+	shell/README \
+	shell/share/subunit.sh \
+	shell/subunit-ui.patch \
+	shell/tests/test_function_output.sh \
+	shell/tests/test_source_library.sh
+
+ACLOCAL_AMFLAGS = -I m4
+
+include_subunitdir = $(includedir)/subunit
+
+dist_bin_SCRIPTS = \
+	filters/subunit-1to2 \
+	filters/subunit-2to1 \
+	filters/subunit-filter \
+	filters/subunit-ls \
+	filters/subunit-notify \
+	filters/subunit-output \
+	filters/subunit-stats \
+	filters/subunit-tags \
+	filters/subunit2csv \
+	filters/subunit2gtk \
+	filters/subunit2junitxml \
+	filters/subunit2pyunit \
+	filters/tap2subunit
+
+TESTS = $(check_PROGRAMS)
+
+## install libsubunit.pc
+pcdatadir = $(libdir)/pkgconfig
+pcdata_DATA = \
+	libsubunit.pc \
+	libcppunit_subunit.pc
+
+pkgpython_PYTHON = \
+	python/subunit/__init__.py \
+	python/subunit/chunked.py \
+	python/subunit/details.py \
+	python/subunit/filters.py \
+	python/subunit/iso8601.py \
+	python/subunit/progress_model.py \
+	python/subunit/run.py \
+	python/subunit/v2.py \
+	python/subunit/test_results.py \
+	python/subunit/_output.py
+
+lib_LTLIBRARIES = libsubunit.la
+lib_LTLIBRARIES +=  libcppunit_subunit.la
+
+include_subunit_HEADERS = \
+	c/include/subunit/child.h \
+	c++/SubunitTestProgressListener.h
+
+check_PROGRAMS = \
+	c/tests/test_child
+
+libsubunit_la_SOURCES = \
+	c/lib/child.c \
+	c/include/subunit/child.h
+
+libcppunit_subunit_la_SOURCES = \
+	c++/SubunitTestProgressListener.cpp \
+	c++/SubunitTestProgressListener.h
+
+tests_LDADD = @CHECK_LIBS@ $(top_builddir)/libsubunit.la
+c_tests_test_child_CFLAGS = -I$(top_srcdir)/c/include $(SUBUNIT_CFLAGS) @CHECK_CFLAGS@
+c_tests_test_child_LDADD = $(tests_LDADD)
+
+
+all-local: perl/Makefile
+	$(MAKE) -C perl all
+
+check-local: perl/Makefile
+	$(MAKE) -C perl check
+	SHELL_SHARE='$(top_srcdir)/shell/share/' \
+	PYTHONPATH='$(abs_top_srcdir)/python':'$(abs_top_srcdir)':${PYTHONPATH} \
+	$(PYTHON) -m testtools.run all_tests.test_suite
+
+clean-local:
+	find . -type f -name "*.pyc" -exec rm {} ';'
+	rm -f perl/Makefile
+
+# Remove perl dir for VPATH builds.
+distclean-local:
+	-rmdir perl > /dev/null
+	-rm perl/Makefile.PL > /dev/null
+
+install-exec-local: perl/Makefile
+	$(MAKE) -C perl install
+
+mostlyclean-local:
+	rm -rf perl/blib
+	rm -rf perl/pm_to_blib
+
+# 'uninstall' perl files during distcheck
+uninstall-local:
+	if [ "_inst" = `basename ${prefix}` ]; then \
+	  $(MAKE) -C perl uninstall_distcheck; \
+	    rm -f "$(DESTDIR)$(bindir)"/subunit-diff; \
+	fi
+
+# The default for MakeMaker; can be overridden by exporting INSTALLDIRS.
+INSTALLDIRS ?= site
+
+perl/Makefile: perl/Makefile.PL
+	mkdir -p perl
+	cd perl && perl Makefile.PL INSTALLDIRS=${INSTALLDIRS}
+	-rm perl/Makefile.old > /dev/null
diff --git a/third_party/subunit/NEWS b/third_party/subunit/NEWS
new file mode 100644
index 0000000..2b310c8
--- /dev/null
+++ b/third_party/subunit/NEWS
@@ -0,0 +1,547 @@
+---------------------
+subunit release notes
+---------------------
+
+NEXT (In development)
+---------------------
+
+0.0.21
+------
+
+BUGFIXES
+~~~~~~~~
+
+* Brown bag bugfix - 0.0.20's setup.py referenced cvs not csv.
+  (Robert Collins, #1361924)
+
+0.0.20
+------
+
+BUGFIXES
+~~~~~~~~
+
+* subunit2csv is now installed when using pip.
+  (Robert Collins, #1279669)
+
+* testscenarios is now a test dependency, not an install dependency.
+  (Arfrever Frehtes Taifersar Arahesis, #1292757)
+
+* The python-subunit tarball can now have setup run from the current
+  directory. (Robert Collins, #1361857)
+
+0.0.19
+------
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* ``subunit.run`` in Python will now exit 0 as long as the test stream has
+  been generated correctly - this has always been the intent but API friction
+  with testtools had prevented it working.
+  (Robert Collins)
+
+0.0.18
+------
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Fix compatibility with testtools 0.9.35 which dropped the 'all' compat
+  symbol. This breaks support for Python versions lower than 2.6.
+  (Robert Collins, #1274056)
+
+0.0.17
+------
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Add ``subunit-output`` tool that can generate a Subunit v2 bytestream from
+  arguments passed on the command line. (Thomi Richards, #1252084)
+
+0.0.16
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Perl files should now honour perl system config.
+  (Benedikt Morbach, #1233198)
+
+* Python 3.1 and 3.2 have an inconsistent memoryview implementation which
+  required a workaround for NUL byte detection. (Robert Collins, #1216246)
+
+* The test suite was failing 6 tests due to testtools changing its output
+  formatting of exceptions. (Robert Collins)
+
+* V2 parser errors now set appropriate mime types for the encapsulated packet
+  data and the error message. (Robert Collins)
+
+* When tests fail to import ``python subunit.run -l ...`` will now write a
+  subunit file attachment listing the failed imports and exit 2, rather than
+  listing the stub objects from the importer and exiting 0.
+  (Robert Collins, #1245672)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Most filters will now accept a file path argument instead of only reading
+  from stdin. (Robert Collins, #409206)
+
+0.0.15
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Clients of subunit did not expect memoryview objects in StreamResult events.
+  (Robert Collins)
+
+* Memoryview and struct were mutually incompatible in 2.7.3 and 3.2.
+  (Robert Collins, #1216163)
+
+0.0.14
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Memoryview detection was broken and thus its use was never really tested.
+  (Robert Collins, 1216101)
+
+* TestProtocol2's tag tests were set sort order dependent.
+  (Robert Collins, #1025392)
+
+* TestTestProtocols' test_tags_both was set sort order dependent.
+  (Robert Collins, #1025392)
+
+* TestTestProtocols' test_*_details were dictionary sort order dependent.
+  (Robert Collins, #1025392)
+
+* TestSubUnitTags's test_add_tag was also set sort order dependent.
+  (Robert Collins, #1025392)
+
+0.0.13
+------
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* subunit should now build with automake 1.11 again. (Robert Collins)
+
+* `subunit-stats` no longer outputs encapsulated stdout as subunit.
+  (Robert Collins, #1171987)
+
+* The logic for `subunit.run` is now importable via python -
+  `subunit.run.main`. (Robert Collins, #606770)
+
+BUG FIXES
+~~~~~~~~~
+
+* Removed GPL files that were (C) non Subunit Developers - they are
+  incompatible for binary distribution, which affects redistributors.
+  (Robert Collins, #1185591)
+
+0.0.12
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* Subunit v2 packets with both file content and route code were not being
+  parsed correctly - they would incorrectly emit a parser error, due to trying
+  to parse the route code length from the first bytes of the file content.
+  (Robert Collins, 1172815)
+
+0.0.11
+------
+
+v2 protocol draft included in this release. The v2 protocol trades off human
+readability for a massive improvement in robustness, the ability to represent
+concurrent tests in a single stream, cheaper parsing, significantly better
+in-line debugging support, and structured forwarding of non-test data (such
+as stdout or stdin data).
+
+This change includes two new filters (subunit-1to2 and subunit-2to1). Use
+these filters to convert old streams to v2 and convert v2 streams to v1.
+
+All the other filters now only parse and emit v2 streams. V2 is still in
+draft format, so if you want to delay and wait for v2 to be finalised, you
+should use subunit-2to1 before any serialisation steps take place.
+With the ability to encapsulate multiple non-test streams, another significant
+change is that filters which emit subunit now encapsulate any non-subunit they
+encounter, labelling it 'stdout'. This permits multiplexing such streams and
+detangling the stdout streams from each input.
+
+The subunit libraries (Python etc) have not changed their behaviour: they
+still emit v1 from their existing API calls. New API's are being added
+and applications should migrate once their language has those API's available.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* ``subunit.run`` now replaces sys.stdout to ensure that stdout is unbuffered
+  - without this pdb output is not reliably visible when stdout is a pipe
+  as it usually is. (Robert Collins)
+
+* v2 protocol draft included in this release. (Python implementation only so
+  far). (Robert Collins)
+
+* Two new Python classes -- ``StreamResultToBytes`` and
+  ``ByteStreamToStreamResult`` handle v2 generation and parsing.
+  (Robert Collins)
+
+0.0.10
+------
+
+BUG FIXES
+~~~~~~~~~
+
+* make_stream_binary is now public for reuse. (Robert Collins)
+
+* NAME was not defined in the protocol BNF. (Robert Collins)
+
+* UnsupportedOperation is available in the Python2.6 io library, so ask
+  forgiveness rather than permission for obtaining it. (Robert Collins)
+
+* Streams with no fileno() attribute are now supported, but they are not
+  checked for being in binary mode: be sure to take care of that if using
+  the library yourself. (Robert Collins)
+
+0.0.9
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* All the source files are now included in the distribution tarball.
+  (Arfrever Frehtes Taifersar Arahesis, Robert Collins, #996275)
+
+* ``python/subunit/tests/test_run.py`` and ``python/subunit/filters.py`` were
+  not included in the 0.0.8 tarball. (Robert Collins)
+
+* Test ids which include non-ascii unicode characters are now supported.
+  (Robert Collins, #1029866)
+
+* The ``failfast`` option to ``subunit.run`` will now work. The dependency on
+  testtools has been raised to 0.9.23 to permit this.
+  (Robert Collins, #1090582)
+
+0.0.8
+-----
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Perl module now correctly outputs "failure" instead of "fail".  (Stewart Smith)
+
+* Shell functions now output timestamps. (Stewart Smith, Robert Collins)
+
+* 'subunit2csv' script that converts subunit output to CSV format.
+  (Jonathan Lange)
+
+* ``TagCollapsingDecorator`` now correctly distinguishes between local and
+  global tags.  (Jonathan Lange)
+
+* ``TestResultFilter`` always forwards ``time:`` events.
+  (Benji York, Brad Crittenden)
+
+BUG FIXES
+~~~~~~~~~
+
+* Add 'subunit --no-xfail', which will omit expected failures from the subunit
+  stream. (John Arbash Meinel, #623642)
+
+* Add 'subunit -F/--only-genuine-failures' which sets all of '--no-skips',
+  '--no-xfail', '--no-passthrough, '--no-success', and gives you just the
+  failure stream. (John Arbash Meinel)
+
+* Python2.6 support was broken by the fixup feature.
+  (Arfrever Frehtes Taifersar Arahesis, #987490)
+
+* Python3 support regressed in trunk.
+  (Arfrever Frehtes Taifersar Arahesis, #987514)
+
+* Python3 support was insufficiently robust in detecting unicode streams.
+  (Robert Collins, Arfrever Frehtes Taifersar Arahesis)
+
+* Tag support has been implemented for TestProtocolClient.
+  (Robert Collins, #518016)
+
+* Tags can now be filtered. (Jonathan Lange, #664171)
+
+* Test suite works with latest testtools (but not older ones - formatting
+  changes only). (Robert Collins)
+
+0.0.7
+-----
+
+The Subunit Python test runner ``python -m subunit.run`` can now report the
+test ids and also filter via a test id list file thanks to improvements in
+``testtools.run``. See the testtools manual, or testrepository - a major
+user of such functionality.
+
+Additionally the protocol now has a keyword uxsuccess for Unexpected Success
+reporting. Older parsers will report tests with this status code as 'lost
+connection'.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Add ``TimeCollapsingDecorator`` which collapses multiple sequential time()
+  calls into just the first and last. (Jonathan Lange)
+
+* Add ``TagCollapsingDecorator`` which collapses many tags() calls into one
+  where possible. (Jonathan Lange, Robert Collins)
+
+* Force flush of writes to stdout in c/tests/test_child.
+  (Jelmer Vernooij, #687611)
+
+* Provisional Python 3.x support.
+  (Robert Collins, Tres Seaver, Martin[gz], #666819)
+
+* ``subunit.chunked.Decoder`` Python class takes a new ``strict`` option,
+  which defaults to ``True``. When ``False``, the ``Decoder`` will accept
+  incorrect input that is still unambiguous. i.e. subunit will not barf if
+  a \r is missing from the input. (Martin Pool)
+
+* ``subunit-filter`` preserves the relative ordering of ``time:`` statements,
+  so you can now use filtered streams to gather data about how long it takes
+  to run a test. (Jonathan Lange, #716554)
+
+* ``subunit-ls`` now handles a stream with time: instructions that start
+  partway through the stream (which may lead to strange times) more gracefully.
+  (Robert Collins, #785954)
+
+* ``subunit-ls`` should handle the new test outcomes in Python2.7 better.
+  (Robert Collins, #785953)
+
+* ``TestResultFilter`` now collapses sequential calls to time().
+  (Jonathan Lange, #567150)
+
+* ``TestResultDecorator.tags()`` now actually works, and is no longer a buggy
+  copy/paste of ``TestResultDecorator.time()``. (Jonathan Lange, #681828)
+
+* ``TestResultFilter`` now supports a ``fixup_expected_failures``
+  argument. (Jelmer Vernooij, #755241)
+
+* The ``subunit.run`` Python module supports ``-l`` and ``--load-list`` as
+  per ``testtools.run``. This required a dependency bump due to a small
+  API change in ``testtools``. (Robert Collins)
+
+* The help for subunit-filter was confusing about the behaviour of ``-f`` /
+  ``--no-failure``. (Robert Collins, #703392)
+
+* The Python2.7 / testtools addUnexpectedSuccess API is now supported. This
+  required adding a new status code to the protocol. (Robert Collins, #654474)
+
+CHANGES
+~~~~~~~
+
+* testtools 0.9.11 or newer is now needed (due to the Python 3 support).
+  (Robert Collins)
+
+0.0.6
+-----
+
+This release of subunit fixes a number of unicode related bugs. This depends on
+testtools 0.9.4 and will not function without it. Thanks to Tres Seaver there
+is also an optional native setup.py file for use with easy_install and the
+like.
+
+BUG FIXES
+~~~~~~~~~
+
+* Be consistent about delivering unicode content to testtools StringException
+  class which has become (appropriately) conservative. (Robert Collins)
+
+* Fix incorrect reference to subunit_test_failf in c/README.
+  (Brad Hards, #524341)
+
+* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
+  is purely cosmetic as the parameters are passed down with no interpretation.
+  (Robert Collins, #537611)
+
+* Old style tracebacks with no encoding info are now treated as UTF8 rather
+  than some-random-codec-like-ascii. (Robert Collins)
+
+* On windows, ProtocolTestCase and TestProtocolClient will set their streams to
+  binary mode by calling into msvcrt; this avoids having their input or output
+  mangled by the default line ending translation on that platform.
+  (Robert Collins, Martin [gz], #579296)
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* Subunit now has a setup.py for python deployments that are not using
+  distribution packages. (Tres Seaver, #538181)
+
+* Subunit now supports test discovery by building on the testtools support for
+  it. You can take advantage of it with "python -m subunit.run discover [path]"
+  and see "python -m subunit.run discover --help" for more options.
+
+* Subunit now uses the improved unicode support in testtools when outputting
+  non-details based test information; this should consistently UTF8 encode such
+  strings.
+
+* The Python TestProtocolClient now flushes output on startTest and stopTest.
+  (Martin [gz]).
+
+
+0.0.5
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* make check was failing if subunit wasn't installed due to a missing include
+  path for the test program test_child.
+
+* make distcheck was failing due to a missing $(top_srcdir) rune.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* New filter `subunit-notify` that will show a notification window with test
+  statistics when the test run finishes.
+
+* subunit.run will now pipe its output to the command in the
+  SUBUNIT_FORMATTER environment variable, if set.
+
+0.0.4
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* subunit2junitxml -f required a value; this is now fixed and -f acts as a
+  boolean switch with no parameter.
+
+* Building with autoconf 2.65 is now supported.
+
+
+0.0.3
+-----
+
+  CHANGES:
+
+    * License change, by unanimous agreement of contributors to BSD/Apache
+      License Version 2.0. This makes Subunit compatible with more testing
+      frameworks.
+
+  IMPROVEMENTS:
+
+    * CPPUnit is now directly supported: subunit builds a cppunit listener
+      ``libcppunit-subunit``.
+
+    * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
+      from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
+      serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
+      The ``ProtocolTestCase`` parser now calls outcomes using an extended
+      API that permits attaching arbitrary MIME resources such as text files,
+      log entries and so on. This extended API is being developed with the
+      Python testing community, and is in flux. ``TestResult`` objects that
+      do not support the API will be detected and transparently downgraded
+      back to the regular Python unittest API.
+
+    * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
+      variable when installing.
+
+    * Multipart test outcomes are tentatively supported; the exact protocol
+      for them, both serialiser and object is not yet finalised. Testers and
+      early adopters are sought. As part of this and also in an attempt to
+      provide a more precise focus on the wire protocol and toolchain,
+      Subunit now depends on testtools (http://launchpad.net/testtools)
+      release 0.9.0 or newer.
+
+    * subunit2junitxml supports a new option, --forward which causes it
+      to forward the raw subunit stream in a similar manner to tee. This
+      is used with the -o option to both write a xml report and get some
+      other subunit filter to process the stream.
+
+    * The C library now has ``subunit_test_skip``.
+
+  BUG FIXES:
+
+    * Install progress_model.py correctly.
+
+    * Non-gcc builds will no longer try to use gcc specific flags.
+      (Thanks trondn-norbye)
+
+  API CHANGES:
+
+  INTERNALS:
+
+0.0.2
+-----
+
+  CHANGES:
+
+  IMPROVEMENTS:
+
+    * A number of filters now support ``--no-passthrough`` to cause all
+      non-subunit content to be discarded. This is useful when precise control
+      over what is output is required - such as with subunit2junitxml.
+
+    * A small perl parser is now included, and a new ``subunit-diff`` tool
+      using that is included. (Jelmer Vernooij)
+
+    * Subunit streams can now include optional, incremental lookahead
+      information about progress. This allows reporters to make estimates
+      about completion, when such information is available. See the README
+      under ``progress`` for more details.
+
+    * ``subunit-filter`` now supports regex filtering via ``--with`` and
+      ``--without`` options. (Martin Pool)
+
+    * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
+      test stream.
+
+    * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
+      test reporter to be used, which has a textual progress bar. This requires
+      a recent bzrlib as a minor bugfix was required in bzrlib to support this.
+
+    * ``subunit2junitxml`` has been added. This filter converts a subunit
+      stream to a single JUnit style XML stream using the pyjunitxml
+      python library.
+
+    * The shell functions support skipping via ``subunit_skip_test`` now.
+
+  BUG FIXES:
+
+    * ``xfail`` outcomes are now passed to python TestResult's via
+      addExpectedFailure if it is present on the TestResult. Python 2.6 and
+      earlier which do not have this function will have ``xfail`` outcomes
+      passed through as success outcomes as earlier versions of subunit did.
+
+  API CHANGES:
+
+    * tags are no longer passed around in python via the ``TestCase.tags``
+      attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called,
+      and like in the protocol, if called while a test is active only applies
+      to that test. (Robert Collins)
+
+    * ``TestResultFilter`` takes a new optional constructor parameter
+      ``filter_predicate``.  (Martin Pool)
+
+    * When a progress: directive is encountered in a subunit stream, the
+      python bindings now call the ``progress(offset, whence)`` method on
+      ``TestResult``.
+
+    * When a time: directive is encountered in a subunit stream, the python
+      bindings now call the ``time(seconds)`` method on ``TestResult``.
+
+  INTERNALS:
+
+    * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
+      users of subunit will want to wrap their ``TestProtocolClient`` objects
+      in this decorator to get test timing data for performance analysis.
+
+    * (python) ExecTestCase supports passing arguments to test scripts.
+
+    * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
+      which can be used to call some code on every event, without having to
+      implement all the event methods.
+
+    * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
+      causes a timestamp to be output to the stream.
diff --git a/third_party/subunit/README b/third_party/subunit/README
new file mode 100644
index 0000000..ae35a58
--- /dev/null
+++ b/third_party/subunit/README
@@ -0,0 +1,469 @@
+
+  subunit: A streaming protocol for test results
+  Copyright (C) 2005-2013 Robert Collins <robertc at robertcollins.net>
+
+  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+  license at the users choice. A copy of both licenses are available in the
+  project source as Apache-2.0 and BSD. You may not use this file except in
+  compliance with one of these two licences.
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+  license you chose for the specific language governing permissions and
+  limitations under that license.
+
+  See the COPYING file for full details on the licensing of Subunit.
+
+  subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+  licence - see python/iso8601/LICENSE for details.
+
+Subunit
+-------
+
+Subunit is a streaming protocol for test results.
+
+There are two major revisions of the protocol. Version 1 was trivially human
+readable but had significant defects as far as highly parallel testing was
+concerned - it had no room for doing discovery and execution in parallel,
+required substantial buffering when multiplexing and was fragile - a corrupt
+byte could cause an entire stream to be misparsed. Version 1.1 added
+encapsulation of binary streams which mitigated some of the issues but the
+core remained.
+
+Version 2 shares many of the good characteristics of Version 1 - it can be
+embedded into a regular text stream (e.g. from a build system) and it still
+models xUnit style test execution. It also fixes many of the issues with
+Version 1 - Version 2 can be multiplexed without excessive buffering (in
+time or space), it has a well defined recovery mechanism for dealing with
+corrupted streams (e.g. where two processes write to the same stream
+concurrently, or where the stream generator suffers a bug).
+
+More details on both protocol versions can be found in the 'Protocol' section
+of this document.
+
+Subunit comes with command line filters to process a subunit stream and
+language bindings for python, C, C++ and shell. Bindings are easy to write
+for other languages.
+
+A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+   reported/displayed together. For instance, tests from different languages
+   can be shown as a seamless whole, and tests running on multiple machines
+   can be aggregated into a single stream through a multiplexer.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+   other can be run separately and then aggregated, rather than interfering
+   with each other or requiring an ad hoc test->runner reporting protocol.
+ * Grid testing: subunit can act as the necessary serialisation and
+   deserialisation to get test runs on distributed machines to be reported in
+   real time.
+
+Subunit supplies the following filters:
+ * tap2subunit - convert perl's TestAnythingProtocol to subunit.
+ * subunit2csv - convert a subunit stream to csv.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+Integration with other tools
+----------------------------
+
+Subunit's language bindings act as integration with various test runners like
+'check', 'cppunit', Python's 'unittest'. Beyond that a small amount of glue
+(typically a few lines) will allow Subunit to be used in more sophisticated
+ways.
+
+Python
+======
+
+Subunit has excellent Python support: most of the filters and tools are written
+in python and there are facilities for using Subunit to increase test isolation
+seamlessly within a test suite.
+
+The most common way is to run an existing python test suite and have it output
+subunit via the ``subunit.run`` module::
+
+  $ python -m subunit.run mypackage.tests.test_suite
+
+For more information on the Python support Subunit offers, please see
+``pydoc subunit``, or the source in ``python/subunit/``.
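+
+As a minimal illustrative sketch (the file name ``results.subunit`` is just a
+placeholder), a stored version 1 stream can also be replayed into a standard
+``unittest.TestResult`` using ``subunit.ProtocolTestCase``::
+
+  import unittest
+
+  import subunit
+
+  # ProtocolTestCase parses a v1 stream and replays each outcome into the
+  # TestResult passed to run().
+  with open('results.subunit', 'rb') as stream:
+      suite = subunit.ProtocolTestCase(stream)
+      result = unittest.TestResult()
+      suite.run(result)
+      print('%d tests run, %d failures'
+            % (result.testsRun, len(result.failures)))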
+
+C
+=
+
+Subunit has C bindings to emit the protocol. The 'check' C unit testing project
+has included subunit support in their project for some years now. See
+'c/README' for more details.
+
+C++
+===
+
+The C library is includable and usable directly from C++. A TestListener for
+CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+shell
+=====
+
+There are two sets of shell tools. There are filters, which accept a subunit
+stream on stdin and output processed data (or a transformed stream) on stdout.
+
+Then there are unittest facilities similar to those for C : shell bindings
+consisting of simple functions to output protocol elements, and a patch for
+adding subunit output to the 'ShUnit' shell test runner. See 'shell/README' for
+details.
+
+Filter recipes
+--------------
+
+To ignore some failing tests whose root cause is already known::
+
+  subunit-filter --without 'AttributeError.*flavor'
+
+
+The xUnit test model
+--------------------
+
+Subunit implements a slightly modified xUnit test model. The stock standard
+model is that there are tests, which have an id(), can be run, and when run
+start, emit an outcome (like success or failure) and then finish.
+
+Subunit extends this with the idea of test enumeration (find out about tests
+a runner has without running them), tags (allow users to describe tests in
+ways the test framework doesn't apply any semantic value to), file attachments
+(allow arbitrary data to make analysing a failure easy) and timestamps.
+
+The protocol
+------------
+
+Version 2, or v2 is new and still under development, but is intended to
+supersede version 1 in the very near future. Subunit's bundled tools accept
+only version 2 and only emit version 2, but the new filters subunit-1to2 and
+subunit-2to1 can be used to interoperate with older third party libraries.
+
+Version 2
+=========
+
+Version 2 is a binary protocol consisting of independent packets that can be
+embedded in the output from tools like make - as long as each packet has no
+other bytes mixed in with it (which 'make -j N>1' has a tendency of doing).
+Version 2 is currently in draft form, and early adopters should be willing
+to either discard stored results (if protocol changes are made), or bulk
+convert them back to v1 and then to a newer edition of v2.
+
+The protocol synchronises at the start of the stream, after a packet, or
+after any 0x0A byte. That is, a subunit v2 packet starts after a newline or
+directly after the end of the prior packet.
+
+Subunit is intended to be transported over a reliable streaming protocol such
+as TCP. As such it does not concern itself with out of order delivery of
+packets. However, because of the possibility of corruption due to either
+bugs in the sender, or due to mixed up data from concurrent writes to the same
+fd when being embedded, subunit strives to recover reasonably gracefully from
+damaged data.
+
+A key design goal for Subunit version 2 is to allow processing and multiplexing
+without forcing buffering for semantic correctness, as buffering tends to hide
+hung or otherwise misbehaving tests. That said, limited time based buffering
+for network efficiency is a good idea - this is ultimately an implementation
+choice. Line buffering is also discouraged for subunit streams, as dropping
+into a debugger or other tool may require interactive traffic even if line
+buffering would not otherwise be a problem.
+
+In version two there are two conceptual events - a test status event and a file
+attachment event. Events may have timestamps, and the path of multiplexers that
+an event is routed through is recorded to permit sending actions back to the
+source (such as new tests to run or stdin for driving debuggers and other
+interactive input). Test status events are used to enumerate tests, to report
+tests and test helpers as they run. Tests may have tags, used to allow
+tunnelling extra meanings through subunit without requiring parsing of
+arbitrary file attachments. Things that are not standalone tests get marked
+as such by setting the 'Runnable' flag to false. (For instance, individual
+assertions in TAP are not runnable tests, only the top level TAP test script
+is runnable).
+
+File attachments are used to provide rich detail about the nature of a failure.
+File attachments can also be used to encapsulate stdout and stderr both during
+and outside tests.
+
+Most numbers are stored in network byte order (Most Significant Byte first),
+encoded using a variation of http://www.dlugosz.com/ZIP2/VLI.html. The first
+byte's top 2 high order bits encode the total number of octets in the number.
+This encoding can encode values from 0 to 2**30-1, enough to encode a
+nanosecond. Numbers that are not variable length encoded are still stored in
+MSB order.
+
++--------+--------+---------+------------+
+| prefix | octets |     max |  max (dec) |
++--------+--------+---------+------------+
+| 00     |      1 |  2**6-1 |         63 |
+| 01     |      2 | 2**14-1 |      16383 |
+| 10     |      3 | 2**22-1 |    4194303 |
+| 11     |      4 | 2**30-1 | 1073741823 |
++--------+--------+---------+------------+
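+
+As a rough sketch only (not the reference implementation), the number
+encoding described above can be written as::
+
+  def write_number(value):
+      # The top two bits of the first byte give the total octet count (1-4);
+      # the remaining bits hold the value, most significant byte first.
+      if value < 2 ** 6:
+          return bytes([value])
+      if value < 2 ** 14:
+          return bytes([0x40 | (value >> 8), value & 0xff])
+      if value < 2 ** 22:
+          return bytes([0x80 | (value >> 16), (value >> 8) & 0xff,
+                        value & 0xff])
+      if value < 2 ** 30:
+          return bytes([0xc0 | (value >> 24), (value >> 16) & 0xff,
+                        (value >> 8) & 0xff, value & 0xff])
+      raise ValueError("%d does not fit in the encoding" % value)
+
+  # write_number(63) == b'\x3f'; write_number(16383) == b'\x7f\xff'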
+
+All variable length elements of the packet are stored with a length prefix
+number allowing them to be skipped over for consumers that don't need to
+interpret them.
+
+UTF-8 strings are stored with no terminating NUL and should not have any
+embedded NULs (implementations SHOULD validate any such strings that they
+process and take some remedial action, such as discarding the packet as
+corrupt).
+
+In short the structure of a packet is:
+PACKET := SIGNATURE FLAGS PACKET_LENGTH TIMESTAMP? TESTID? TAGS? MIME?
+          FILECONTENT? ROUTING_CODE? CRC32
+
+In more detail...
+
+Packets are identified by a single byte signature - 0xB3, which is never legal
+in a UTF-8 stream as the first byte of a character. 0xB3 starts with the first
+bit set and the second not, which is the UTF-8 signature for a continuation
+byte. 0xB3 was chosen as 0x73 ('s' in ASCII) with the top two bits replaced by
+the 1 and 0 for a continuation byte.
+
+If subunit packets are being embedded in a non-UTF-8 text stream, where 0x73 is
+a legal character, consider either recoding the text to UTF-8, or using
+subunit's 'file' packets to embed the text stream in subunit, rather than the
+other way around.
+
+Following the signature byte comes a 16-bit flags field, which includes a
+4-bit version field - if the version is not 0x2 then the packet cannot be
+read. It is recommended to signal an error at this point (e.g. by emitting
+a synthetic error packet and returning to the top level loop to look for
+new packets, or exiting with an error). If recovery is desired, treat the
+packet signature as an opaque byte and scan for a new synchronisation point.
+NB: Subunit V1 and V2 packets may legitimately include 0xB3 internally,
+as they are an 8-bit safe container format, so recovery from this situation
+may involve an arbitrary number of false positives until an actual packet
+is encountered : and even then it may still be false, failing after passing
+the version check due to coincidence.
+
+Flags are stored in network byte order too.
++-------------------------+------------------------+
+| High byte               | Low byte               |
+| 15 14 13 12 11 10  9  8 | 7  6  5  4  3  2  1  0 |
+| VERSION    |feature bits|                        |
++------------+------------+------------------------+
+
+Valid version values are:
+0x2 - version 2
+
+Feature bits:
+Bit 11 - mask 0x0800 - Test id present.
+Bit 10 - mask 0x0400 - Routing code present.
+Bit  9 - mask 0x0200 - Timestamp present.
+Bit  8 - mask 0x0100 - Test is 'runnable'.
+Bit  7 - mask 0x0080 - Tags are present.
+Bit  6 - mask 0x0040 - File content is present.
+Bit  5 - mask 0x0020 - File MIME type is present.
+Bit  4 - mask 0x0010 - EOF marker.
+Bit  3 - mask 0x0008 - Must be zero in version 2.
+
+Test status gets three bits:
+Bit 2 | Bit 1 | Bit 0 - mask 0x0007 - A test status enum lookup:
+000 - undefined / no test
+001 - Enumeration / existence
+002 - In progress
+003 - Success
+004 - Unexpected Success
+005 - Skipped
+006 - Failed
+007 - Expected failure
+
+After the flags field is a number field giving the length in bytes for the
+entire packet including the signature and the checksum. This length must
+be less than 4MiB - 4194303 bytes. The encoding can obviously record a larger
+number but one of the goals is to avoid requiring large buffers, or causing
+large latency in the packet forward/processing pipeline. Larger file
+attachments can be communicated in multiple packets, and the overhead in such a
+4MiB packet is approximately 0.2%.
+
+The rest of the packet is a series of optional features as specified by the set
+feature bits in the flags field. When absent they are entirely absent.
+
+Forwarding and multiplexing of packets can be done without interpreting the
+remainder of the packet until the routing code and checksum (which are both at
+the end of the packet). Additionally, routers can often avoid copying or moving
+the bulk of the packet, as long as the routing code size increase doesn't force
+the length encoding to take up a new byte (which will only happen to packets
+less than or equal to 16KiB in length) - large packets are very efficient to
+route.
+
+Timestamp when present is a 32-bit unsigned integer for seconds, and a variable
+length number for nanoseconds, representing UTC time since Unix Epoch in
+seconds and nanoseconds.
+
+Test id when present is a UTF-8 string. The test id should uniquely identify
+runnable tests such that they can be selected individually. For tests and other
+actions which cannot be individually run (such as test
+fixtures/layers/subtests) uniqueness is not required (though being human
+meaningful is highly recommended).
+
+Tags when present is a length prefixed vector of UTF-8 strings, one per tag.
+There are no restrictions on tag content (other than the restrictions on UTF-8
+strings in subunit in general). Tags have no ordering.
+
+When a MIME type is present, it defines the MIME type for the file across all
+packets for that file (routing code + testid + name uniquely identifies a file,
+reset when EOF is flagged). If a file never has a MIME type set, it should be
+treated as application/octet-stream.
+
+File content when present is a UTF-8 string for the name followed by the length
+in bytes of the content, and then the content octets.
+
+If present, the routing code is a UTF-8 string. The routing code is used to
+determine which test backend a test was running on when doing data analysis,
+and to route stdin to the test process if interaction is required.
+
+Multiplexers SHOULD add a routing code if none is present, and prefix any
+existing routing code with a routing code ('/' separated) if one is already
+present. For example, a multiplexer might label each stream it is multiplexing
+with a simple ordinal ('0', '1' etc), and given an incoming packet with route
+code '3' from stream '0' would adjust the route code when forwarding the packet
+to be '0/3'.
+
+The packet ends with a CRC-32 checksum of the packet contents, covering
+everything from the signature up to but not including the checksum itself.
+
+Example packets
+~~~~~~~~~~~~~~~
+
+Trivial test "foo" enumeration packet, with test id, runnable set,
+status=enumeration. Spaces below are to visually break up signature / flags /
+length / testid / crc32
+
+b3 2901 0c 03666f6f 08555f1b
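+
+For illustration only - a hand decoding in Python 3 that follows the field
+descriptions above, not normative code::
+
+  import binascii
+  import zlib
+
+  packet = binascii.unhexlify('b329010c03666f6f08555f1b')
+  assert packet[0:1] == b'\xb3'               # signature
+  flags = int.from_bytes(packet[1:3], 'big')
+  assert flags >> 12 == 0x2                   # version 2
+  assert flags & 0x0800                       # test id present
+  assert flags & 0x0100                       # runnable
+  assert flags & 0x0007 == 0x001              # status: enumeration
+  assert packet[3] == len(packet)             # length: one-octet number, 12
+  name_length = packet[4]
+  assert packet[5:5 + name_length] == b'foo'  # test id
+  # The final four bytes are the CRC-32 of everything before them; per the
+  # example above this prints 0x8555f1b.
+  print(hex(zlib.crc32(packet[:-4])))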
+
+
+Version 1 (and 1.1)
+===================
+
+Version 1 (and 1.1) are mostly human readable protocols.
+
+Sample subunit wire contents
+----------------------------
+
+The following::
+  test: test foo works
+  success: test foo works.
+  test: tar a file.
+  failure: tar a file. [
+  ..
+   ]..  space is eaten.
+  foo.c:34 WARNING foo is not defined.
+  ]
+  a writeln to stdout
+
+When run through subunit2pyunit::
+  .F
+  a writeln to stdout
+
+  ========================
+  FAILURE: tar a file.
+  -------------------
+  ..
+  ]..  space is eaten.
+  foo.c:34 WARNING foo is not defined.
+
+
+Subunit protocol description
+============================
+
+This description is being ported to an EBNF style. Currently it's only partly
+in that style, but should be fairly clear all the same. When in doubt, refer to the
+source (and ideally help fix up the description!). Generally the protocol is
+line orientated and consists of either directives and their parameters, or
+when outside a DETAILS region unexpected lines which are not interpreted by
+the parser - they should be forwarded unaltered.
+
+test|testing|test:|testing: test LABEL
+success|success:|successful|successful: test LABEL
+success|success:|successful|successful: test LABEL DETAILS
+failure: test LABEL
+failure: test LABEL DETAILS
+error: test LABEL
+error: test LABEL DETAILS
+skip[:] test LABEL
+skip[:] test LABEL DETAILS
+xfail[:] test LABEL
+xfail[:] test LABEL DETAILS
+uxsuccess[:] test LABEL
+uxsuccess[:] test LABEL DETAILS
+progress: [+|-]X
+progress: push
+progress: pop
+tags: [-]TAG ...
+time: YYYY-MM-DD HH:MM:SSZ
+
+LABEL: UTF8*
+NAME: UTF8*
+DETAILS ::= BRACKETED | MULTIPART
+BRACKETED ::= '[' CR UTF8-lines ']' CR
+MULTIPART ::= '[ multipart' CR PART* ']' CR
+PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+unexpected output on stdout -> stdout.
+exit w/0 or last test completing -> error
+
+Tags given outside a test are applied to all following tests
+Tags given after a test: line and before the result line for the same test
+apply only to that test, and inherit the current global tags.
+A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+applying to a single test, or to cancel a global tag.
+
+The progress directive is used to provide progress information about a stream
+so that stream consumers can provide completion estimates, progress bars and so
+on. Stream generators that know how many tests will be present in the stream
+should output "progress: COUNT". Stream filters that add tests should output
+"progress: +COUNT", and those that remove tests should output
+"progress: -COUNT". An absolute count should reset the progress indicators in
+use - it indicates that two separate streams from different generators have
+been trivially concatenated together, and there is no knowledge of how many
+more complete streams are incoming. Smart concatenation could scan each stream
+for their count and sum them, or alternatively translate absolute counts into
+relative counts inline. It is recommended that outputters avoid absolute counts
+unless necessary. The push and pop directives are used to provide local regions
+for progress reporting. This fits with hierarchically operating test
+environments - such as those that organise tests into suites - the top-most
+runner can report on the number of suites, and each suite surround its output
+with a (push, pop) pair. Interpreters should interpret a pop as also advancing
+the progress of the restored level by one step. Encountering progress
+directives between the start and end of a test pair indicates that a previous
+test was interrupted and did not cleanly terminate: it should be implicitly
+closed with an error (the same as when a stream ends with no closing test
+directive for the most recently started test).
+
+The time directive acts as a clock event - it sets the time for all future
+events. The value should be a valid ISO8601 time.
+
+The skip, xfail and uxsuccess outcomes are not supported by all testing
+environments. In Python the testtools (https://launchpad.net/testtools)
+library is used to translate these automatically if an older Python version
+that does not support them is in use. See the testtools documentation for the
+translation policy.
+
+skip is used to indicate a test was discovered but not executed. xfail is used
+to indicate a test that errored in some expected fashion (also known as "TODO"
+tests in some frameworks). uxsuccess is used to indicate an unexpected success
+where a test thought to be failing actually passes. It is complementary to
+xfail.
+
+Hacking on subunit
+------------------
+
+Releases
+========
+
+* Update versions in configure.ac and python/subunit/__init__.py.
+* Update NEWS.
+* Do a make distcheck, which will update Makefile etc.
+* Do a PyPI release: PYTHONPATH=../../python python ../../setup.py sdist upload -s
+* Upload the regular one to LP.
+* Push a tagged commit.
diff --git a/third_party/subunit/all_tests.py b/third_party/subunit/all_tests.py
new file mode 100644
index 0000000..23fd65d
--- /dev/null
+++ b/third_party/subunit/all_tests.py
@@ -0,0 +1,36 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import unittest
+
+import subunit
+
+
+class ShellTests(subunit.ExecTestCase):
+
+    def test_sourcing(self):
+        """./shell/tests/test_source_library.sh"""
+
+    def test_functions(self):
+        """./shell/tests/test_function_output.sh"""
+
+
+def test_suite():
+    result = unittest.TestSuite()
+    result.addTest(subunit.test_suite())
+    result.addTest(ShellTests('test_sourcing'))
+    result.addTest(ShellTests('test_functions'))
+    return result
diff --git a/third_party/subunit/c++/README b/third_party/subunit/c++/README
new file mode 100644
index 0000000..7205f90
--- /dev/null
+++ b/third_party/subunit/c++/README
@@ -0,0 +1,50 @@
+#
+#  subunit C++ bindings.
+#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software
+#  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+#
+
+Currently there are no native C++ bindings for subunit. However the C library
+can be used from C++ safely. A CPPUnit listener is built as part of Subunit to
+allow CPPUnit users to simply get Subunit output.
+
+To use the listener, use pkg-config (or your preferred replacement) to get the
+cflags and link settings from libcppunit_subunit.pc.
+
+In your test driver main, use SubunitTestProgressListener, as shown in this
+example main::
+
+  {
+    // Create the event manager and test controller
+    CPPUNIT_NS::TestResult controller;
+
+    // Add a listener that collects test results
+    // so we can get the overall status.
+    // note this isn't needed for subunit...
+    CPPUNIT_NS::TestResultCollector result;
+    controller.addListener( &result );
+
+    // Add a listener that prints test activity in subunit format.
+    CPPUNIT_NS::SubunitTestProgressListener progress;
+    controller.addListener( &progress );
+
+    // Add the top suite to the test runner
+    CPPUNIT_NS::TestRunner runner;
+    runner.addTest( CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest() );
+    runner.run( controller );
+
+    return result.wasSuccessful() ? 0 : 1;
+  }
diff --git a/third_party/subunit/c++/SubunitTestProgressListener.cpp b/third_party/subunit/c++/SubunitTestProgressListener.cpp
new file mode 100644
index 0000000..261e1f3
--- /dev/null
+++ b/third_party/subunit/c++/SubunitTestProgressListener.cpp
@@ -0,0 +1,63 @@
+/*  Subunit test listener for cppunit (http://cppunit.sourceforge.net).
+ *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+ *
+ *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ *  license at the users choice. A copy of both licenses are available in the
+ *  project source as Apache-2.0 and BSD. You may not use this file except in
+ *  compliance with one of these two licences.
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under these licenses is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the license you chose for the specific language governing permissions
+ *  and limitations under that license.
+ */
+
+#include <cppunit/Exception.h>
+#include <cppunit/Test.h>
+#include <cppunit/TestFailure.h>
+#include <cppunit/TextOutputter.h>
+#include <iostream>
+
+// Have to be able to import the public interface without config.h.
+#include "SubunitTestProgressListener.h"
+#include "config.h"
+#include "subunit/child.h"
+
+
+CPPUNIT_NS_BEGIN
+
+
+void
+SubunitTestProgressListener::startTest( Test *test )
+{
+  subunit_test_start(test->getName().c_str());
+  last_test_failed = false;
+}
+
+void
+SubunitTestProgressListener::addFailure( const TestFailure &failure )
+{
+  std::ostringstream capture_stream;
+  TextOutputter outputter(NULL, capture_stream);
+  outputter.printFailureLocation(failure.sourceLine());
+  outputter.printFailureDetail(failure.thrownException());
+
+  if (failure.isError())
+      subunit_test_error(failure.failedTestName().c_str(),
+			 capture_stream.str().c_str());
+  else
+      subunit_test_fail(failure.failedTestName().c_str(),
+                        capture_stream.str().c_str());
+  last_test_failed = true;
+}
+
+void
+SubunitTestProgressListener::endTest( Test *test)
+{
+  if (!last_test_failed)
+      subunit_test_pass(test->getName().c_str());
+}
+
+
+CPPUNIT_NS_END
diff --git a/third_party/subunit/c++/SubunitTestProgressListener.h b/third_party/subunit/c++/SubunitTestProgressListener.h
new file mode 100644
index 0000000..c1cea52
--- /dev/null
+++ b/third_party/subunit/c++/SubunitTestProgressListener.h
@@ -0,0 +1,55 @@
+/*  Subunit test listener for cppunit (http://cppunit.sourceforge.net).
+ *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+ *
+ *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ *  license at the users choice. A copy of both licenses are available in the
+ *  project source as Apache-2.0 and BSD. You may not use this file except in
+ *  compliance with one of these two licences.
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under these licenses is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the license you chose for the specific language governing permissions
+ *  and limitations under that license.
+ */
+#ifndef CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+#define CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+
+#include <cppunit/TestListener.h>
+
+
+CPPUNIT_NS_BEGIN
+
+
+/*!
+ * \brief TestListener that outputs subunit
+ * (http://www.robertcollins.net/unittest/subunit) compatible output.
+ * \ingroup TrackingTestExecution
+ */
+class CPPUNIT_API SubunitTestProgressListener : public TestListener
+{
+public:
+
+  SubunitTestProgressListener() {}
+
+  void startTest( Test *test );
+
+  void addFailure( const TestFailure &failure );
+
+  void endTest( Test *test );
+
+private:
+  /// Prevents the use of the copy constructor.
+  SubunitTestProgressListener( const SubunitTestProgressListener &copy );
+
+  /// Prevents the use of the copy operator.
+  void operator =( const SubunitTestProgressListener &copy );
+
+private:
+  int last_test_failed;
+};
+
+
+CPPUNIT_NS_END
+
+#endif  // CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
diff --git a/third_party/subunit/c/README b/third_party/subunit/c/README
new file mode 100644
index 0000000..1c70f85
--- /dev/null
+++ b/third_party/subunit/c/README
@@ -0,0 +1,68 @@
+#
+#  subunit C bindings.
+#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+
+This subtree contains an implementation of the subunit child protocol.
+Currently I have no plans to write a test runner in C, so I have not written
+an implementation of the parent protocol (but I will happily accept patches).
+This implementation is built using SCons and tested via 'check'.
+See the tests/ directory for the test programs.
+You can use `make check` or `scons check` to run the tests.
+
+The C protocol consists of four functions which you can use to output test
+metadata trivially. See lib/subunit_child.[ch] for details.
+
+However, this is not a test runner - subunit provides no support for (for
+instance) managing assertions, cleaning up on errors, etc. You can look at
+'check' (http://check.sourceforge.net/) or
+'gunit' (https://garage.maemo.org/projects/gunit) for C unit test
+frameworks.
+There is a patch for 'check' (check-subunit-*.patch) in this source tree.
+It's also available as request ID #1470750 in the SourceForge request tracker
+http://sourceforge.net/tracker/index.php. The 'check' developers have indicated
+they will merge this during the current release cycle.
+
+If you are a test environment maintainer - whether homegrown, 'check',
+'gunit' or some other - you will want to know how the subunit calls should be
+used. Here is what a manually written test using the bindings might look like:
+
+
+void
+a_test(void) {
+  int result;
+  char error[128];
+  subunit_test_start("test name");
+  /* determine if test passes or fails */
+  result = SOME_VALUE;
+  if (!result) {
+    subunit_test_pass("test name");
+  } else {
+    snprintf(error, sizeof(error),
+             "Something went wrong running something:\n"
+             "exited with result: '%d'", result);
+    subunit_test_fail("test name", error);
+  }
+}
+
+When run with a subunit test runner, this will generate something like:
+test name ... ok
+
+on success, and:
+
+test name ... FAIL
+
+======================================================================
+FAIL: test name
+----------------------------------------------------------------------
+RemoteError:
+Something went wrong running something:
+exited with result: '1'
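+
+For completeness, skips and errors use the same calling pattern (illustrative
+only; the test name and reason below are made up):
+
+  subunit_test_start("test name");
+  subunit_test_skip("test name", "not supported on this platform");
+
+A subunit consumer will normally report this as a skipped test rather than a
+failure.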
diff --git a/third_party/subunit/c/include/subunit/child.h b/third_party/subunit/c/include/subunit/child.h
new file mode 100644
index 0000000..eb1384f
--- /dev/null
+++ b/third_party/subunit/c/include/subunit/child.h
@@ -0,0 +1,96 @@
+/**
+ *
+ *  subunit C bindings.
+ *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+ *
+ *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ *  license at the users choice. A copy of both licenses are available in the
+ *  project source as Apache-2.0 and BSD. You may not use this file except in
+ *  compliance with one of these two licences.
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under these licenses is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the license you chose for the specific language governing permissions
+ *  and limitations under that license.
+ **/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * subunit_test_start:
+ *
+ * Report that a test is starting.
+ * @name: test case name
+ */
+extern void subunit_test_start(char const * const name);
+
+
+/**
+ * subunit_test_pass:
+ *
+ * Report that a test has passed.
+ *
+ * @name: test case name
+ */
+extern void subunit_test_pass(char const * const name);
+
+
+/**
+ * subunit_test_fail:
+ *
+ * Report that a test has failed.
+ * @name: test case name
+ * @error: a string describing the error.
+ */
+extern void subunit_test_fail(char const * const name, char const * const error);
+
+
+/**
+ * subunit_test_error:
+ *
+ * Report that a test has errored. An error is an unintentional failure - i.e.
+ * a segfault rather than a failed assertion.
+ * @name: test case name
+ * @error: a string describing the error.
+ */
+extern void subunit_test_error(char const * const name,
+                               char const * const error);
+
+
+/**
+ * subunit_test_skip:
+ *
+ * Report that a test has been skipped. A skip is a test that has not run to
+ * conclusion but hasn't given an error either - its result is unknown.
+ * @name: test case name
+ * @reason: a string describing the reason for the skip.
+ */
+extern void subunit_test_skip(char const * const name,
+			      char const * const reason);
+
+
+enum subunit_progress_whence {
+	SUBUNIT_PROGRESS_SET,
+	SUBUNIT_PROGRESS_CUR,
+	SUBUNIT_PROGRESS_POP,
+	SUBUNIT_PROGRESS_PUSH,
+};
+
+/**
+ * subunit_progress:
+ *
+ * Report the progress of a test run.
+ * @whence: The type of progress update to report.
+ * @offset: Offset of the progress (only for SUBUNIT_PROGRESS_SET
+ * 			and SUBUNIT_PROGRESS_CUR).
+ */
+extern void subunit_progress(enum subunit_progress_whence whence, int offset);
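+
+/* Illustrative only (not part of the upstream documentation): a runner that
+ * knows it will execute three tests could announce the total once and then
+ * advance the current position after each test completes:
+ *
+ *   subunit_progress(SUBUNIT_PROGRESS_SET, 3);
+ *   subunit_progress(SUBUNIT_PROGRESS_CUR, 1);
+ */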
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/third_party/subunit/c/lib/child.c b/third_party/subunit/c/lib/child.c
new file mode 100644
index 0000000..2d62947
--- /dev/null
+++ b/third_party/subunit/c/lib/child.c
@@ -0,0 +1,104 @@
+/**
+ *
+ *  subunit C child-side bindings: report on tests being run.
+ *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+ *
+ *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ *  license at the users choice. A copy of both licenses are available in the
+ *  project source as Apache-2.0 and BSD. You may not use this file except in
+ *  compliance with one of these two licences.
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under these licenses is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the license you chose for the specific language governing permissions
+ *  and limitations under that license.
+ **/
+
+#include <stdio.h>
+#include <string.h>
+#include "subunit/child.h"
+
+/* Write details about a test event. It is the caller's responsibility to ensure
+ * that details are only provided for events the protocol expects details on.
+ * @event: The event - e.g. 'skip'
+ * @name: The test name/id.
+ * @details: The details of the event, may be NULL if no details are present.
+ */
+static void
+subunit_send_event(char const * const event, char const * const name,
+		   char const * const details)
+{
+  if (NULL == details) {
+    fprintf(stdout, "%s: %s\n", event, name);
+  } else {
+    fprintf(stdout, "%s: %s [\n", event, name);
+    fprintf(stdout, "%s", details);
+    if (details[strlen(details) - 1] != '\n')
+      fprintf(stdout, "\n");
+    fprintf(stdout, "]\n");
+  }
+  fflush(stdout);
+}
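+
+/* For example (illustrative), subunit_send_event("failure", "t", "boom\n")
+ * emits:
+ *
+ *   failure: t [
+ *   boom
+ *   ]
+ */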
+
+/* these functions all flush to ensure that the test runner knows the action
+ * that has been taken even if the subsequent test etc takes a long time or
+ * never completes (i.e. a segfault).
+ */
+
+void
+subunit_test_start(char const * const name)
+{
+  subunit_send_event("test", name, NULL);
+}
+
+
+void
+subunit_test_pass(char const * const name)
+{
+  /* TODO: add success details as an option */
+  subunit_send_event("success", name, NULL);
+}
+
+
+void
+subunit_test_fail(char const * const name, char const * const error)
+{
+  subunit_send_event("failure", name, error);
+}
+
+
+void
+subunit_test_error(char const * const name, char const * const error)
+{
+  subunit_send_event("error", name, error);
+}
+
+
+void
+subunit_test_skip(char const * const name, char const * const reason)
+{
+  subunit_send_event("skip", name, reason);
+}
+
+void
+subunit_progress(enum subunit_progress_whence whence, int offset)
+{
+	switch (whence) {
+	case SUBUNIT_PROGRESS_SET:
+		printf("progress: %d\n", offset);
+		break;
+	case SUBUNIT_PROGRESS_CUR:
+		printf("progress: %+-d\n", offset);
+		break;
+	case SUBUNIT_PROGRESS_POP:
+		printf("progress: pop\n");
+		break;
+	case SUBUNIT_PROGRESS_PUSH:
+		printf("progress: push\n");
+		break;
+	default:
+		fprintf(stderr, "Invalid whence %d in subunit_progress()\n", whence);
+		break;
+	}
+}
diff --git a/third_party/subunit/c/tests/test_child.c b/third_party/subunit/c/tests/test_child.c
new file mode 100644
index 0000000..29af4db
--- /dev/null
+++ b/third_party/subunit/c/tests/test_child.c
@@ -0,0 +1,234 @@
+/**
+ *
+ *  subunit C bindings.
+ *  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+ *
+ *  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ *  license at the users choice. A copy of both licenses are available in the
+ *  project source as Apache-2.0 and BSD. You may not use this file except in
+ *  compliance with one of these two licences.
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under these licenses is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the license you chose for the specific language governing permissions
+ *  and limitations under that license.
+ **/
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <check.h>
+
+#include "subunit/child.h"
+
+/**
+ * Helper function to capture stdout, run some call, and check what
+ * was written.
+ * @expected the expected stdout content
+ * @function the function to call.
+ **/
+static void
+test_stdout_function(char const * expected,
+                     void (*function)(void))
+{
+    /* test that the start function emits a correct test: line. */
+    int bytecount;
+    int old_stdout;
+    int new_stdout[2];
+    char buffer[100];
+    /* we need a pipe to capture stdout in */
+    fail_if(pipe(new_stdout), "Failed to create a pipe.");
+    /* backup stdout so we can replace it */
+    old_stdout = dup(1);
+    if (old_stdout == -1) {
+      close(new_stdout[0]);
+      close(new_stdout[1]);
+      fail("Failed to backup stdout before replacing.");
+    }
+    /* redirect stdout so we can analyse it */
+    if (dup2(new_stdout[1], 1) != 1) {
+      close(old_stdout);
+      close(new_stdout[0]);
+      close(new_stdout[1]);
+      fail("Failed to redirect stdout");
+    }
+    /* yes this can block. It's a test case with < 100 bytes of output.
+     * DEAL.
+     */
+    function();
+    /* flush writes on FILE object to file descriptor */
+    fflush(stdout);
+    /* restore stdout now */
+    if (dup2(old_stdout, 1) != 1) {
+      close(old_stdout);
+      close(new_stdout[0]);
+      close(new_stdout[1]);
+      fail("Failed to restore stdout");
+    }
+    /* and we don't need the write side any more */
+    if (close(new_stdout[1])) {
+      close(new_stdout[0]);
+      fail("Failed to close write side of the pipe.");
+    }
+    /* get the output */
+    bytecount = read(new_stdout[0], buffer, sizeof(buffer) - 1);
+    if (0 > bytecount) {
+      close(new_stdout[0]);
+      fail("Failed to read captured output.");
+    }
+    buffer[bytecount]='\0';
+    /* and we don't need the read side any more */
+    fail_if(close(new_stdout[0]), "Failed to close read side of the pipe.");
+    /* compare with expected outcome */
+    fail_if(strcmp(expected, buffer), "Did not get expected output [%s], got [%s]", expected, buffer);
+}
+
+
+static void
+call_test_start(void)
+{
+    subunit_test_start("test case");
+}
+
+
+START_TEST (test_start)
+{
+    test_stdout_function("test: test case\n", call_test_start);
+}
+END_TEST
+
+
+static void
+call_test_pass(void)
+{
+    subunit_test_pass("test case");
+}
+
+
+START_TEST (test_pass)
+{
+    test_stdout_function("success: test case\n", call_test_pass);
+}
+END_TEST
+
+
+static void
+call_test_fail(void)
+{
+    subunit_test_fail("test case", "Multiple lines\n of error\n");
+}
+
+
+START_TEST (test_fail)
+{
+    test_stdout_function("failure: test case [\n"
+                         "Multiple lines\n"
+			 " of error\n"
+			 "]\n",
+			 call_test_fail);
+}
+END_TEST
+
+
+static void
+call_test_error(void)
+{
+    subunit_test_error("test case", "Multiple lines\n of output\n");
+}
+
+
+START_TEST (test_error)
+{
+    test_stdout_function("error: test case [\n"
+                         "Multiple lines\n"
+			 " of output\n"
+			 "]\n",
+			 call_test_error);
+}
+END_TEST
+
+
+static void
+call_test_skip(void)
+{
+    subunit_test_skip("test case", "Multiple lines\n of output\n");
+}
+
+
+START_TEST (test_skip)
+{
+    test_stdout_function("skip: test case [\n"
+                         "Multiple lines\n"
+			 " of output\n"
+			 "]\n",
+			 call_test_skip);
+}
+END_TEST
+
+
+static void
+call_test_progress_pop(void)
+{
+	subunit_progress(SUBUNIT_PROGRESS_POP, 0);
+}
+
+static void
+call_test_progress_set(void)
+{
+	subunit_progress(SUBUNIT_PROGRESS_SET, 5);
+}
+
+static void
+call_test_progress_push(void)
+{
+	subunit_progress(SUBUNIT_PROGRESS_PUSH, 0);
+}
+
+static void
+call_test_progress_cur(void)
+{
+	subunit_progress(SUBUNIT_PROGRESS_CUR, -6);
+}
+
+START_TEST (test_progress)
+{
+	test_stdout_function("progress: pop\n",
+			 call_test_progress_pop);
+	test_stdout_function("progress: push\n",
+			 call_test_progress_push);
+	test_stdout_function("progress: 5\n",
+			 call_test_progress_set);
+	test_stdout_function("progress: -6\n",
+			 call_test_progress_cur);
+}
+END_TEST
+
+static Suite *
+child_suite(void)
+{
+    Suite *s = suite_create("subunit_child");
+    TCase *tc_core = tcase_create("Core");
+    suite_add_tcase (s, tc_core);
+    tcase_add_test (tc_core, test_start);
+    tcase_add_test (tc_core, test_pass);
+    tcase_add_test (tc_core, test_fail);
+    tcase_add_test (tc_core, test_error);
+    tcase_add_test (tc_core, test_skip);
+    tcase_add_test (tc_core, test_progress);
+    return s;
+}
+
+
+int
+main(void)
+{
+  int nf;
+  Suite *s = child_suite();
+  SRunner *sr = srunner_create(s);
+  srunner_run_all(sr, CK_NORMAL);
+  nf = srunner_ntests_failed(sr);
+  srunner_free(sr);
+  return (nf == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/third_party/subunit/c/wscript b/third_party/subunit/c/wscript
new file mode 100644
index 0000000..5273e5d
--- /dev/null
+++ b/third_party/subunit/c/wscript
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import Options
+
+def configure(conf):
+    if conf.CHECK_BUNDLED_SYSTEM_PKG('subunit', pkg='libsubunit'):
+        conf.define('USING_SYSTEM_SUBUNIT', 1)
+
+def build(bld):
+    if bld.CONFIG_SET('USING_SYSTEM_SUBUNIT'):
+        return
+
+    bld.SAMBA_LIBRARY('subunit',
+              source='lib/child.c',
+              private_library=True,
+              includes='include')
diff --git a/third_party/subunit/configure.ac b/third_party/subunit/configure.ac
new file mode 100644
index 0000000..ef1a048
--- /dev/null
+++ b/third_party/subunit/configure.ac
@@ -0,0 +1,76 @@
+m4_define([SUBUNIT_MAJOR_VERSION], [0])
+m4_define([SUBUNIT_MINOR_VERSION], [0])
+m4_define([SUBUNIT_MICRO_VERSION], [21])
+m4_define([SUBUNIT_VERSION],
+m4_defn([SUBUNIT_MAJOR_VERSION]).m4_defn([SUBUNIT_MINOR_VERSION]).m4_defn([SUBUNIT_MICRO_VERSION]))
+AC_PREREQ([2.59])
+AC_INIT([subunit], [SUBUNIT_VERSION], [subunit-dev at lists.launchpad.net])
+AC_CONFIG_SRCDIR([c/lib/child.c])
+AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects])
+AC_CONFIG_MACRO_DIR([m4])
+[SUBUNIT_MAJOR_VERSION]=SUBUNIT_MAJOR_VERSION
+[SUBUNIT_MINOR_VERSION]=SUBUNIT_MINOR_VERSION
+[SUBUNIT_MICRO_VERSION]=SUBUNIT_MICRO_VERSION
+[SUBUNIT_VERSION]=SUBUNIT_VERSION
+AC_SUBST([SUBUNIT_MAJOR_VERSION])
+AC_SUBST([SUBUNIT_MINOR_VERSION])
+AC_SUBST([SUBUNIT_MICRO_VERSION])
+AC_SUBST([SUBUNIT_VERSION])
+AC_USE_SYSTEM_EXTENSIONS
+AC_PROG_CC
+AC_PROG_CXX
+m4_ifdef([AM_PROG_AR], [AM_PROG_AR])
+AM_PROG_CC_C_O
+AC_PROG_INSTALL
+AC_PROG_LN_S
+AC_PROG_LIBTOOL
+AM_PATH_PYTHON
+
+AS_IF([test "$GCC" = "yes"],
+      [
+  SUBUNIT_CFLAGS="-Wall -Werror -Wextra -Wstrict-prototypes "
+  SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wmissing-prototypes -Wwrite-strings "
+  SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wno-variadic-macros "
+  SUBUNIT_CXXFLAGS="-Wall -Werror -Wextra -Wwrite-strings -Wno-variadic-macros"
+      ])
+
+AM_CFLAGS="$SUBUNIT_CFLAGS -I\$(top_srcdir)/c/include"
+AM_CXXFLAGS="$SUBUNIT_CXXFLAGS -I\$(top_srcdir)/c/include"
+AC_SUBST(AM_CFLAGS)
+AC_SUBST(AM_CXXFLAGS)
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_CHECK_HEADERS([stdlib.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_HEADER_TIME
+AC_STRUCT_TM
+
+AC_CHECK_SIZEOF(int, 4)
+AC_CHECK_SIZEOF(short, 2)
+AC_CHECK_SIZEOF(long, 4)
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_REALLOC
+
+# Easier memory management.
+# C unit testing.
+PKG_CHECK_MODULES([CHECK], [check >= 0.9.4])
+# C++ unit testing.
+PKG_CHECK_MODULES([CPPUNIT], [cppunit])
+
+# Output files
+AC_CONFIG_HEADERS([config.h])
+
+AC_CONFIG_FILES([libsubunit.pc
+		 libcppunit_subunit.pc
+                 Makefile
+		 perl/Makefile.PL
+                 ])
+AC_OUTPUT
diff --git a/third_party/subunit/filters/subunit-1to2 b/third_party/subunit/filters/subunit-1to2
new file mode 100755
index 0000000..d59447b
--- /dev/null
+++ b/third_party/subunit/filters/subunit-1to2
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Convert a version 1 subunit stream to version 2 stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import ExtendedToStreamDecorator
+
+from subunit import StreamResultToBytes
+from subunit.filters import find_stream, run_tests_from_stream
+
+
+def make_options(description):
+    parser = OptionParser(description=__doc__)
+    return parser
+
+
+def main():
+    parser = make_options(__doc__)
+    (options, args) = parser.parse_args()
+    run_tests_from_stream(find_stream(sys.stdin, args),
+        ExtendedToStreamDecorator(StreamResultToBytes(sys.stdout)))
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/third_party/subunit/filters/subunit-2to1 b/third_party/subunit/filters/subunit-2to1
new file mode 100755
index 0000000..4dc36b9
--- /dev/null
+++ b/third_party/subunit/filters/subunit-2to1
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Convert a version 2 subunit stream to a version 1 stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import ByteStreamToStreamResult, TestProtocolClient
+from subunit.filters import find_stream, run_tests_from_stream
+
+
+def make_options(description):
+    parser = OptionParser(description=__doc__)
+    return parser
+
+
+def main():
+    parser = make_options(__doc__)
+    (options, args) = parser.parse_args()
+    case = ByteStreamToStreamResult(
+        find_stream(sys.stdin, args), non_subunit_name='stdout')
+    result = StreamToExtendedDecorator(TestProtocolClient(sys.stdout))
+    # What about stdout chunks?
+    result.startTestRun()
+    case.run(result)
+    result.stopTestRun()
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/third_party/subunit/filters/subunit-filter b/third_party/subunit/filters/subunit-filter
new file mode 100755
index 0000000..df83882
--- /dev/null
+++ b/third_party/subunit/filters/subunit-filter
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 200-2013  Robert Collins <robertc at robertcollins.net>
+#            (C) 2009  Martin Pool
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Filter a subunit stream to include/exclude tests.
+
+The default is to strip successful tests.
+
+Tests can be filtered by Python regular expressions with --with and --without,
+which match both the test name and the error text (if any).  The result
+contains tests which match any of the --with expressions and none of the
+--without expressions.  For case-insensitive matching prepend '(?i)'.
+Remember to quote shell metacharacters.
+"""
+
+from optparse import OptionParser
+import sys
+import re
+
+from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
+
+from subunit import (
+    DiscardStream,
+    ProtocolTestCase,
+    StreamResultToBytes,
+    read_test_list,
+    )
+from subunit.filters import filter_by_result, find_stream
+from subunit.test_results import (
+    and_predicates,
+    make_tag_filter,
+    TestResultFilter,
+    )
+
+
+def make_options(description):
+    parser = OptionParser(description=__doc__)
+    parser.add_option("--error", action="store_false",
+        help="include errors", default=False, dest="error")
+    parser.add_option("-e", "--no-error", action="store_true",
+        help="exclude errors", dest="error")
+    parser.add_option("--failure", action="store_false",
+        help="include failures", default=False, dest="failure")
+    parser.add_option("-f", "--no-failure", action="store_true",
+        help="exclude failures", dest="failure")
+    parser.add_option("--passthrough", action="store_false",
+        help="Forward non-subunit input as 'stdout'.", default=False,
+        dest="no_passthrough")
+    parser.add_option("--no-passthrough", action="store_true",
+        help="Discard all non subunit input.", default=False,
+        dest="no_passthrough")
+    parser.add_option("-s", "--success", action="store_false",
+        help="include successes", dest="success")
+    parser.add_option("--no-success", action="store_true",
+        help="exclude successes", default=True, dest="success")
+    parser.add_option("--no-skip", action="store_true",
+        help="exclude skips", dest="skip")
+    parser.add_option("--xfail", action="store_false",
+        help="include expected falures", default=True, dest="xfail")
+    parser.add_option("--no-xfail", action="store_true",
+        help="exclude expected falures", default=True, dest="xfail")
+    parser.add_option(
+        "--with-tag", type=str,
+        help="include tests with these tags", action="append", dest="with_tags")
+    parser.add_option(
+        "--without-tag", type=str,
+        help="exclude tests with these tags", action="append", dest="without_tags")
+    parser.add_option("-m", "--with", type=str,
+        help="regexp to include (case-sensitive by default)",
+        action="append", dest="with_regexps")
+    parser.add_option("--fixup-expected-failures", type=str,
+        help="File with list of test ids that are expected to fail; on failure "
+             "their result will be changed to xfail; on success they will be "
+             "changed to error.", dest="fixup_expected_failures", action="append")
+    parser.add_option("--without", type=str,
+        help="regexp to exclude (case-sensitive by default)",
+        action="append", dest="without_regexps")
+    parser.add_option("-F", "--only-genuine-failures", action="callback",
+        callback=only_genuine_failures_callback,
+        help="Only pass through failures and exceptions.")
+    return parser
+
+
+def only_genuine_failures_callback(option, opt, value, parser):
+    parser.rargs.insert(0, '--no-passthrough')
+    parser.rargs.insert(0, '--no-xfail')
+    parser.rargs.insert(0, '--no-skip')
+    parser.rargs.insert(0, '--no-success')
+
+
+def _compile_re_from_list(l):
+    return re.compile("|".join(l), re.MULTILINE)
+
+
+def _make_regexp_filter(with_regexps, without_regexps):
+    """Make a callback that checks tests against regexps.
+
+    with_regexps and without_regexps are each either a list of regexp strings,
+    or None.
+    """
+    with_re = with_regexps and _compile_re_from_list(with_regexps)
+    without_re = without_regexps and _compile_re_from_list(without_regexps)
+
+    def check_regexps(test, outcome, err, details, tags):
+        """Check if this test and error match the regexp filters."""
+        test_str = str(test) + outcome + str(err) + str(details)
+        if with_re and not with_re.search(test_str):
+            return False
+        if without_re and without_re.search(test_str):
+            return False
+        return True
+    return check_regexps
+
+
+def _make_result(output, options, predicate):
+    """Make the result that we'll send the test outcomes to."""
+    fixup_expected_failures = set()
+    for path in options.fixup_expected_failures or ():
+        fixup_expected_failures.update(read_test_list(path))
+    return StreamToExtendedDecorator(TestResultFilter(
+        ExtendedToStreamDecorator(
+        StreamResultToBytes(output)),
+        filter_error=options.error,
+        filter_failure=options.failure,
+        filter_success=options.success,
+        filter_skip=options.skip,
+        filter_xfail=options.xfail,
+        filter_predicate=predicate,
+        fixup_expected_failures=fixup_expected_failures))
+
+
+def main():
+    parser = make_options(__doc__)
+    (options, args) = parser.parse_args()
+
+    regexp_filter = _make_regexp_filter(
+        options.with_regexps, options.without_regexps)
+    tag_filter = make_tag_filter(options.with_tags, options.without_tags)
+    filter_predicate = and_predicates([regexp_filter, tag_filter])
+
+    filter_by_result(
+        lambda output_to: _make_result(sys.stdout, options, filter_predicate),
+        output_path=None,
+        passthrough=(not options.no_passthrough),
+        forward=False,
+        protocol_version=2,
+        input_stream=find_stream(sys.stdin, args))
+    sys.exit(0)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/third_party/subunit/filters/subunit-ls b/third_party/subunit/filters/subunit-ls
new file mode 100755
index 0000000..db6674a
--- /dev/null
+++ b/third_party/subunit/filters/subunit-ls
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2008  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""List tests in a subunit stream."""
+
+from optparse import OptionParser
+import sys
+
+from testtools import (
+    CopyStreamResult, StreamToExtendedDecorator, StreamResultRouter,
+    StreamSummary)
+
+from subunit import ByteStreamToStreamResult
+from subunit.filters import find_stream, run_tests_from_stream
+from subunit.test_results import (
+    CatFiles,
+    TestIdPrintingResult,
+    )
+
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--times", action="store_true",
+    help="list the time each test took (requires a timestamped stream)",
+        default=False)
+parser.add_option("--exists", action="store_true",
+    help="list tests that are reported as existing (as well as ran)",
+        default=False)
+parser.add_option("--no-passthrough", action="store_true",
+    help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+test = ByteStreamToStreamResult(
+    find_stream(sys.stdin, args), non_subunit_name="stdout")
+result = TestIdPrintingResult(sys.stdout, options.times, options.exists)
+if not options.no_passthrough:
+    result = StreamResultRouter(result)
+    cat = CatFiles(sys.stdout)
+    result.add_rule(cat, 'test_id', test_id=None)
+summary = StreamSummary()
+result = CopyStreamResult([result, summary])
+result.startTestRun()
+test.run(result)
+result.stopTestRun()
+if summary.wasSuccessful():
+    exit_code = 0
+else:
+    exit_code = 1
+sys.exit(exit_code)
diff --git a/third_party/subunit/filters/subunit-notify b/third_party/subunit/filters/subunit-notify
new file mode 100755
index 0000000..619c1b1
--- /dev/null
+++ b/third_party/subunit/filters/subunit-notify
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2010 Jelmer Vernooij <jelmer at samba.org>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Notify the user of a finished test run."""
+
+import sys
+
+import pygtk
+pygtk.require('2.0')
+import pynotify
+from testtools import StreamToExtendedDecorator
+
+from subunit import TestResultStats
+from subunit.filters import run_filter_script
+
+if not pynotify.init("Subunit-notify"):
+    sys.exit(1)
+
+
+def notify_of_result(result):
+    result = result.decorated
+    if result.failed_tests > 0:
+        summary = "Test run failed"
+    else:
+        summary = "Test run successful"
+    body = "Total tests: %d; Passed: %d; Failed: %d" % (
+        result.total_tests,
+        result.passed_tests,
+        result.failed_tests,
+    )
+    nw = pynotify.Notification(summary, body)
+    nw.show()
+
+
+run_filter_script(
+    lambda output:StreamToExtendedDecorator(TestResultStats(output)),
+    __doc__, notify_of_result, protocol_version=2)
diff --git a/third_party/subunit/filters/subunit-output b/third_party/subunit/filters/subunit-output
new file mode 100644
index 0000000..61e5d11
--- /dev/null
+++ b/third_party/subunit/filters/subunit-output
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2013 Subunit Contributors
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+
+
+"""A command-line tool to generate a subunit result byte-stream."""
+
+from subunit._output import output_main
+
+
+if __name__ == '__main__':
+    exit(output_main())
diff --git a/third_party/subunit/filters/subunit-stats b/third_party/subunit/filters/subunit-stats
new file mode 100755
index 0000000..d15f8dd
--- /dev/null
+++ b/third_party/subunit/filters/subunit-stats
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import TestResultStats
+from subunit.filters import run_filter_script
+
+
+result = TestResultStats(sys.stdout)
+def show_stats(r):
+    r.decorated.formatStats()
+run_filter_script(
+    lambda output:StreamToExtendedDecorator(result),
+    __doc__, show_stats, protocol_version=2, passthrough_subunit=False)
diff --git a/third_party/subunit/filters/subunit-tags b/third_party/subunit/filters/subunit-tags
new file mode 100755
index 0000000..e83b646
--- /dev/null
+++ b/third_party/subunit/filters/subunit-tags
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""A filter to change tags on a subunit stream.
+
+subunit-tags foo -> adds foo
+subunit-tags foo -bar -> adds foo and removes bar
+"""
+
+import sys
+
+from subunit import tag_stream
+
+sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
diff --git a/third_party/subunit/filters/subunit2csv b/third_party/subunit/filters/subunit2csv
new file mode 100755
index 0000000..4adf5cd
--- /dev/null
+++ b/third_party/subunit/filters/subunit2csv
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Turn a subunit stream into a CSV"""
+
+from testtools import StreamToExtendedDecorator
+
+from subunit.filters import run_filter_script
+from subunit.test_results import CsvResult
+
+
+run_filter_script(lambda output:StreamToExtendedDecorator(CsvResult(output)),
+    __doc__, protocol_version=2)
diff --git a/third_party/subunit/filters/subunit2gtk b/third_party/subunit/filters/subunit2gtk
new file mode 100755
index 0000000..4504af9
--- /dev/null
+++ b/third_party/subunit/filters/subunit2gtk
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+### The GTK progress bar __init__ function is derived from the pygtk tutorial:
+# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
+#
+# The GTK Tutorial is Copyright (C) 1997 Ian Main.
+#
+# Copyright (C) 1998-1999 Tony Gale.
+#
+# Permission is granted to make and distribute verbatim copies of this manual
+# provided the copyright notice and this permission notice are preserved on all
+# copies.
+#
+# Permission is granted to copy and distribute modified versions of this
+# document under the conditions for verbatim copying, provided that this
+# copyright notice is included exactly as in the original, and that the entire
+# resulting derived work is distributed under the terms of a permission notice
+# identical to this one.
+#
+# Permission is granted to copy and distribute translations of this document
+# into another language, under the above conditions for modified versions.
+#
+# If you are intending to incorporate this document into a published work,
+# please contact the maintainer, and we will make an effort to ensure that you
+# have the most up to date information available.
+#
+# There is no guarantee that this document lives up to its intended purpose.
+# This is simply provided as a free resource. As such, the authors and
+# maintainers of the information provided within can not make any guarantee
+# that the information is even accurate.
+
+"""Display a subunit stream in a gtk progress window."""
+
+import sys
+import threading
+import unittest
+
+import pygtk
+pygtk.require('2.0')
+import gtk, gtk.gdk, gobject
+
+from testtools import StreamToExtendedDecorator
+
+from subunit import (
+    PROGRESS_POP,
+    PROGRESS_PUSH,
+    PROGRESS_SET,
+    ByteStreamToStreamResult,
+    )
+from subunit.progress_model import ProgressModel
+
+
+class GTKTestResult(unittest.TestResult):
+
+    def __init__(self):
+        super(GTKTestResult, self).__init__()
+        # Instance variables (in addition to TestResult)
+        self.window = None
+        self.run_label = None
+        self.ok_label = None
+        self.not_ok_label = None
+        self.total_tests = None
+
+        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+        self.window.set_resizable(True)
+
+        self.window.connect("destroy", gtk.main_quit)
+        self.window.set_title("Tests...")
+        self.window.set_border_width(0)
+
+        vbox = gtk.VBox(False, 5)
+        vbox.set_border_width(10)
+        self.window.add(vbox)
+        vbox.show()
+
+        # Create a centering alignment object
+        align = gtk.Alignment(0.5, 0.5, 0, 0)
+        vbox.pack_start(align, False, False, 5)
+        align.show()
+
+        # Create the ProgressBar
+        self.pbar = gtk.ProgressBar()
+        align.add(self.pbar)
+        self.pbar.set_text("Running")
+        self.pbar.show()
+        self.progress_model = ProgressModel()
+
+        separator = gtk.HSeparator()
+        vbox.pack_start(separator, False, False, 0)
+        separator.show()
+
+        # rows, columns, homogeneous
+        table = gtk.Table(2, 3, False)
+        vbox.pack_start(table, False, True, 0)
+        table.show()
+        # Show summary details about the run. Could use an expander.
+        label = gtk.Label("Run:")
+        table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
+            gtk.EXPAND | gtk.FILL, 5, 5)
+        label.show()
+        self.run_label = gtk.Label("N/A")
+        table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
+            gtk.EXPAND | gtk.FILL, 5, 5)
+        self.run_label.show()
+
+        label = gtk.Label("OK:")
+        table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
+            gtk.EXPAND | gtk.FILL, 5, 5)
+        label.show()
+        self.ok_label = gtk.Label("N/A")
+        table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
+            gtk.EXPAND | gtk.FILL, 5, 5)
+        self.ok_label.show()
+
+        label = gtk.Label("Not OK:")
+        table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
+            gtk.EXPAND | gtk.FILL, 5, 5)
+        label.show()
+        self.not_ok_label = gtk.Label("N/A")
+        table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
+            gtk.EXPAND | gtk.FILL, 5, 5)
+        self.not_ok_label.show()
+
+        self.window.show()
+        # For the demo.
+        self.window.set_keep_above(True)
+        self.window.present()
+
+    def stopTest(self, test):
+        super(GTKTestResult, self).stopTest(test)
+        gobject.idle_add(self._stopTest)
+
+    def _stopTest(self):
+        self.progress_model.advance()
+        if self.progress_model.width() == 0:
+            self.pbar.pulse()
+        else:
+            pos = self.progress_model.pos()
+            width = self.progress_model.width()
+            percentage = (pos / float(width))
+            self.pbar.set_fraction(percentage)
+
+    def stopTestRun(self):
+        try:
+            super(GTKTestResult, self).stopTestRun()
+        except AttributeError:
+            pass
+        gobject.idle_add(self.pbar.set_text, 'Finished')
+
+    def addError(self, test, err):
+        super(GTKTestResult, self).addError(test, err)
+        gobject.idle_add(self.update_counts)
+
+    def addFailure(self, test, err):
+        super(GTKTestResult, self).addFailure(test, err)
+        gobject.idle_add(self.update_counts)
+
+    def addSuccess(self, test):
+        super(GTKTestResult, self).addSuccess(test)
+        gobject.idle_add(self.update_counts)
+
+    def addSkip(self, test, reason):
+        # addSkip is new in Python 2.7/3.1
+        addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
+        if callable(addSkip):
+            addSkip(test, reason)
+        gobject.idle_add(self.update_counts)
+
+    def addExpectedFailure(self, test, err):
+        # addExpectedFailure is new in Python 2.7/3.1
+        addExpectedFailure = getattr(super(GTKTestResult, self),
+            'addExpectedFailure', None)
+        if callable(addExpectedFailure):
+            addExpectedFailure(test, err)
+        gobject.idle_add(self.update_counts)
+
+    def addUnexpectedSuccess(self, test):
+        # addUnexpectedSuccess is new in Python 2.7/3.1
+        addUnexpectedSuccess = getattr(super(GTKTestResult, self),
+            'addUnexpectedSuccess', None)
+        if callable(addUnexpectedSuccess):
+            addUnexpectedSuccess(test)
+        gobject.idle_add(self.update_counts)
+
+    def progress(self, offset, whence):
+        if whence == PROGRESS_PUSH:
+            self.progress_model.push()
+        elif whence == PROGRESS_POP:
+            self.progress_model.pop()
+        elif whence == PROGRESS_SET:
+            self.total_tests = offset
+            self.progress_model.set_width(offset)
+        else:
+            self.total_tests += offset
+            self.progress_model.adjust_width(offset)
+
+    def time(self, a_datetime):
+        # We don't try to estimate completion yet.
+        pass
+
+    def update_counts(self):
+        self.run_label.set_text(str(self.testsRun))
+        bad = len(self.failures + self.errors)
+        self.ok_label.set_text(str(self.testsRun - bad))
+        self.not_ok_label.set_text(str(bad))
+
+gobject.threads_init()
+result = StreamToExtendedDecorator(GTKTestResult())
+test = ByteStreamToStreamResult(sys.stdin, non_subunit_name='stdout')
+# Get setup
+while gtk.events_pending():
+  gtk.main_iteration()
+# Start IO
+def run_and_finish():
+    test.run(result)
+    result.stopTestRun()
+t = threading.Thread(target=run_and_finish)
+t.daemon = True
+result.startTestRun()
+t.start()
+gtk.main()
+if result.decorated.wasSuccessful():
+    exit_code = 0
+else:
+    exit_code = 1
+sys.exit(exit_code)
diff --git a/third_party/subunit/filters/subunit2junitxml b/third_party/subunit/filters/subunit2junitxml
new file mode 100755
index 0000000..3ed078f
--- /dev/null
+++ b/third_party/subunit/filters/subunit2junitxml
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+
+import sys
+
+from testtools import StreamToExtendedDecorator
+
+from subunit.filters import run_filter_script
+
+try:
+    from junitxml import JUnitXmlResult
+except ImportError:
+    sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
+        "http://pypi.python.org/pypi/junitxml) is required for this filter.")
+    raise
+
+
+run_filter_script(
+    lambda output:StreamToExtendedDecorator(JUnitXmlResult(output)), __doc__,
+    protocol_version=2)
diff --git a/third_party/subunit/filters/subunit2pyunit b/third_party/subunit/filters/subunit2pyunit
new file mode 100755
index 0000000..cdeb48e
--- /dev/null
+++ b/third_party/subunit/filters/subunit2pyunit
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Display a subunit stream through python's unittest test runner."""
+
+from operator import methodcaller
+from optparse import OptionParser
+import sys
+import unittest
+
+from testtools import StreamToExtendedDecorator, DecorateTestCaseResult, StreamResultRouter
+
+from subunit import ByteStreamToStreamResult
+from subunit.filters import find_stream
+from subunit.test_results import CatFiles
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+    help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("--progress", action="store_true",
+    help="Use bzrlib's test reporter (requires bzrlib)",
+        default=False)
+(options, args) = parser.parse_args()
+test = ByteStreamToStreamResult(
+    find_stream(sys.stdin, args), non_subunit_name='stdout')
+def wrap_result(result):
+    result = StreamToExtendedDecorator(result)
+    if not options.no_passthrough:
+        result = StreamResultRouter(result)
+        result.add_rule(CatFiles(sys.stdout), 'test_id', test_id=None)
+    return result
+test = DecorateTestCaseResult(test, wrap_result,
+    before_run=methodcaller('startTestRun'),
+    after_run=methodcaller('stopTestRun'))
+if options.progress:
+    from bzrlib.tests import TextTestRunner
+    from bzrlib import ui
+    ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
+    runner = TextTestRunner()
+else:
+    runner = unittest.TextTestRunner(verbosity=2)
+if runner.run(test).wasSuccessful():
+    exit_code = 0
+else:
+    exit_code = 1
+sys.exit(exit_code)
diff --git a/third_party/subunit/filters/tap2subunit b/third_party/subunit/filters/tap2subunit
new file mode 100755
index 0000000..1c5b239
--- /dev/null
+++ b/third_party/subunit/filters/tap2subunit
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""A filter that reads a TAP stream and outputs a subunit stream.
+
+More information on TAP is available at
+http://testanything.org/wiki/index.php/Main_Page.
+"""
+
+import sys
+
+from subunit import TAP2SubUnit
+sys.exit(TAP2SubUnit(sys.stdin, sys.stdout))
diff --git a/third_party/subunit/libcppunit_subunit.pc.in b/third_party/subunit/libcppunit_subunit.pc.in
new file mode 100644
index 0000000..98982c7
--- /dev/null
+++ b/third_party/subunit/libcppunit_subunit.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: cppunit subunit listener
+Description: Subunit output listener for the CPPUnit test library.
+URL: http://launchpad.net/subunit
+Version: @VERSION@
+Libs: -L${libdir} -lsubunit
+Cflags: -I${includedir}
diff --git a/third_party/subunit/libsubunit.pc.in b/third_party/subunit/libsubunit.pc.in
new file mode 100644
index 0000000..6756414
--- /dev/null
+++ b/third_party/subunit/libsubunit.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: subunit
+Description: Subunit test protocol library.
+URL: http://launchpad.net/subunit
+Version: @VERSION@
+Libs: -L${libdir} -lsubunit
+Cflags: -I${includedir}
diff --git a/third_party/subunit/perl/Makefile.PL.in b/third_party/subunit/perl/Makefile.PL.in
new file mode 100755
index 0000000..749d468
--- /dev/null
+++ b/third_party/subunit/perl/Makefile.PL.in
@@ -0,0 +1,21 @@
+use ExtUtils::MakeMaker;
+WriteMakefile(
+    'PREFIX' => '@prefix@',
+    'NAME'	=> 'Subunit',
+    'VERSION' => '@SUBUNIT_VERSION@',
+    'test' => { 'TESTS' => 'tests/*.pl' },
+    'PMLIBDIRS' => [ 'lib' ],
+    'EXE_FILES' => [ '@abs_srcdir@/subunit-diff' ],
+);
+sub MY::postamble {
+<<'EOT';
+check: # test
+
+uninstall_distcheck:
+	find $(DESTDIR)$(INSTALLSITEARCH) -type f -exec rm {} \;
+	rm MYMETA.yml
+
+VPATH = @srcdir@
+.PHONY: uninstall_distcheck
+EOT
+}
diff --git a/third_party/subunit/perl/lib/Subunit.pm b/third_party/subunit/perl/lib/Subunit.pm
new file mode 100644
index 0000000..7da2cdc
--- /dev/null
+++ b/third_party/subunit/perl/lib/Subunit.pm
@@ -0,0 +1,183 @@
+# Perl module for parsing and generating the Subunit protocol
+# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer at samba.org>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+
+package Subunit;
+use POSIX;
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(parse_results $VERSION);
+
+use vars qw ( $VERSION );
+
+$VERSION = '0.0.2';
+
+use strict;
+
+sub parse_results($$$)
+{
+	my ($msg_ops, $statistics, $fh) = @_;
+	my $expected_fail = 0;
+	my $unexpected_fail = 0;
+	my $unexpected_err = 0;
+	my $open_tests = [];
+
+	while(<$fh>) {
+		if (/^test: (.+)\n/) {
+			$msg_ops->control_msg($_);
+			$msg_ops->start_test($1);
+			push (@$open_tests, $1);
+		} elsif (/^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z\n/) {
+			$msg_ops->report_time(mktime($6, $5, $4, $3, $2, $1-1900));
+		} elsif (/^(success|successful|failure|fail|skip|knownfail|error|xfail): (.*?)( \[)?([ \t]*)\n/) {
+			$msg_ops->control_msg($_);
+			my $result = $1;
+			my $testname = $2;
+			my $reason = undef;
+			if ($3) {
+				$reason = "";
+				# reason may be specified in next lines
+				my $terminated = 0;
+				while(<$fh>) {
+					$msg_ops->control_msg($_);
+					if ($_ eq "]\n") { $terminated = 1; last; } else { $reason .= $_; }
+				}
+
+				unless ($terminated) {
+					$statistics->{TESTS_ERROR}++;
+					$msg_ops->end_test($testname, "error", 1, "reason ($result) interrupted");
+					return 1;
+				}
+			}
+			if ($result eq "success" or $result eq "successful") {
+				pop(@$open_tests); #FIXME: Check that popped value == $testname
+				$statistics->{TESTS_EXPECTED_OK}++;
+				$msg_ops->end_test($testname, $result, 0, $reason);
+			} elsif ($result eq "xfail" or $result eq "knownfail") {
+				pop(@$open_tests); #FIXME: Check that popped value == $testname
+				$statistics->{TESTS_EXPECTED_FAIL}++;
+				$msg_ops->end_test($testname, $result, 0, $reason);
+				$expected_fail++;
+			} elsif ($result eq "failure" or $result eq "fail") {
+				pop(@$open_tests); #FIXME: Check that popped value == $testname
+				$statistics->{TESTS_UNEXPECTED_FAIL}++;
+				$msg_ops->end_test($testname, $result, 1, $reason);
+				$unexpected_fail++;
+			} elsif ($result eq "skip") {
+				$statistics->{TESTS_SKIP}++;
+				my $last = pop(@$open_tests);
+				if (defined($last) and $last ne $testname) {
+					push (@$open_tests, $testname);
+				}
+				$msg_ops->end_test($testname, $result, 0, $reason);
+			} elsif ($result eq "error") {
+				$statistics->{TESTS_ERROR}++;
+				pop(@$open_tests); #FIXME: Check that popped value == $testname
+				$msg_ops->end_test($testname, $result, 1, $reason);
+				$unexpected_err++;
+			}
+		} else {
+			$msg_ops->output_msg($_);
+		}
+	}
+
+	while ($#$open_tests+1 > 0) {
+		$msg_ops->end_test(pop(@$open_tests), "error", 1,
+				   "was started but never finished!");
+		$statistics->{TESTS_ERROR}++;
+		$unexpected_err++;
+	}
+
+	return 1 if $unexpected_err > 0;
+	return 1 if $unexpected_fail > 0;
+	return 0;
+}
+
+sub start_test($)
+{
+	my ($testname) = @_;
+	print "test: $testname\n";
+}
+
+sub end_test($$;$)
+{
+	my $name = shift;
+	my $result = shift;
+	my $reason = shift;
+	if ($reason) {
+		print "$result: $name [\n";
+		print "$reason";
+		print "]\n";
+	} else {
+		print "$result: $name\n";
+	}
+}
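+# For example (editor's note, not part of the upstream module):
+#   end_test("t1", "failure", "flaky network\n")
+# prints "failure: t1 [\n", then the reason text verbatim, then "]\n";
+# without a reason it simply prints "failure: t1\n".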
+
+sub skip_test($;$)
+{
+	my $name = shift;
+	my $reason = shift;
+	end_test($name, "skip", $reason);
+}
+
+sub fail_test($;$)
+{
+	my $name = shift;
+	my $reason = shift;
+	end_test($name, "failure", $reason);
+}
+
+sub success_test($;$)
+{
+	my $name = shift;
+	my $reason = shift;
+	end_test($name, "success", $reason);
+}
+
+sub xfail_test($;$)
+{
+	my $name = shift;
+	my $reason = shift;
+	end_test($name, "xfail", $reason);
+}
+
+sub report_time($)
+{
+	my ($time) = @_;
+	my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime($time);
+	printf "time: %04d-%02d-%02d %02d:%02d:%02dZ\n", $year+1900, $mon+1, $mday, $hour, $min, $sec;
+}
+
+sub progress_pop()
+{
+	print "progress: pop\n";
+}
+
+sub progress_push()
+{
+	print "progress: push\n";
+}
+
+sub progress($;$)
+{
+	my ($count, $whence) = @_;
+
+	unless(defined($whence)) {
+		$whence = "";
+	}
+
+	print "progress: $whence$count\n";
+}
+
+1;
diff --git a/third_party/subunit/perl/lib/Subunit/Diff.pm b/third_party/subunit/perl/lib/Subunit/Diff.pm
new file mode 100644
index 0000000..1262dd9
--- /dev/null
+++ b/third_party/subunit/perl/lib/Subunit/Diff.pm
@@ -0,0 +1,85 @@
+#!/usr/bin/perl
+# Diff two subunit streams
+# Copyright (C) Jelmer Vernooij <jelmer at samba.org>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+
+package Subunit::Diff;
+
+use strict;
+
+use Subunit qw(parse_results);
+
+sub control_msg() { }
+sub report_time($$) { }
+
+sub output_msg($$)
+{
+	my ($self, $msg) = @_;
+
+	# No output for now, perhaps later diff this as well ?
+}
+
+sub start_test($$)
+{
+	my ($self, $testname) = @_;
+}
+
+sub end_test($$$$$)
+{
+	my ($self, $testname, $result, $unexpected, $reason) = @_;
+
+	$self->{$testname} = $result;
+}
+
+sub new {
+	my ($class) = @_;
+
+	my $self = {
+	};
+	bless($self, $class);
+}
+
+sub from_file($)
+{
+	my ($path) = @_;
+	my $statistics = {
+		TESTS_UNEXPECTED_OK => 0,
+		TESTS_EXPECTED_OK => 0,
+		TESTS_UNEXPECTED_FAIL => 0,
+		TESTS_EXPECTED_FAIL => 0,
+		TESTS_ERROR => 0,
+		TESTS_SKIP => 0,
+	};
+
+	my $ret = new Subunit::Diff();
+	open(IN, $path) or return;
+	parse_results($ret, $statistics, *IN);
+	close(IN);
+	return $ret;
+}
+
+sub diff($$)
+{
+	my ($old, $new) = @_;
+	my $ret = {};
+
+	foreach my $testname (keys %$old) {
+		if ($new->{$testname} ne $old->{$testname}) {
+			$ret->{$testname} = [$old->{$testname}, $new->{$testname}];
+		}
+	}
+
+	return $ret;
+}
+
+1;
diff --git a/third_party/subunit/perl/subunit-diff b/third_party/subunit/perl/subunit-diff
new file mode 100755
index 0000000..8c8516a
--- /dev/null
+++ b/third_party/subunit/perl/subunit-diff
@@ -0,0 +1,31 @@
+#!/usr/bin/perl
+# Diff two subunit streams
+# Copyright (C) Jelmer Vernooij <jelmer at samba.org>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+
+use Getopt::Long;
+use strict;
+use FindBin qw($RealBin $Script);
+use lib "$RealBin/lib";
+use Subunit::Diff;
+
+my $old = Subunit::Diff::from_file($ARGV[0]);
+my $new = Subunit::Diff::from_file($ARGV[1]);
+
+my $ret = Subunit::Diff::diff($old, $new);
+
+foreach my $e (sort(keys %$ret)) {
+	printf "%s: %s -> %s\n", $e, $ret->{$e}[0], $ret->{$e}[1];
+}
+
+0;
diff --git a/third_party/subunit/python/iso8601/LICENSE b/third_party/subunit/python/iso8601/LICENSE
new file mode 100644
index 0000000..5ca93da
--- /dev/null
+++ b/third_party/subunit/python/iso8601/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2007 Michael Twomey
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/subunit/python/iso8601/README b/third_party/subunit/python/iso8601/README
new file mode 100644
index 0000000..dcb7477
--- /dev/null
+++ b/third_party/subunit/python/iso8601/README
@@ -0,0 +1,26 @@
+A simple package to deal with ISO 8601 date time formats.
+
+ISO 8601 defines a neutral, unambiguous date string format, which also
+has the property of sorting naturally.
+
+e.g. YYYY-MM-DDTHH:MM:SSZ or 2007-01-25T12:00:00Z
+
+Currently this covers only the most common date formats encountered; not
+all of ISO 8601 is handled.
+
+Currently the following formats are handled:
+
+* 2006-01-01T00:00:00Z
+* 2006-01-01T00:00:00[+-]00:00
+
+I'll add more as I encounter them in my day to day life. Patches with
+new formats and tests will be gratefully accepted of course :)
+
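+For example (editor's sketch, mirroring the doctest shipped in setup.py):
+
+  >>> import iso8601
+  >>> iso8601.parse_date("2007-01-25T12:00:00Z")
+  datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+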
+References:
+
+* http://www.cl.cam.ac.uk/~mgk25/iso-time.html - simple overview
+
+* http://hydracen.com/dx/iso8601.htm - more detailed enumeration of
+  valid formats.
+
+See the LICENSE file for the license this package is released under.
diff --git a/third_party/subunit/python/iso8601/README.subunit b/third_party/subunit/python/iso8601/README.subunit
new file mode 100644
index 0000000..d1ed8a1
--- /dev/null
+++ b/third_party/subunit/python/iso8601/README.subunit
@@ -0,0 +1,5 @@
+This is a [slightly rearranged] import of http://pypi.python.org/pypi/iso8601/
+version 0.1.4. The OS X hidden files have been stripped, and the package
+turned into a single module, to simplify installation. The remainder of the
+source distribution is included in the subunit source tree at python/iso8601
+for reference.
diff --git a/third_party/subunit/python/iso8601/setup.py b/third_party/subunit/python/iso8601/setup.py
new file mode 100644
index 0000000..d208647
--- /dev/null
+++ b/third_party/subunit/python/iso8601/setup.py
@@ -0,0 +1,58 @@
+try:
+    from setuptools import setup
+except ImportError:
+    from distutils.core import setup
+
+long_description="""Simple module to parse ISO 8601 dates
+
+This module parses the most common forms of ISO 8601 date strings (e.g.
+2007-01-14T20:34:22+00:00) into datetime objects.
+
+>>> import iso8601
+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+>>>
+
+Changes
+=======
+
+0.1.4
+-----
+
+* The default_timezone argument wasn't being passed through correctly,
+  UTC was being used in every case. Fixes issue 10.
+
+0.1.3
+-----
+
+* Fixed the microsecond handling, the generated microsecond values were
+  way too small. Fixes issue 9.
+
+0.1.2
+-----
+
+* Adding ParseError to __all__ in iso8601 module, allows people to import it.
+  Addresses issue 7.
+* Be a little more flexible when dealing with dates without leading zeroes.
+  This violates the spec a little, but handles more dates as seen in the
+  field. Addresses issue 6.
+* Allow date/time separators other than T.
+
+0.1.1
+-----
+
+* When parsing dates without a timezone the specified default is used. If no
+  default is specified then UTC is used. Addresses issue 4.
+"""
+
+setup(
+    name="iso8601",
+    version="0.1.4",
+    description=long_description.split("\n")[0],
+    long_description=long_description,
+    author="Michael Twomey",
+    author_email="micktwomey+iso8601 at gmail.com",
+    url="http://code.google.com/p/pyiso8601/",
+    packages=["iso8601"],
+    license="MIT",
+)
diff --git a/third_party/subunit/python/iso8601/test_iso8601.py b/third_party/subunit/python/iso8601/test_iso8601.py
new file mode 100644
index 0000000..6161e32
--- /dev/null
+++ b/third_party/subunit/python/iso8601/test_iso8601.py
@@ -0,0 +1,111 @@
+import iso8601
+
+def test_iso8601_regex():
+    assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
+
+def test_timezone_regex():
+    assert iso8601.TIMEZONE_REGEX.match("+01:00")
+    assert iso8601.TIMEZONE_REGEX.match("+00:00")
+    assert iso8601.TIMEZONE_REGEX.match("+01:20")
+    assert iso8601.TIMEZONE_REGEX.match("-01:00")
+
+def test_parse_date():
+    d = iso8601.parse_date("2006-10-20T15:34:56Z")
+    assert d.year == 2006
+    assert d.month == 10
+    assert d.day == 20
+    assert d.hour == 15
+    assert d.minute == 34
+    assert d.second == 56
+    assert d.tzinfo == iso8601.UTC
+
+def test_parse_date_fraction():
+    d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
+    assert d.year == 2006
+    assert d.month == 10
+    assert d.day == 20
+    assert d.hour == 15
+    assert d.minute == 34
+    assert d.second == 56
+    assert d.microsecond == 123000
+    assert d.tzinfo == iso8601.UTC
+
+def test_parse_date_fraction_2():
+    """From bug 6
+
+    """
+    d = iso8601.parse_date("2007-5-7T11:43:55.328Z'")
+    assert d.year == 2007
+    assert d.month == 5
+    assert d.day == 7
+    assert d.hour == 11
+    assert d.minute == 43
+    assert d.second == 55
+    assert d.microsecond == 328000
+    assert d.tzinfo == iso8601.UTC
+
+def test_parse_date_tz():
+    d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
+    assert d.year == 2006
+    assert d.month == 10
+    assert d.day == 20
+    assert d.hour == 15
+    assert d.minute == 34
+    assert d.second == 56
+    assert d.microsecond == 123000
+    assert d.tzinfo.tzname(None) == "+02:30"
+    offset = d.tzinfo.utcoffset(None)
+    assert offset.days == 0
+    assert offset.seconds == 60 * 60 * 2.5
+
+def test_parse_invalid_date():
+    try:
+        iso8601.parse_date(None)
+    except iso8601.ParseError:
+        pass
+    else:
+        assert 1 == 2
+
+def test_parse_invalid_date2():
+    try:
+        iso8601.parse_date("23")
+    except iso8601.ParseError:
+        pass
+    else:
+        assert 1 == 2
+
+def test_parse_no_timezone():
+    """issue 4 - Handle datetime string without timezone
+
+    This tests what happens when you parse a date with no timezone. While not
+    strictly correct this is quite common. I'll assume UTC for the time zone
+    in this case.
+    """
+    d = iso8601.parse_date("2007-01-01T08:00:00")
+    assert d.year == 2007
+    assert d.month == 1
+    assert d.day == 1
+    assert d.hour == 8
+    assert d.minute == 0
+    assert d.second == 0
+    assert d.microsecond == 0
+    assert d.tzinfo == iso8601.UTC
+
+def test_parse_no_timezone_different_default():
+    tz = iso8601.FixedOffset(2, 0, "test offset")
+    d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
+    assert d.tzinfo == tz
+
+def test_space_separator():
+    """Handle a separator other than T
+
+    """
+    d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
+    assert d.year == 2007
+    assert d.month == 6
+    assert d.day == 23
+    assert d.hour == 6
+    assert d.minute == 40
+    assert d.second == 34
+    assert d.microsecond == 0
+    assert d.tzinfo == iso8601.UTC
diff --git a/third_party/subunit/python/subunit/__init__.py b/third_party/subunit/python/subunit/__init__.py
new file mode 100644
index 0000000..f6d1b83
--- /dev/null
+++ b/third_party/subunit/python/subunit/__init__.py
@@ -0,0 +1,1320 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Subunit - a streaming test protocol
+
+Overview
+++++++++
+
+The ``subunit`` Python package provides a number of ``unittest`` extensions
+which can be used to cause tests to output Subunit, to parse Subunit streams
+into test activity, perform seamless test isolation within a regular test
+case and variously sort, filter and report on test runs.
+
+
+Key Classes
+-----------
+
+The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
+extension which will translate a test run into a Subunit stream.
+
+The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
+protocol and the ``unittest.TestCase`` object protocol. It is used to translate
+a stream into a test run, which regular ``unittest.TestResult`` objects can
+process and report/inspect.
+
+Subunit has support for non-blocking usage too, for use with asyncore or
+Twisted. See the ``TestProtocolServer`` parser class for more details.
+
+Subunit includes extensions to the Python ``TestResult`` protocol. These are
+all done in a compatible manner: ``TestResult`` objects that do not implement
+the extension methods will not cause errors to be raised, instead the extension
+will either lose fidelity (for instance, folding expected failures to success
+in Python versions < 2.7 or 3.1), or discard the extended data (for extra
+details, tags, timestamping and progress markers).
+
+The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
+``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
+which can be used instead of the usual python unittest parameter.
+When used the value of details should be a dict from ``string`` to
+``testtools.content.Content`` objects. This is a draft API being worked on with
+the Python Testing In Python mail list, with the goal of permitting a common
+way to provide additional data beyond a traceback, such as captured data from
+disk, logging messages etc. The reference for this API is in testtools (0.9.0
+and newer).
+
+The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
+remove tags in the test run that is currently executing. If called when no
+test is in progress (that is, if called outside of the ``startTest``,
+``stopTest`` pair), the tags apply to all subsequent tests. If called
+when a test is in progress, then the tags only apply to that test.
+
+The ``time(a_datetime)`` method is called (if present) when a ``time:``
+directive is encountered in a Subunit stream. This is used to tell a TestResult
+about the time that events in the stream occurred at, to allow reconstructing
+test timing from a stream.
+
+The ``progress(offset, whence)`` method controls progress data for a stream.
+The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
+subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
+ignore the offset parameter.
+
+
+Python test support
+-------------------
+
+``subunit.run`` is a convenience wrapper to run a Python test suite via
+the command line, reporting via Subunit::
+
+  $ python -m subunit.run mylib.tests.test_suite
+
+The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
+tests, allowing isolation between the test runner and some tests.
+
+Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
+tests that will fork() before that individual test is run.
+
+``ExecTestCase`` is a convenience wrapper for running an external
+program to get a Subunit stream and then report that back to an arbitrary
+result object::
+
+ class AggregateTests(subunit.ExecTestCase):
+
+     def test_script_one(self):
+         './bin/script_one'
+
+     def test_script_two(self):
+         './bin/script_two'
+
+ # Normally your test loading would take care of this automatically;
+ # it is only spelt out in detail here for clarity.
+ suite = unittest.TestSuite([AggregateTests("test_script_one"),
+     AggregateTests("test_script_two")])
+ # Create any TestResult class you like.
+ result = unittest._TextTestResult(sys.stdout)
+ # And run your suite as normal, Subunit will exec each external script as
+ # needed and report to your result object.
+ suite.run(result)
+
+Utility modules
+---------------
+
+* subunit.chunked contains HTTP chunked encoding/decoding logic.
+* subunit.test_results contains TestResult helper classes.
+"""
+
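+# Editor's sketch (not part of upstream subunit): a minimal round trip using
+# the two key classes described above. TestProtocolClient and ProtocolTestCase
+# are defined later in this module; the helper is illustrative only.
+def _docstring_example_round_trip(suite):
+    """Serialise ``suite`` to a subunit byte stream and replay it."""
+    import io
+    import unittest
+    stream = io.BytesIO()
+    # Run the suite, recording its activity as a subunit (v1) stream.
+    suite.run(TestProtocolClient(stream))
+    # Rewind and replay the recorded stream into a plain unittest result.
+    stream.seek(0)
+    result = unittest.TestResult()
+    ProtocolTestCase(stream)(result)
+    return result
+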
+import os
+import re
+import subprocess
+import sys
+import unittest
+try:
+    from io import UnsupportedOperation as _UnsupportedOperation
+except ImportError:
+    _UnsupportedOperation = AttributeError
+
+from extras import safe_hasattr
+from testtools import content, content_type, ExtendedToOriginalDecorator
+from testtools.content import TracebackContent
+from testtools.compat import _b, _u, BytesIO, StringIO
+try:
+    from testtools.testresult.real import _StringException
+    RemoteException = _StringException
+except ImportError:
+    raise ImportError ("testtools.testresult.real does not contain "
+        "_StringException, check your version.")
+from testtools import testresult, CopyStreamResult
+
+from subunit import chunked, details, iso8601, test_results
+from subunit.v2 import ByteStreamToStreamResult, StreamResultToBytes
+
+# same format as sys.version_info: "A tuple containing the five components of
+# the version number: major, minor, micro, releaselevel, and serial. All
+# values except releaselevel are integers; the release level is 'alpha',
+# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
+# Python version 2.0 is (2, 0, 0, 'final', 0)."  Additionally we use a
+# releaselevel of 'dev' for unreleased under-development code.
+#
+# If the releaselevel is 'alpha' then the major/minor/micro components are not
+# established at this point, and setup.py will use a version of next-$(revno).
+# If the releaselevel is 'final', then the tarball will be major.minor.micro.
+# Otherwise it is major.minor.micro~$(revno).
+
+__version__ = (0, 0, 21, 'final', 0)
+
+PROGRESS_SET = 0
+PROGRESS_CUR = 1
+PROGRESS_PUSH = 2
+PROGRESS_POP = 3
+
+
+def test_suite():
+    import subunit.tests
+    return subunit.tests.test_suite()
+
+
+def join_dir(base_path, path):
+    """
+    Returns an absolute path to C{path}, calculated relative to the parent
+    of C{base_path}.
+
+    @param base_path: A path to a file or directory.
+    @param path: An absolute path, or a path relative to the containing
+    directory of C{base_path}.
+
+    @return: An absolute path to C{path}.
+    """
+    return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
+
+
+def tags_to_new_gone(tags):
+    """Split a list of tags into a new_set and a gone_set."""
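+    # For example (editor's note): tags_to_new_gone(['quick', '-slow']) returns
+    # ({'quick'}, {'slow'}) - a leading '-' moves a tag into the gone set.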
+    new_tags = set()
+    gone_tags = set()
+    for tag in tags:
+        if tag[0] == '-':
+            gone_tags.add(tag[1:])
+        else:
+            new_tags.add(tag)
+    return new_tags, gone_tags
+
+
+class DiscardStream(object):
+    """A filelike object which discards what is written to it."""
+
+    def fileno(self):
+        raise _UnsupportedOperation()
+
+    def write(self, bytes):
+        pass
+
+    def read(self, len=0):
+        return _b('')
+
+
+class _ParserState(object):
+    """State for the subunit parser."""
+
+    def __init__(self, parser):
+        self.parser = parser
+        self._test_sym = (_b('test'), _b('testing'))
+        self._colon_sym = _b(':')
+        self._error_sym = (_b('error'),)
+        self._failure_sym = (_b('failure'),)
+        self._progress_sym = (_b('progress'),)
+        self._skip_sym = _b('skip')
+        self._success_sym = (_b('success'), _b('successful'))
+        self._tags_sym = (_b('tags'),)
+        self._time_sym = (_b('time'),)
+        self._xfail_sym = (_b('xfail'),)
+        self._uxsuccess_sym = (_b('uxsuccess'),)
+        self._start_simple = _u(" [")
+        self._start_multipart = _u(" [ multipart")
+
+    def addError(self, offset, line):
+        """An 'error:' directive has been read."""
+        self.parser.stdOutLineReceived(line)
+
+    def addExpectedFail(self, offset, line):
+        """An 'xfail:' directive has been read."""
+        self.parser.stdOutLineReceived(line)
+
+    def addFailure(self, offset, line):
+        """A 'failure:' directive has been read."""
+        self.parser.stdOutLineReceived(line)
+
+    def addSkip(self, offset, line):
+        """A 'skip:' directive has been read."""
+        self.parser.stdOutLineReceived(line)
+
+    def addSuccess(self, offset, line):
+        """A 'success:' directive has been read."""
+        self.parser.stdOutLineReceived(line)
+
+    def lineReceived(self, line):
+        """a line has been received."""
+        parts = line.split(None, 1)
+        if len(parts) == 2 and line.startswith(parts[0]):
+            cmd, rest = parts
+            offset = len(cmd) + 1
+            cmd = cmd.rstrip(self._colon_sym)
+            if cmd in self._test_sym:
+                self.startTest(offset, line)
+            elif cmd in self._error_sym:
+                self.addError(offset, line)
+            elif cmd in self._failure_sym:
+                self.addFailure(offset, line)
+            elif cmd in self._progress_sym:
+                self.parser._handleProgress(offset, line)
+            elif cmd in self._skip_sym:
+                self.addSkip(offset, line)
+            elif cmd in self._success_sym:
+                self.addSuccess(offset, line)
+            elif cmd in self._tags_sym:
+                self.parser._handleTags(offset, line)
+                self.parser.subunitLineReceived(line)
+            elif cmd in self._time_sym:
+                self.parser._handleTime(offset, line)
+                self.parser.subunitLineReceived(line)
+            elif cmd in self._xfail_sym:
+                self.addExpectedFail(offset, line)
+            elif cmd in self._uxsuccess_sym:
+                self.addUnexpectedSuccess(offset, line)
+            else:
+                self.parser.stdOutLineReceived(line)
+        else:
+            self.parser.stdOutLineReceived(line)
+
+    def lostConnection(self):
+        """Connection lost."""
+        self.parser._lostConnectionInTest(_u('unknown state of '))
+
+    def startTest(self, offset, line):
+        """A test start command received."""
+        self.parser.stdOutLineReceived(line)
+
+
+class _InTest(_ParserState):
+    """State for the subunit parser after reading a test: directive."""
+
+    def _outcome(self, offset, line, no_details, details_state):
+        """An outcome directive has been read.
+
+        :param no_details: Callable to call when no details are presented.
+        :param details_state: The state to switch to for details
+            processing of this outcome.
+        """
+        test_name = line[offset:-1].decode('utf8')
+        if self.parser.current_test_description == test_name:
+            self.parser._state = self.parser._outside_test
+            self.parser.current_test_description = None
+            no_details()
+            self.parser.client.stopTest(self.parser._current_test)
+            self.parser._current_test = None
+            self.parser.subunitLineReceived(line)
+        elif self.parser.current_test_description + self._start_simple == \
+            test_name:
+            self.parser._state = details_state
+            details_state.set_simple()
+            self.parser.subunitLineReceived(line)
+        elif self.parser.current_test_description + self._start_multipart == \
+            test_name:
+            self.parser._state = details_state
+            details_state.set_multipart()
+            self.parser.subunitLineReceived(line)
+        else:
+            self.parser.stdOutLineReceived(line)
+
+    def _error(self):
+        self.parser.client.addError(self.parser._current_test,
+            details={})
+
+    def addError(self, offset, line):
+        """An 'error:' directive has been read."""
+        self._outcome(offset, line, self._error,
+            self.parser._reading_error_details)
+
+    def _xfail(self):
+        self.parser.client.addExpectedFailure(self.parser._current_test,
+            details={})
+
+    def addExpectedFail(self, offset, line):
+        """An 'xfail:' directive has been read."""
+        self._outcome(offset, line, self._xfail,
+            self.parser._reading_xfail_details)
+
+    def _uxsuccess(self):
+        self.parser.client.addUnexpectedSuccess(self.parser._current_test)
+
+    def addUnexpectedSuccess(self, offset, line):
+        """A 'uxsuccess:' directive has been read."""
+        self._outcome(offset, line, self._uxsuccess,
+            self.parser._reading_uxsuccess_details)
+
+    def _failure(self):
+        self.parser.client.addFailure(self.parser._current_test, details={})
+
+    def addFailure(self, offset, line):
+        """A 'failure:' directive has been read."""
+        self._outcome(offset, line, self._failure,
+            self.parser._reading_failure_details)
+
+    def _skip(self):
+        self.parser.client.addSkip(self.parser._current_test, details={})
+
+    def addSkip(self, offset, line):
+        """A 'skip:' directive has been read."""
+        self._outcome(offset, line, self._skip,
+            self.parser._reading_skip_details)
+
+    def _succeed(self):
+        self.parser.client.addSuccess(self.parser._current_test, details={})
+
+    def addSuccess(self, offset, line):
+        """A 'success:' directive has been read."""
+        self._outcome(offset, line, self._succeed,
+            self.parser._reading_success_details)
+
+    def lostConnection(self):
+        """Connection lost."""
+        self.parser._lostConnectionInTest(_u(''))
+
+
+class _OutSideTest(_ParserState):
+    """State for the subunit parser outside of a test context."""
+
+    def lostConnection(self):
+        """Connection lost."""
+
+    def startTest(self, offset, line):
+        """A test start command received."""
+        self.parser._state = self.parser._in_test
+        test_name = line[offset:-1].decode('utf8')
+        self.parser._current_test = RemotedTestCase(test_name)
+        self.parser.current_test_description = test_name
+        self.parser.client.startTest(self.parser._current_test)
+        self.parser.subunitLineReceived(line)
+
+
+class _ReadingDetails(_ParserState):
+    """Common logic for reading state details."""
+
+    def endDetails(self):
+        """The end of a details section has been reached."""
+        self.parser._state = self.parser._outside_test
+        self.parser.current_test_description = None
+        self._report_outcome()
+        self.parser.client.stopTest(self.parser._current_test)
+
+    def lineReceived(self, line):
+        """a line has been received."""
+        self.details_parser.lineReceived(line)
+        self.parser.subunitLineReceived(line)
+
+    def lostConnection(self):
+        """Connection lost."""
+        self.parser._lostConnectionInTest(_u('%s report of ') %
+            self._outcome_label())
+
+    def _outcome_label(self):
+        """The label to describe this outcome."""
+        raise NotImplementedError(self._outcome_label)
+
+    def set_simple(self):
+        """Start a simple details parser."""
+        self.details_parser = details.SimpleDetailsParser(self)
+
+    def set_multipart(self):
+        """Start a multipart details parser."""
+        self.details_parser = details.MultipartDetailsParser(self)
+
+
+class _ReadingFailureDetails(_ReadingDetails):
+    """State for the subunit parser when reading failure details."""
+
+    def _report_outcome(self):
+        self.parser.client.addFailure(self.parser._current_test,
+            details=self.details_parser.get_details())
+
+    def _outcome_label(self):
+        return "failure"
+
+
+class _ReadingErrorDetails(_ReadingDetails):
+    """State for the subunit parser when reading error details."""
+
+    def _report_outcome(self):
+        self.parser.client.addError(self.parser._current_test,
+            details=self.details_parser.get_details())
+
+    def _outcome_label(self):
+        return "error"
+
+
+class _ReadingExpectedFailureDetails(_ReadingDetails):
+    """State for the subunit parser when reading xfail details."""
+
+    def _report_outcome(self):
+        self.parser.client.addExpectedFailure(self.parser._current_test,
+            details=self.details_parser.get_details())
+
+    def _outcome_label(self):
+        return "xfail"
+
+
+class _ReadingUnexpectedSuccessDetails(_ReadingDetails):
+    """State for the subunit parser when reading uxsuccess details."""
+
+    def _report_outcome(self):
+        self.parser.client.addUnexpectedSuccess(self.parser._current_test,
+            details=self.details_parser.get_details())
+
+    def _outcome_label(self):
+        return "uxsuccess"
+
+
+class _ReadingSkipDetails(_ReadingDetails):
+    """State for the subunit parser when reading skip details."""
+
+    def _report_outcome(self):
+        self.parser.client.addSkip(self.parser._current_test,
+            details=self.details_parser.get_details("skip"))
+
+    def _outcome_label(self):
+        return "skip"
+
+
+class _ReadingSuccessDetails(_ReadingDetails):
+    """State for the subunit parser when reading success details."""
+
+    def _report_outcome(self):
+        self.parser.client.addSuccess(self.parser._current_test,
+            details=self.details_parser.get_details("success"))
+
+    def _outcome_label(self):
+        return "success"
+
+
+class TestProtocolServer(object):
+    """A parser for subunit.
+
+    :ivar tags: The current tags associated with the protocol stream.
+    """
+
+    def __init__(self, client, stream=None, forward_stream=None):
+        """Create a TestProtocolServer instance.
+
+        :param client: An object meeting the unittest.TestResult protocol.
+        :param stream: The stream that lines received which are not part of the
+            subunit protocol should be written to. This allows custom handling
+            of mixed protocols. By default, sys.stdout will be used for
+            convenience. It should accept bytes to its write() method.
+        :param forward_stream: A stream to forward subunit lines to. This
+            allows a filter to forward the entire stream while still parsing
+            and acting on it. By default forward_stream is set to
+            DiscardStream() and no forwarding happens.
+        """
+        self.client = ExtendedToOriginalDecorator(client)
+        if stream is None:
+            stream = sys.stdout
+            if sys.version_info > (3, 0):
+                stream = stream.buffer
+        self._stream = stream
+        self._forward_stream = forward_stream or DiscardStream()
+        # state objects we can switch to
+        self._in_test = _InTest(self)
+        self._outside_test = _OutSideTest(self)
+        self._reading_error_details = _ReadingErrorDetails(self)
+        self._reading_failure_details = _ReadingFailureDetails(self)
+        self._reading_skip_details = _ReadingSkipDetails(self)
+        self._reading_success_details = _ReadingSuccessDetails(self)
+        self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
+        self._reading_uxsuccess_details = _ReadingUnexpectedSuccessDetails(self)
+        # start with outside test.
+        self._state = self._outside_test
+        # Avoid casts on every call
+        self._plusminus = _b('+-')
+        self._push_sym = _b('push')
+        self._pop_sym = _b('pop')
+
+    def _handleProgress(self, offset, line):
+        """Process a progress directive."""
+        line = line[offset:].strip()
+        if line[0] in self._plusminus:
+            whence = PROGRESS_CUR
+            delta = int(line)
+        elif line == self._push_sym:
+            whence = PROGRESS_PUSH
+            delta = None
+        elif line == self._pop_sym:
+            whence = PROGRESS_POP
+            delta = None
+        else:
+            whence = PROGRESS_SET
+            delta = int(line)
+        self.client.progress(delta, whence)
+
+    def _handleTags(self, offset, line):
+        """Process a tags command."""
+        tags = line[offset:].decode('utf8').split()
+        new_tags, gone_tags = tags_to_new_gone(tags)
+        self.client.tags(new_tags, gone_tags)
+
+    def _handleTime(self, offset, line):
+        # Accept it, but do not do anything with it yet.
+        try:
+            event_time = iso8601.parse_date(line[offset:-1])
+        except TypeError:
+            raise TypeError(_u("Failed to parse %r, got %r")
+                % (line, sys.exc_info()[1]))
+        self.client.time(event_time)
+
+    def lineReceived(self, line):
+        """Call the appropriate local method for the received line."""
+        self._state.lineReceived(line)
+
+    def _lostConnectionInTest(self, state_string):
+        error_string = _u("lost connection during %stest '%s'") % (
+            state_string, self.current_test_description)
+        self.client.addError(self._current_test, RemoteError(error_string))
+        self.client.stopTest(self._current_test)
+
+    def lostConnection(self):
+        """The input connection has finished."""
+        self._state.lostConnection()
+
+    def readFrom(self, pipe):
+        """Blocking convenience API to parse an entire stream.
+
+        :param pipe: A file-like object supporting readlines().
+        :return: None.
+        """
+        for line in pipe.readlines():
+            self.lineReceived(line)
+        self.lostConnection()
+
+    def _startTest(self, offset, line):
+        """Internal call to change state machine. Override startTest()."""
+        self._state.startTest(offset, line)
+
+    def subunitLineReceived(self, line):
+        self._forward_stream.write(line)
+
+    def stdOutLineReceived(self, line):
+        self._stream.write(line)
+
+
+class TestProtocolClient(testresult.TestResult):
+    """A TestResult which generates a subunit stream for a test run.
+
+    # Get a TestSuite or TestCase to run
+    suite = make_suite()
+    # Create a stream (any object with a 'write' method). This should accept
+    # bytes not strings: subunit is a byte orientated protocol.
+    stream = open('tests.log', 'wb')
+    # Create a subunit result object which will output to the stream
+    result = subunit.TestProtocolClient(stream)
+    # Optionally, to get timing data for performance analysis, wrap the
+    # serialiser with a timing decorator
+    result = subunit.test_results.AutoTimingTestResultDecorator(result)
+    # Run the test suite reporting to the subunit result object
+    suite.run(result)
+    # Close the stream.
+    stream.close()
+    """
+
+    def __init__(self, stream):
+        testresult.TestResult.__init__(self)
+        stream = make_stream_binary(stream)
+        self._stream = stream
+        self._progress_fmt = _b("progress: ")
+        self._bytes_eol = _b("\n")
+        self._progress_plus = _b("+")
+        self._progress_push = _b("push")
+        self._progress_pop = _b("pop")
+        self._empty_bytes = _b("")
+        self._start_simple = _b(" [\n")
+        self._end_simple = _b("]\n")
+
+    def addError(self, test, error=None, details=None):
+        """Report an error in test test.
+
+        Only one of error and details should be provided: conceptually there
+        are two separate methods:
+            addError(self, test, error)
+            addError(self, test, details)
+
+        :param error: Standard unittest positional argument form - an
+            exc_info tuple.
+        :param details: New Testing-in-python drafted API; a dict from string
+            to subunit.Content objects.
+        """
+        self._addOutcome("error", test, error=error, details=details)
+        if self.failfast:
+            self.stop()
+
+    def addExpectedFailure(self, test, error=None, details=None):
+        """Report an expected failure in test test.
+
+        Only one of error and details should be provided: conceptually there
+        are two separate methods:
+            addExpectedFailure(self, test, error)
+            addExpectedFailure(self, test, details)
+
+        :param error: Standard unittest positional argument form - an
+            exc_info tuple.
+        :param details: New Testing-in-python drafted API; a dict from string
+            to subunit.Content objects.
+        """
+        self._addOutcome("xfail", test, error=error, details=details)
+
+    def addFailure(self, test, error=None, details=None):
+        """Report a failure in test test.
+
+        Only one of error and details should be provided: conceptually there
+        are two separate methods:
+            addFailure(self, test, error)
+            addFailure(self, test, details)
+
+        :param error: Standard unittest positional argument form - an
+            exc_info tuple.
+        :param details: New Testing-in-python drafted API; a dict from string
+            to subunit.Content objects.
+        """
+        self._addOutcome("failure", test, error=error, details=details)
+        if self.failfast:
+            self.stop()
+
+    def _addOutcome(self, outcome, test, error=None, details=None,
+        error_permitted=True):
+        """Report an outcome in test test.
+
+        Only one of error and details should be provided: conceptually there
+        are two separate methods:
+            addOutcome(self, test, error)
+            addOutcome(self, test, details)
+
+        :param outcome: A string describing the outcome - used as the
+            event name in the subunit stream.
+        :param error: Standard unittest positional argument form - an
+            exc_info tuple.
+        :param details: New Testing-in-python drafted API; a dict from string
+            to subunit.Content objects.
+        :param error_permitted: If True then one and only one of error or
+            details must be supplied. If False then error must not be supplied
+            and details is still optional.
+        """
+        self._stream.write(_b("%s: " % outcome) + self._test_id(test))
+        if error_permitted:
+            if error is None and details is None:
+                raise ValueError
+        else:
+            if error is not None:
+                raise ValueError
+        if error is not None:
+            self._stream.write(self._start_simple)
+            tb_content = TracebackContent(error, test)
+            for bytes in tb_content.iter_bytes():
+                self._stream.write(bytes)
+        elif details is not None:
+            self._write_details(details)
+        else:
+            self._stream.write(_b("\n"))
+        if details is not None or error is not None:
+            self._stream.write(self._end_simple)
+
+    def addSkip(self, test, reason=None, details=None):
+        """Report a skipped test."""
+        if reason is None:
+            self._addOutcome("skip", test, error=None, details=details)
+        else:
+            self._stream.write(_b("skip: %s [\n" % test.id()))
+            self._stream.write(_b("%s\n" % reason))
+            self._stream.write(self._end_simple)
+
+    def addSuccess(self, test, details=None):
+        """Report a success in a test."""
+        self._addOutcome("successful", test, details=details, error_permitted=False)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        """Report an unexpected success in test test.
+
+        Details can optionally be provided: conceptually there
+        are two separate methods:
+            addUnexpectedSuccess(self, test)
+            addUnexpectedSuccess(self, test, details)
+
+        :param details: New Testing-in-python drafted API; a dict from string
+            to subunit.Content objects.
+        """
+        self._addOutcome("uxsuccess", test, details=details,
+            error_permitted=False)
+        if self.failfast:
+            self.stop()
+
+    def _test_id(self, test):
+        result = test.id()
+        if type(result) is not bytes:
+            result = result.encode('utf8')
+        return result
+
+    def startTest(self, test):
+        """Mark a test as starting its test run."""
+        super(TestProtocolClient, self).startTest(test)
+        self._stream.write(_b("test: ") + self._test_id(test) + _b("\n"))
+        self._stream.flush()
+
+    def stopTest(self, test):
+        super(TestProtocolClient, self).stopTest(test)
+        self._stream.flush()
+
+    def progress(self, offset, whence):
+        """Provide indication about the progress/length of the test run.
+
+        :param offset: Information about the number of tests remaining. If
+            whence is PROGRESS_CUR, then offset increases/decreases the
+            remaining test count. If whence is PROGRESS_SET, then offset
+            specifies exactly the remaining test count.
+        :param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
+            PROGRESS_POP.
+        """
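+        # For example (editor's note): progress(3, PROGRESS_SET) writes
+        # b"progress: 3\n", progress(1, PROGRESS_CUR) writes b"progress: +1\n"
+        # and progress(None, PROGRESS_PUSH) writes b"progress: push\n".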
+        if whence == PROGRESS_CUR and offset > -1:
+            prefix = self._progress_plus
+            offset = _b(str(offset))
+        elif whence == PROGRESS_PUSH:
+            prefix = self._empty_bytes
+            offset = self._progress_push
+        elif whence == PROGRESS_POP:
+            prefix = self._empty_bytes
+            offset = self._progress_pop
+        else:
+            prefix = self._empty_bytes
+            offset = _b(str(offset))
+        self._stream.write(self._progress_fmt + prefix + offset +
+            self._bytes_eol)
+
+    def tags(self, new_tags, gone_tags):
+        """Inform the client about tags added/removed from the stream."""
+        if not new_tags and not gone_tags:
+            return
+        tags = set([tag.encode('utf8') for tag in new_tags])
+        tags.update([_b("-") + tag.encode('utf8') for tag in gone_tags])
+        tag_line = _b("tags: ") + _b(" ").join(tags) + _b("\n")
+        self._stream.write(tag_line)
+
+    def time(self, a_datetime):
+        """Inform the client of the time.
+
+        :param a_datetime: A datetime.datetime object.
+        """
+        time = a_datetime.astimezone(iso8601.Utc())
+        self._stream.write(_b("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
+            time.year, time.month, time.day, time.hour, time.minute,
+            time.second, time.microsecond)))
+
+    def _write_details(self, details):
+        """Output details to the stream.
+
+        :param details: An extended details dict for a test outcome.
+        """
+        self._stream.write(_b(" [ multipart\n"))
+        for name, content in sorted(details.items()):
+            self._stream.write(_b("Content-Type: %s/%s" %
+                (content.content_type.type, content.content_type.subtype)))
+            parameters = content.content_type.parameters
+            if parameters:
+                self._stream.write(_b(";"))
+                param_strs = []
+                for param, value in parameters.items():
+                    param_strs.append("%s=%s" % (param, value))
+                self._stream.write(_b(",".join(param_strs)))
+            self._stream.write(_b("\n%s\n" % name))
+            encoder = chunked.Encoder(self._stream)
+            list(map(encoder.write, content.iter_bytes()))
+            encoder.close()
+
+    def done(self):
+        """Obey the testtools result.done() interface."""
+
+
+def RemoteError(description=_u("")):
+    return (_StringException, _StringException(description), None)
+
+
+class RemotedTestCase(unittest.TestCase):
+    """A class to represent test cases run in child processes.
+
+    Instances of this class are used to provide the Python test API a TestCase
+    that can be printed to the screen, introspected for metadata and so on.
+    However, as they are simply a memoisation of a test that was actually
+    run in the past by a separate process, they cannot perform any interactive
+    actions.
+    """
+
+    def __eq__ (self, other):
+        try:
+            return self.__description == other.__description
+        except AttributeError:
+            return False
+
+    def __init__(self, description):
+        """Create a pseudo test case with description description."""
+        self.__description = description
+
+    def error(self, label):
+        raise NotImplementedError("%s on RemotedTestCases is not permitted." %
+            label)
+
+    def setUp(self):
+        self.error("setUp")
+
+    def tearDown(self):
+        self.error("tearDown")
+
+    def shortDescription(self):
+        return self.__description
+
+    def id(self):
+        return "%s" % (self.__description,)
+
+    def __str__(self):
+        return "%s (%s)" % (self.__description, self._strclass())
+
+    def __repr__(self):
+        return "<%s description='%s'>" % \
+               (self._strclass(), self.__description)
+
+    def run(self, result=None):
+        if result is None: result = self.defaultTestResult()
+        result.startTest(self)
+        result.addError(self, RemoteError(_u("Cannot run RemotedTestCases.\n")))
+        result.stopTest(self)
+
+    def _strclass(self):
+        cls = self.__class__
+        return "%s.%s" % (cls.__module__, cls.__name__)
+
+
+class ExecTestCase(unittest.TestCase):
+    """A test case which runs external scripts for test fixtures."""
+
+    def __init__(self, methodName='runTest'):
+        """Create an instance of the class that will use the named test
+           method when executed. Raises a ValueError if the instance does
+           not have a method with the specified name.
+        """
+        unittest.TestCase.__init__(self, methodName)
+        testMethod = getattr(self, methodName)
+        self.script = join_dir(sys.modules[self.__class__.__module__].__file__,
+                               testMethod.__doc__)
+
+    def countTestCases(self):
+        return 1
+
+    def run(self, result=None):
+        if result is None: result = self.defaultTestResult()
+        self._run(result)
+
+    def debug(self):
+        """Run the test without collecting errors in a TestResult"""
+        self._run(testresult.TestResult())
+
+    def _run(self, result):
+        protocol = TestProtocolServer(result)
+        process = subprocess.Popen(self.script, shell=True,
+            stdout=subprocess.PIPE)
+        make_stream_binary(process.stdout)
+        output = process.communicate()[0]
+        protocol.readFrom(BytesIO(output))
+
+
+class IsolatedTestCase(unittest.TestCase):
+    """A TestCase which executes in a forked process.
+
+    Each test gets its own process, which has a performance overhead but will
+    provide excellent isolation from global state (such as django configs,
+    zope utilities and so on).
+    """
+
+    def run(self, result=None):
+        if result is None: result = self.defaultTestResult()
+        run_isolated(unittest.TestCase, self, result)
+
+
+class IsolatedTestSuite(unittest.TestSuite):
+    """A TestSuite which runs its tests in a forked process.
+
+    This decorator will fork() before running the tests and report the
+    results from the child process using a Subunit stream.  This is useful for
+    handling tests that mutate global state, or are testing C extensions that
+    could crash the VM.
+    """
+
+    def run(self, result=None):
+        if result is None: result = testresult.TestResult()
+        run_isolated(unittest.TestSuite, self, result)
+
+
+def run_isolated(klass, self, result):
+    """Run a test suite or case in a subprocess, using the run method on klass.
+    """
+    c2pread, c2pwrite = os.pipe()
+    # fixme - error -> result
+    # now fork
+    pid = os.fork()
+    if pid == 0:
+        # Child
+        # Close parent's pipe ends
+        os.close(c2pread)
+        # Dup fds for child
+        os.dup2(c2pwrite, 1)
+        # Close pipe fds.
+        os.close(c2pwrite)
+
+        # at this point, sys.stdin is redirected, now we want
+        # to filter it to escape ]'s.
+        ### XXX: test and write that bit.
+        stream = os.fdopen(1, 'wb')
+        result = TestProtocolClient(stream)
+        klass.run(self, result)
+        stream.flush()
+        sys.stderr.flush()
+        # exit HARD, exit NOW.
+        os._exit(0)
+    else:
+        # Parent
+        # Close child pipe ends
+        os.close(c2pwrite)
+        # hookup a protocol engine
+        protocol = TestProtocolServer(result)
+        fileobj = os.fdopen(c2pread, 'rb')
+        protocol.readFrom(fileobj)
+        os.waitpid(pid, 0)
+        # TODO return code evaluation.
+    return result
+
+
+def TAP2SubUnit(tap, output_stream):
+    """Filter a TAP pipe into a subunit pipe.
+
+    This should be invoked once per TAP script, as TAP scripts get
+    mapped to a single runnable case with multiple components.
+
+    :param tap: A tap pipe/stream/file object - should emit unicode strings.
+    :param output_stream: A pipe/stream/file object to write subunit results to.
+    :return: The exit code to exit with.
+    """
+    output = StreamResultToBytes(output_stream)
+    UTF8_TEXT = 'text/plain; charset=UTF8'
+    BEFORE_PLAN = 0
+    AFTER_PLAN = 1
+    SKIP_STREAM = 2
+    state = BEFORE_PLAN
+    plan_start = 1
+    plan_stop = 0
+    # Test data for the next test to emit
+    test_name = None
+    log = []
+    result = None
+    def missing_test(plan_start):
+        output.status(test_id='test %d' % plan_start,
+            test_status='fail', runnable=False,
+            mime_type=UTF8_TEXT, eof=True, file_name="tap meta",
+            file_bytes=b"test missing from TAP output")
+    def _emit_test():
+        "write out a test"
+        if test_name is None:
+            return
+        if log:
+            log_bytes = b'\n'.join(log_line.encode('utf8') for log_line in log)
+            mime_type = UTF8_TEXT
+            file_name = 'tap comment'
+            eof = True
+        else:
+            log_bytes = None
+            mime_type = None
+            file_name = None
+            eof = True
+        del log[:]
+        output.status(test_id=test_name, test_status=result,
+            file_bytes=log_bytes, mime_type=mime_type, eof=eof,
+            file_name=file_name, runnable=False)
+    for line in tap:
+        if state == BEFORE_PLAN:
+            match = re.match("(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
+            if match:
+                state = AFTER_PLAN
+                _, plan_stop, comment = match.groups()
+                plan_stop = int(plan_stop)
+                if plan_start > plan_stop and plan_stop == 0:
+                    # skipped file
+                    state = SKIP_STREAM
+                    output.status(test_id='file skip', test_status='skip',
+                        file_bytes=comment.encode('utf8'), eof=True,
+                        file_name='tap comment')
+                continue
+        # not a plan line, or have seen one before
+        match = re.match("(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
+        if match:
+            # new test, emit current one.
+            _emit_test()
+            status, number, description, directive, directive_comment = match.groups()
+            if status == 'ok':
+                result = 'success'
+            else:
+                result = "fail"
+            if description is None:
+                description = ''
+            else:
+                description = ' ' + description
+            if directive is not None:
+                if directive.upper() == 'TODO':
+                    result = 'xfail'
+                elif directive.upper() == 'SKIP':
+                    result = 'skip'
+                if directive_comment is not None:
+                    log.append(directive_comment)
+            if number is not None:
+                number = int(number)
+                while plan_start < number:
+                    missing_test(plan_start)
+                    plan_start += 1
+            test_name = "test %d%s" % (plan_start, description)
+            plan_start += 1
+            continue
+        match = re.match("Bail out\!(?:\s*(.*))?\n", line)
+        if match:
+            reason, = match.groups()
+            if reason is None:
+                extra = ''
+            else:
+                extra = ' %s' % reason
+            _emit_test()
+            test_name = "Bail out!%s" % extra
+            result = "fail"
+            state = SKIP_STREAM
+            continue
+        match = re.match("\#.*\n", line)
+        if match:
+            log.append(line[:-1])
+            continue
+        # Should look at buffering status and binding this to the prior result.
+        output.status(file_bytes=line.encode('utf8'), file_name='stdout',
+            mime_type=UTF8_TEXT)
+    _emit_test()
+    while plan_start <= plan_stop:
+        # record missed tests
+        missing_test(plan_start)
+        plan_start += 1
+    return 0
+
+
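+# Editor's sketch (not part of upstream subunit): feeding a tiny, made-up TAP
+# transcript through TAP2SubUnit and collecting the resulting byte stream.
+def _example_tap_conversion():
+    from io import BytesIO
+    tap_lines = ["1..2\n", "ok 1 addition\n", "not ok 2 subtraction\n"]
+    out = BytesIO()
+    TAP2SubUnit(iter(tap_lines), out)
+    # out now holds the equivalent subunit (v2) byte stream.
+    return out.getvalue()
+
+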
+def tag_stream(original, filtered, tags):
+    """Alter tags on a stream.
+
+    :param original: The input stream.
+    :param filtered: The output stream.
+    :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
+        '-TAG' commands.
+
+        A 'TAG' command will add the tag to the output stream,
+        and override any existing '-TAG' command in that stream.
+        Specifically:
+         * A global 'tags: TAG' will be added to the start of the stream.
+         * Any tags commands with -TAG will have the -TAG removed.
+
+        A '-TAG' command will remove the TAG command from the stream.
+        Specifically:
+         * A 'tags: -TAG' command will be added to the start of the stream.
+         * Any 'tags: TAG' command will have 'TAG' removed from it.
+        Additionally, any redundant tagging commands (adding a tag globally
+        present, or removing a tag globally removed) are stripped as a
+        by-product of the filtering.
+    :return: 0
+    """
+    new_tags, gone_tags = tags_to_new_gone(tags)
+    source = ByteStreamToStreamResult(original, non_subunit_name='stdout')
+    class Tagger(CopyStreamResult):
+        def status(self, **kwargs):
+            tags = kwargs.get('test_tags')
+            if not tags:
+                tags = set()
+            tags.update(new_tags)
+            tags.difference_update(gone_tags)
+            if tags:
+                kwargs['test_tags'] = tags
+            else:
+                kwargs['test_tags'] = None
+            super(Tagger, self).status(**kwargs)
+    output = Tagger([StreamResultToBytes(filtered)])
+    source.run(output)
+    return 0
+
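+# Editor's sketch (not part of upstream subunit): retagging an in-memory
+# subunit v2 byte stream. The tag names are arbitrary examples.
+def _example_tag_stream(stream_bytes):
+    from io import BytesIO
+    retagged = BytesIO()
+    # Add the 'quick' tag and drop any 'slow' tag from every event.
+    tag_stream(BytesIO(stream_bytes), retagged, ['quick', '-slow'])
+    return retagged.getvalue()
+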
+
+class ProtocolTestCase(object):
+    """Subunit wire protocol to unittest.TestCase adapter.
+
+    ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
+    calling a ProtocolTestCase or invoking the run() method will make a 'test
+    run' happen. The 'test run' will simply be a replay of the test activity
+    that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
+    and ``countTestCases`` methods are not supported because there isn't a
+    sensible mapping for those methods.
+
+    # Get a stream (any object with a readline() method), in this case the
+    # stream output by the example from ``subunit.TestProtocolClient``.
+    stream = open('tests.log', 'rb')
+    # Create a parser which will read from the stream and emit
+    # activity to a unittest.TestResult when run() is called.
+    suite = subunit.ProtocolTestCase(stream)
+    # Create a result object to accept the contents of that stream.
+    result = unittest._TextTestResult(sys.stdout)
+    # 'run' the tests - process the stream and feed its contents to result.
+    suite.run(result)
+    stream.close()
+
+    :seealso: TestProtocolServer (the subunit wire protocol parser).
+    """
+
+    def __init__(self, stream, passthrough=None, forward=None):
+        """Create a ProtocolTestCase reading from stream.
+
+        :param stream: A filelike object which a subunit stream can be read
+            from.
+        :param passthrough: A stream to pass non-subunit input on to. If not
+            supplied, the TestProtocolServer default is used.
+        :param forward: A stream to pass subunit input on to. If not supplied,
+            subunit input is not forwarded.
+        """
+        stream = make_stream_binary(stream)
+        self._stream = stream
+        self._passthrough = passthrough
+        if forward is not None:
+            forward = make_stream_binary(forward)
+        self._forward = forward
+
+    def __call__(self, result=None):
+        return self.run(result)
+
+    def run(self, result=None):
+        if result is None:
+            result = self.defaultTestResult()
+        protocol = TestProtocolServer(result, self._passthrough, self._forward)
+        line = self._stream.readline()
+        while line:
+            protocol.lineReceived(line)
+            line = self._stream.readline()
+        protocol.lostConnection()
+
+
+class TestResultStats(testresult.TestResult):
+    """A pyunit TestResult interface implementation for making statistics.
+
+    :ivar total_tests: The total tests seen.
+    :ivar passed_tests: The tests that passed.
+    :ivar failed_tests: The tests that failed.
+    :ivar seen_tags: The tags seen across all tests.
+    """
+
+    def __init__(self, stream):
+        """Create a TestResultStats which outputs to stream."""
+        testresult.TestResult.__init__(self)
+        self._stream = stream
+        self.failed_tests = 0
+        self.skipped_tests = 0
+        self.seen_tags = set()
+
+    @property
+    def total_tests(self):
+        return self.testsRun
+
+    def addError(self, test, err, details=None):
+        self.failed_tests += 1
+
+    def addFailure(self, test, err, details=None):
+        self.failed_tests += 1
+
+    def addSkip(self, test, reason, details=None):
+        self.skipped_tests += 1
+
+    def formatStats(self):
+        self._stream.write("Total tests:   %5d\n" % self.total_tests)
+        self._stream.write("Passed tests:  %5d\n" % self.passed_tests)
+        self._stream.write("Failed tests:  %5d\n" % self.failed_tests)
+        self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
+        tags = sorted(self.seen_tags)
+        self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
+
+    @property
+    def passed_tests(self):
+        return self.total_tests - self.failed_tests - self.skipped_tests
+
+    def tags(self, new_tags, gone_tags):
+        """Accumulate the seen tags."""
+        self.seen_tags.update(new_tags)
+
+    def wasSuccessful(self):
+        """Tells whether or not this result was a success"""
+        return self.failed_tests == 0
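+
+# NOTE (editorial illustration, not upstream subunit code): TestResultStats is
+# typically fed by replaying a stream through ProtocolTestCase, assuming a v1
+# stream saved as the hypothetical file 'tests.subunit':
+#
+#   with open('tests.subunit', 'rb') as stream:
+#       stats = TestResultStats(sys.stdout)
+#       ProtocolTestCase(stream).run(stats)
+#       stats.formatStats()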
+
+
+def get_default_formatter():
+    """Obtain the default formatter to write to.
+
+    :return: A file-like object.
+    """
+    formatter = os.getenv("SUBUNIT_FORMATTER")
+    if formatter:
+        return os.popen(formatter, "w")
+    else:
+        stream = sys.stdout
+        if sys.version_info > (3, 0):
+            if safe_hasattr(stream, 'buffer'):
+                stream = stream.buffer
+        return stream
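+
+# NOTE (editorial illustration, not upstream subunit code): the formatter
+# honours $SUBUNIT_FORMATTER, so a byte-stream writer for the preferred
+# destination can be built with:
+#
+#   stream = get_default_formatter()
+#   result = StreamResultToBytes(stream)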
+
+
+def read_test_list(path):
+    """Read a list of test ids from a file on disk.
+
+    :param path: Path to the file
+    :return: Sequence of test ids
+    """
+    f = open(path, 'rb')
+    try:
+        return [l.rstrip("\n") for l in f.readlines()]
+    finally:
+        f.close()
+
+
+def make_stream_binary(stream):
+    """Ensure that a stream will be binary safe. See _make_binary_on_windows.
+
+    :return: A binary version of the same stream (some streams cannot be
+        'fixed' but can be unwrapped).
+    """
+    try:
+        fileno = stream.fileno()
+    except (_UnsupportedOperation, AttributeError):
+        pass
+    else:
+        _make_binary_on_windows(fileno)
+    return _unwrap_text(stream)
+
+
+def _make_binary_on_windows(fileno):
+    """Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
+    if sys.platform == "win32":
+        import msvcrt
+        msvcrt.setmode(fileno, os.O_BINARY)
+
+
+def _unwrap_text(stream):
+    """Unwrap stream if it is a text stream to get the original buffer."""
+    if sys.version_info > (3, 0):
+        unicode_type = str
+    else:
+        unicode_type = unicode
+    try:
+        # Read streams
+        if type(stream.read(0)) is unicode_type:
+            return stream.buffer
+    except (_UnsupportedOperation, IOError):
+        # Cannot read from the stream: try via writes
+        try:
+            stream.write(_b(''))
+        except TypeError:
+            return stream.buffer
+    return stream
diff --git a/third_party/subunit/python/subunit/_output.py b/third_party/subunit/python/subunit/_output.py
new file mode 100644
index 0000000..aa92646
--- /dev/null
+++ b/third_party/subunit/python/subunit/_output.py
@@ -0,0 +1,203 @@
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2013 Subunit Contributors
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import datetime
+from functools import partial
+from optparse import (
+    OptionGroup,
+    OptionParser,
+    OptionValueError,
+)
+import sys
+
+from subunit import make_stream_binary
+from subunit.iso8601 import UTC
+from subunit.v2 import StreamResultToBytes
+
+
+_FINAL_ACTIONS = frozenset([
+    'exists',
+    'fail',
+    'skip',
+    'success',
+    'uxsuccess',
+    'xfail',
+])
+_ALL_ACTIONS = _FINAL_ACTIONS.union(['inprogress'])
+_CHUNK_SIZE=3670016 # 3.5 MiB
+
+
+def output_main():
+    args = parse_arguments()
+    output = StreamResultToBytes(sys.stdout)
+    generate_stream_results(args, output)
+    return 0
+
+
+def parse_arguments(args=None, ParserClass=OptionParser):
+    """Parse arguments from the command line.
+
+    If specified, args must be a list of strings, similar to sys.argv[1:].
+
+    ParserClass may be specified to override the class we use to parse the
+    command-line arguments. This is useful for testing.
+    """
+    parser = ParserClass(
+        prog="subunit-output",
+        description="A tool to generate a subunit v2 result byte-stream",
+        usage="subunit-output [-h] [status TEST_ID] [options]",
+    )
+    parser.set_default('tags', None)
+    parser.set_default('test_id', None)
+
+    status_commands = OptionGroup(
+        parser,
+        "Status Commands",
+        "These options report the status of a test. TEST_ID must be a string "
+            "that uniquely identifies the test."
+    )
+    for action_name in _ALL_ACTIONS:
+        status_commands.add_option(
+            "--%s" % action_name,
+            nargs=1,
+            action="callback",
+            callback=set_status_cb,
+            callback_args=(action_name,),
+            dest="action",
+            metavar="TEST_ID",
+            help="Report a test status."
+        )
+    parser.add_option_group(status_commands)
+
+    file_commands = OptionGroup(
+        parser,
+        "File Options",
+        "These options control attaching data to a result stream. They can "
+            "either be specified with a status command, in which case the file "
+            "is attached to the test status, or by themselves, in which case "
+            "the file is attached to the stream (and not associated with any "
+            "test id)."
+    )
+    file_commands.add_option(
+        "--attach-file",
+        help="Attach a file to the result stream for this test. If '-' is "
+            "specified, stdin will be read instead. In this case, the file "
+            "name will be set to 'stdin' (but can still be overridden with "
+            "the --file-name option)."
+    )
+    file_commands.add_option(
+        "--file-name",
+        help="The name to give this file attachment. If not specified, the "
+            "name of the file on disk will be used, or 'stdin' in the case "
+            "where '-' was passed to the '--attach-file' argument. This option"
+            " may only be specified when '--attach-file' is specified.",
+        )
+    file_commands.add_option(
+        "--mimetype",
+        help="The mime type to send with this file. This is only used if the "
+            "--attach-file argument is used. This argument is optional. If it "
+            "is not specified, the file will be sent without a mime type. This "
+            "option may only be specified when '--attach-file' is specified.",
+        default=None
+    )
+    parser.add_option_group(file_commands)
+
+    parser.add_option(
+        "--tag",
+        help="Specifies a tag. May be used multiple times",
+        action="append",
+        dest="tags",
+        default=[]
+    )
+
+    (options, args) = parser.parse_args(args)
+    if options.mimetype and not options.attach_file:
+        parser.error("Cannot specify --mimetype without --attach-file")
+    if options.file_name and not options.attach_file:
+        parser.error("Cannot specify --file-name without --attach-file")
+    if options.attach_file:
+        if options.attach_file == '-':
+            if not options.file_name:
+                options.file_name = 'stdin'
+                options.attach_file = make_stream_binary(sys.stdin)
+        else:
+            try:
+                options.attach_file = open(options.attach_file, 'rb')
+            except IOError as e:
+                parser.error("Cannot open %s (%s)" % (options.attach_file, e.strerror))
+
+    return options
+
+
+def set_status_cb(option, opt_str, value, parser, status_name):
+    if getattr(parser.values, "action", None) is not None:
+        raise OptionValueError("argument %s: Only one status may be specified at once." % opt_str)
+
+    if len(parser.rargs) == 0:
+        raise OptionValueError("argument %s: must specify a single TEST_ID." % opt_str)
+    parser.values.action = status_name
+    parser.values.test_id = parser.rargs.pop(0)
+
+
+def generate_stream_results(args, output_writer):
+    output_writer.startTestRun()
+
+    if args.attach_file:
+        reader = partial(args.attach_file.read, _CHUNK_SIZE)
+        this_file_hunk = reader()
+        next_file_hunk = reader()
+
+    is_first_packet = True
+    is_last_packet = False
+    while not is_last_packet:
+        write_status = output_writer.status
+
+        if is_first_packet:
+            if args.attach_file:
+                if args.mimetype:
+                    write_status = partial(write_status, mime_type=args.mimetype)
+            if args.tags:
+                write_status = partial(write_status, test_tags=set(args.tags))
+            write_status = partial(write_status, timestamp=create_timestamp())
+            if args.action not in _FINAL_ACTIONS:
+                write_status = partial(write_status, test_status=args.action)
+            is_first_packet = False
+
+        if args.attach_file:
+            filename = args.file_name or args.attach_file.name
+            write_status = partial(write_status, file_name=filename, file_bytes=this_file_hunk)
+            if next_file_hunk == b'':
+                write_status = partial(write_status, eof=True)
+                is_last_packet = True
+            else:
+                this_file_hunk = next_file_hunk
+                next_file_hunk = reader()
+        else:
+            is_last_packet = True
+
+        if args.test_id:
+            write_status = partial(write_status, test_id=args.test_id)
+
+        if is_last_packet:
+            if args.action in _FINAL_ACTIONS:
+                write_status = partial(write_status, test_status=args.action)
+
+        write_status()
+
+    output_writer.stopTestRun()
+
+
+def create_timestamp():
+    return datetime.datetime.now(UTC)
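+
+# NOTE (editorial illustration, not upstream subunit code): the helpers above
+# can also be driven without the subunit-output entry point; the test id
+# 'test-1' and tag 'quick' below are hypothetical:
+#
+#   args = parse_arguments(['--success', 'test-1', '--tag', 'quick'])
+#   generate_stream_results(
+#       args, StreamResultToBytes(make_stream_binary(sys.stdout)))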
diff --git a/third_party/subunit/python/subunit/chunked.py b/third_party/subunit/python/subunit/chunked.py
new file mode 100644
index 0000000..b992129
--- /dev/null
+++ b/third_party/subunit/python/subunit/chunked.py
@@ -0,0 +1,185 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#  Copyright (C) 2011  Martin Pool <mbp at sourcefrog.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Encoder/decoder for http style chunked encoding."""
+
+from testtools.compat import _b
+
+empty = _b('')
+
+class Decoder(object):
+    """Decode chunked content to a byte stream."""
+
+    def __init__(self, output, strict=True):
+        """Create a decoder decoding to output.
+
+        :param output: A file-like object. Bytes written to the Decoder are
+            decoded to strip off the chunking and written to the output.
+            Up to a full write worth of data or a single control line may be
+            buffered (whichever is larger). The close method should be called
+            when no more data is available, to detect short streams; the
+            write method will return non-None when the end of a stream is
+            detected. The output object must accept bytes objects.
+
+        :param strict: If True (the default), the decoder will not knowingly
+            accept input that is not conformant to the HTTP specification.
+            (This does not imply that it will catch every nonconformance.)
+            If False, it will accept incorrect input that is still
+            unambiguous.
+        """
+        self.output = output
+        self.buffered_bytes = []
+        self.state = self._read_length
+        self.body_length = 0
+        self.strict = strict
+        self._match_chars = _b("0123456789abcdefABCDEF\r\n")
+        self._slash_n = _b('\n')
+        self._slash_r = _b('\r')
+        self._slash_rn = _b('\r\n')
+        self._slash_nr = _b('\n\r')
+
+    def close(self):
+        """Close the decoder.
+
+        :raises ValueError: If the stream is incomplete.
+        """
+        if self.state != self._finished:
+            raise ValueError("incomplete stream")
+
+    def _finished(self):
+        """Finished reading, return any remaining bytes."""
+        if self.buffered_bytes:
+            buffered_bytes = self.buffered_bytes
+            self.buffered_bytes = []
+            return empty.join(buffered_bytes)
+        else:
+            raise ValueError("stream is finished")
+
+    def _read_body(self):
+        """Pass body bytes to the output."""
+        while self.body_length and self.buffered_bytes:
+            if self.body_length >= len(self.buffered_bytes[0]):
+                self.output.write(self.buffered_bytes[0])
+                self.body_length -= len(self.buffered_bytes[0])
+                del self.buffered_bytes[0]
+                # No more data available.
+                if not self.body_length:
+                    self.state = self._read_length
+            else:
+                self.output.write(self.buffered_bytes[0][:self.body_length])
+                self.buffered_bytes[0] = \
+                    self.buffered_bytes[0][self.body_length:]
+                self.body_length = 0
+                self.state = self._read_length
+                return self.state()
+
+    def _read_length(self):
+        """Try to decode a length from the bytes."""
+        count_chars = []
+        for bytes in self.buffered_bytes:
+            for pos in range(len(bytes)):
+                byte = bytes[pos:pos+1]
+                if byte not in self._match_chars:
+                    break
+                count_chars.append(byte)
+                if byte == self._slash_n:
+                    break
+        if not count_chars:
+            return
+        if count_chars[-1] != self._slash_n:
+            return
+        count_str = empty.join(count_chars)
+        if self.strict:
+            if count_str[-2:] != self._slash_rn:
+                raise ValueError("chunk header invalid: %r" % count_str)
+            if self._slash_r in count_str[:-2]:
+                raise ValueError("too many CRs in chunk header %r" % count_str)
+        self.body_length = int(count_str.rstrip(self._slash_nr), 16)
+        excess_bytes = len(count_str)
+        while excess_bytes:
+            if excess_bytes >= len(self.buffered_bytes[0]):
+                excess_bytes -= len(self.buffered_bytes[0])
+                del self.buffered_bytes[0]
+            else:
+                self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
+                excess_bytes = 0
+        if not self.body_length:
+            self.state = self._finished
+            if not self.buffered_bytes:
+                # May not call into self._finished with no buffered data.
+                return empty
+        else:
+            self.state = self._read_body
+        return self.state()
+
+    def write(self, bytes):
+        """Decode bytes to the output stream.
+
+        :raises ValueError: If the stream has already seen the end of file
+            marker.
+        :returns: None, or the excess bytes beyond the end of file marker.
+        """
+        if bytes:
+            self.buffered_bytes.append(bytes)
+        return self.state()
+
+
+class Encoder(object):
+    """Encode content to a stream using HTTP Chunked coding."""
+
+    def __init__(self, output):
+        """Create an encoder encoding to output.
+
+        :param output: A file-like object. Bytes written to the Encoder
+            will be encoded using HTTP chunking. Small writes may be buffered
+            and the ``close`` method must be called to finish the stream.
+        """
+        self.output = output
+        self.buffered_bytes = []
+        self.buffer_size = 0
+
+    def flush(self, extra_len=0):
+        """Flush the encoder to the output stream.
+
+        :param extra_len: Increase the size of the chunk by this many bytes
+            to allow for a subsequent write.
+        """
+        if not self.buffer_size and not extra_len:
+            return
+        buffered_bytes = self.buffered_bytes
+        buffer_size = self.buffer_size
+        self.buffered_bytes = []
+        self.buffer_size = 0
+        self.output.write(_b("%X\r\n" % (buffer_size + extra_len)))
+        if buffer_size:
+            self.output.write(empty.join(buffered_bytes))
+        return True
+
+    def write(self, bytes):
+        """Encode bytes to the output stream."""
+        bytes_len = len(bytes)
+        if self.buffer_size + bytes_len >= 65536:
+            self.flush(bytes_len)
+            self.output.write(bytes)
+        else:
+            self.buffered_bytes.append(bytes)
+            self.buffer_size += bytes_len
+
+    def close(self):
+        """Finish the stream. This does not close the output stream."""
+        self.flush()
+        self.output.write(_b("0\r\n"))
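+
+# NOTE (editorial illustration, not upstream subunit code): Encoder and
+# Decoder round-trip bytes through the chunked coding, for example:
+#
+#   from io import BytesIO
+#   wire, plain = BytesIO(), BytesIO()
+#   enc = Encoder(wire)
+#   enc.write(_b("hello world"))
+#   enc.close()
+#   dec = Decoder(plain)
+#   dec.write(wire.getvalue())
+#   dec.close()
+#   assert plain.getvalue() == _b("hello world")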
diff --git a/third_party/subunit/python/subunit/details.py b/third_party/subunit/python/subunit/details.py
new file mode 100644
index 0000000..ffdfd7a
--- /dev/null
+++ b/third_party/subunit/python/subunit/details.py
@@ -0,0 +1,119 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Handlers for outcome details."""
+
+from testtools import content, content_type
+from testtools.compat import _b, BytesIO
+
+from subunit import chunked
+
+end_marker = _b("]\n")
+quoted_marker = _b(" ]")
+empty = _b('')
+
+
+class DetailsParser(object):
+    """Base class/API reference for details parsing."""
+
+
+class SimpleDetailsParser(DetailsParser):
+    """Parser for single-part [] delimited details."""
+
+    def __init__(self, state):
+        self._message = _b("")
+        self._state = state
+
+    def lineReceived(self, line):
+        if line == end_marker:
+            self._state.endDetails()
+            return
+        if line[0:2] == quoted_marker:
+            # quoted ] start
+            self._message += line[1:]
+        else:
+            self._message += line
+
+    def get_details(self, style=None):
+        result = {}
+        if not style:
+            # We know that subunit/testtools serialise [] formatted
+            # tracebacks as utf8, but perhaps we need a ReplacingContent
+            # or something like that.
+            result['traceback'] = content.Content(
+                content_type.ContentType("text", "x-traceback",
+                {"charset": "utf8"}),
+                lambda:[self._message])
+        else:
+            if style == 'skip':
+                name = 'reason'
+            else:
+                name = 'message'
+            result[name] = content.Content(
+                content_type.ContentType("text", "plain"),
+                lambda:[self._message])
+        return result
+
+    def get_message(self):
+        return self._message
+
+
+class MultipartDetailsParser(DetailsParser):
+    """Parser for multi-part [] surrounded MIME typed chunked details."""
+
+    def __init__(self, state):
+        self._state = state
+        self._details = {}
+        self._parse_state = self._look_for_content
+
+    def _look_for_content(self, line):
+        if line == end_marker:
+            self._state.endDetails()
+            return
+        # TODO error handling
+        field, value = line[:-1].decode('utf8').split(' ', 1)
+        try:
+            main, sub = value.split('/')
+        except ValueError:
+            raise ValueError("Invalid MIME type %r" % value)
+        self._content_type = content_type.ContentType(main, sub)
+        self._parse_state = self._get_name
+
+    def _get_name(self, line):
+        self._name = line[:-1].decode('utf8')
+        self._body = BytesIO()
+        self._chunk_parser = chunked.Decoder(self._body)
+        self._parse_state = self._feed_chunks
+
+    def _feed_chunks(self, line):
+        residue = self._chunk_parser.write(line)
+        if residue is not None:
+            # Line based use always ends on no residue.
+            assert residue == empty, 'residue: %r' % (residue,)
+            body = self._body
+            self._details[self._name] = content.Content(
+                self._content_type, lambda:[body.getvalue()])
+            self._chunk_parser.close()
+            self._parse_state = self._look_for_content
+
+    def get_details(self, for_skip=False):
+        return self._details
+
+    def get_message(self):
+        return None
+
+    def lineReceived(self, line):
+        self._parse_state(line)
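+
+# NOTE (editorial illustration, not upstream subunit code): SimpleDetailsParser
+# accumulates "[ ... ]" quoted detail bodies line by line; the hypothetical
+# state object below only needs an endDetails() method:
+#
+#   class _DoneState(object):
+#       def endDetails(self):
+#           pass
+#
+#   parser = SimpleDetailsParser(_DoneState())
+#   for line in [_b("traceback line\n"), _b(" ]quoted bracket\n"), _b("]\n")]:
+#       parser.lineReceived(line)
+#   parser.get_message()  # the accumulated bytes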
diff --git a/third_party/subunit/python/subunit/filters.py b/third_party/subunit/python/subunit/filters.py
new file mode 100644
index 0000000..48f7948
--- /dev/null
+++ b/third_party/subunit/python/subunit/filters.py
@@ -0,0 +1,206 @@
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+
+from optparse import OptionParser
+import sys
+
+from extras import safe_hasattr
+from testtools import CopyStreamResult, StreamResult, StreamResultRouter
+
+from subunit import (
+    DiscardStream, ProtocolTestCase, ByteStreamToStreamResult,
+    StreamResultToBytes,
+    )
+from subunit.test_results import CatFiles
+
+
+def make_options(description):
+    parser = OptionParser(description=description)
+    parser.add_option(
+        "--no-passthrough", action="store_true",
+        help="Hide all non subunit input.", default=False,
+        dest="no_passthrough")
+    parser.add_option(
+        "-o", "--output-to",
+        help="Send the output to this path rather than stdout.")
+    parser.add_option(
+        "-f", "--forward", action="store_true", default=False,
+        help="Forward subunit stream on stdout. When set, received "
+            "non-subunit output will be encapsulated in subunit.")
+    return parser
+
+
+def run_tests_from_stream(input_stream, result, passthrough_stream=None,
+    forward_stream=None, protocol_version=1, passthrough_subunit=True):
+    """Run tests from a subunit input stream through 'result'.
+
+    Non-test events - top level file attachments - are expected to be
+    dropped by v2 StreamResults at the present time (as all the analysis code
+    is in ExtendedTestResult APIs), so to implement passthrough_stream they
+    are diverted and copied directly when that is set.
+
+    :param input_stream: A stream containing subunit input.
+    :param result: A TestResult that will receive the test events.
+        NB: This should be an ExtendedTestResult for v1 and a StreamResult for
+        v2.
+    :param passthrough_stream: All non-subunit input received will be
+        sent to this stream.  If not provided, uses the ``TestProtocolServer``
+        default, which is ``sys.stdout``.
+    :param forward_stream: All subunit input received will be forwarded
+        to this stream. If not provided, uses the ``TestProtocolServer``
+        default, which is to not forward any input. Do not set this when
+        transforming the stream - items would be double-reported.
+    :param protocol_version: What version of the subunit protocol to expect.
+    :param passthrough_subunit: If True, passthrough should be as subunit;
+        otherwise unwrap it. Only has effect when forward_stream is None
+        (when forwarding, non-subunit input is always turned into subunit).
+    """
+    if 1==protocol_version:
+        test = ProtocolTestCase(
+            input_stream, passthrough=passthrough_stream,
+            forward=forward_stream)
+    elif 2==protocol_version:
+        # In all cases we encapsulate unknown inputs.
+        if forward_stream is not None:
+            # Send events to forward_stream as subunit.
+            forward_result = StreamResultToBytes(forward_stream)
+            # If we're passing non-subunit through, copy:
+            if passthrough_stream is None:
+                # Not passing non-test events - split them off to nothing.
+                router = StreamResultRouter(forward_result)
+                router.add_rule(StreamResult(), 'test_id', test_id=None)
+                result = CopyStreamResult([router, result])
+            else:
+                # otherwise, copy all events to forward_result
+                result = CopyStreamResult([forward_result, result])
+        elif passthrough_stream is not None:
+            if not passthrough_subunit:
+                # Route non-test events to passthrough_stream, unwrapping them for
+                # display.
+                passthrough_result = CatFiles(passthrough_stream)
+            else:
+                passthrough_result = StreamResultToBytes(passthrough_stream)
+            result = StreamResultRouter(result)
+            result.add_rule(passthrough_result, 'test_id', test_id=None)
+        test = ByteStreamToStreamResult(input_stream,
+            non_subunit_name='stdout')
+    else:
+        raise Exception("Unknown protocol version.")
+    result.startTestRun()
+    test.run(result)
+    result.stopTestRun()
+
+
+def filter_by_result(result_factory, output_path, passthrough, forward,
+                     input_stream=sys.stdin, protocol_version=1,
+                     passthrough_subunit=True):
+    """Filter an input stream using a test result.
+
+    :param result_factory: A callable that when passed an output stream
+        returns a TestResult.  It is expected that this result will output
+        to the given stream.
+    :param output_path: A path to send output to.  If None, output will go
+        to ``sys.stdout``.
+    :param passthrough: If True, all non-subunit input will be sent to
+        ``sys.stdout``.  If False, that input will be discarded.
+    :param forward: If True, all subunit input will be forwarded directly to
+        ``sys.stdout`` as well as to the ``TestResult``.
+    :param input_stream: The source of subunit input.  Defaults to
+        ``sys.stdin``.
+    :param protocol_version: The subunit protocol version to expect.
+    :param passthrough_subunit: If True, passthrough should be as subunit.
+    :return: A test result with the results of the run.
+    """
+    if passthrough:
+        passthrough_stream = sys.stdout
+    else:
+        if 1==protocol_version:
+            passthrough_stream = DiscardStream()
+        else:
+            passthrough_stream = None
+
+    if forward:
+        forward_stream = sys.stdout
+    elif 1==protocol_version:
+        forward_stream = DiscardStream()
+    else:
+        forward_stream = None
+
+    if output_path is None:
+        output_to = sys.stdout
+    else:
+        output_to = open(output_path, 'wb')
+
+    try:
+        result = result_factory(output_to)
+        run_tests_from_stream(
+            input_stream, result, passthrough_stream, forward_stream,
+            protocol_version=protocol_version,
+            passthrough_subunit=passthrough_subunit)
+    finally:
+        if output_path:
+            output_to.close()
+    return result
+
+
+def run_filter_script(result_factory, description, post_run_hook=None,
+    protocol_version=1, passthrough_subunit=True):
+    """Main function for simple subunit filter scripts.
+
+    Many subunit filter scripts take a stream of subunit input and use a
+    TestResult to handle the events generated by that stream.  This function
+    wraps a lot of the boiler-plate around that by making a script with
+    options for handling passthrough information and stream forwarding, and
+    that will exit with a successful return code (i.e. 0) if the input stream
+    represents a successful test run.
+
+    :param result_factory: A callable that takes an output stream and returns
+        a test result that outputs to that stream.
+    :param description: A description of the filter script.
+    :param protocol_version: What protocol version to consume/emit.
+    :param passthrough_subunit: If True, passthrough should be as subunit.
+    """
+    parser = make_options(description)
+    (options, args) = parser.parse_args()
+    result = filter_by_result(
+        result_factory, options.output_to, not options.no_passthrough,
+        options.forward, protocol_version=protocol_version,
+        passthrough_subunit=passthrough_subunit,
+        input_stream=find_stream(sys.stdin, args))
+    if post_run_hook:
+        post_run_hook(result)
+    if not safe_hasattr(result, 'wasSuccessful'):
+        result = result.decorated
+    if result.wasSuccessful():
+        sys.exit(0)
+    else:
+        sys.exit(1)
+
+
+def find_stream(stdin, argv):
+    """Find a stream to use as input for filters.
+
+    :param stdin: Standard in - used if no files are named in argv.
+    :param argv: Command line arguments after option parsing. If one file
+        is named, that is opened in read only binary mode and returned.
+        A missing file will raise an exception, as will multiple file names.
+    """
+    assert len(argv) < 2, "Too many filenames."
+    if argv:
+        return open(argv[0], 'rb')
+    else:
+        return stdin
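+
+# NOTE (editorial illustration, not upstream subunit code): a tiny filter that
+# consumes v1 subunit on stdin and prints aggregate statistics, built from the
+# helpers above:
+#
+#   from subunit import TestResultStats
+#   result = filter_by_result(TestResultStats, None, False, False,
+#                             protocol_version=1)
+#   result.formatStats()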
diff --git a/third_party/subunit/python/subunit/iso8601.py b/third_party/subunit/python/subunit/iso8601.py
new file mode 100644
index 0000000..700d016
--- /dev/null
+++ b/third_party/subunit/python/subunit/iso8601.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2007 Michael Twomey
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""ISO 8601 date time string parsing
+
+Basic usage:
+>>> import iso8601
+>>> iso8601.parse_date("2007-01-25T12:00:00Z")
+datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
+>>>
+
+"""
+
+from datetime import datetime, timedelta, tzinfo
+import re
+import sys
+
+__all__ = ["parse_date", "ParseError"]
+
+# Adapted from http://delete.me.uk/2005/03/iso8601.html
+ISO8601_REGEX_PATTERN = (r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
+    r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
+    r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
+)
+TIMEZONE_REGEX_PATTERN = "(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})"
+ISO8601_REGEX = re.compile(ISO8601_REGEX_PATTERN.encode('utf8'))
+TIMEZONE_REGEX = re.compile(TIMEZONE_REGEX_PATTERN.encode('utf8'))
+
+zulu = "Z".encode('latin-1')
+minus = "-".encode('latin-1')
+
+if sys.version_info < (3, 0):
+    bytes = str
+
+
+class ParseError(Exception):
+    """Raised when there is a problem parsing a date string"""
+
+# Yoinked from python docs
+ZERO = timedelta(0)
+class Utc(tzinfo):
+    """UTC
+
+    """
+    def utcoffset(self, dt):
+        return ZERO
+
+    def tzname(self, dt):
+        return "UTC"
+
+    def dst(self, dt):
+        return ZERO
+UTC = Utc()
+
+class FixedOffset(tzinfo):
+    """Fixed offset in hours and minutes from UTC
+
+    """
+    def __init__(self, offset_hours, offset_minutes, name):
+        self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
+        self.__name = name
+
+    def utcoffset(self, dt):
+        return self.__offset
+
+    def tzname(self, dt):
+        return self.__name
+
+    def dst(self, dt):
+        return ZERO
+
+    def __repr__(self):
+        return "<FixedOffset %r>" % self.__name
+
+def parse_timezone(tzstring, default_timezone=UTC):
+    """Parses ISO 8601 time zone specs into tzinfo offsets
+
+    """
+    if tzstring == zulu:
+        return default_timezone
+    # This isn't strictly correct, but it's common to encounter dates without
+    # timezones so I'll assume the default (which defaults to UTC).
+    # Addresses issue 4.
+    if tzstring is None:
+        return default_timezone
+    m = TIMEZONE_REGEX.match(tzstring)
+    prefix, hours, minutes = m.groups()
+    hours, minutes = int(hours), int(minutes)
+    if prefix == minus:
+        hours = -hours
+        minutes = -minutes
+    return FixedOffset(hours, minutes, tzstring)
+
+def parse_date(datestring, default_timezone=UTC):
+    """Parses ISO 8601 dates into datetime objects
+
+    The timezone is parsed from the date string. However it is quite common to
+    have dates without a timezone (not strictly correct). In this case the
+    default timezone specified in default_timezone is used. This is UTC by
+    default.
+    """
+    if not isinstance(datestring, bytes):
+        raise ParseError("Expecting bytes %r" % datestring)
+    m = ISO8601_REGEX.match(datestring)
+    if not m:
+        raise ParseError("Unable to parse date string %r" % datestring)
+    groups = m.groupdict()
+    tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
+    if groups["fraction"] is None:
+        groups["fraction"] = 0
+    else:
+        groups["fraction"] = int(float("0.%s" % groups["fraction"].decode()) * 1e6)
+    return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
+        int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
+        int(groups["fraction"]), tz)
diff --git a/third_party/subunit/python/subunit/progress_model.py b/third_party/subunit/python/subunit/progress_model.py
new file mode 100644
index 0000000..3e721db
--- /dev/null
+++ b/third_party/subunit/python/subunit/progress_model.py
@@ -0,0 +1,105 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Support for dealing with progress state."""
+
+class ProgressModel(object):
+    """A model of progress indicators as subunit defines it.
+
+    Instances of this class represent a single logical operation that is
+    progressing. The operation may have many steps, and some of those steps may
+    supply their own progress information. ProgressModel uses a nested concept
+    where the overall state can be pushed, creating new starting state, and
+    later pushed to return to the prior state. Many user interfaces will want
+    to display an overall summary though, and accordingly the pos() and width()
+    methods return overall summary information rather than information on the
+    current subtask.
+
+    The default state is 0/0 - indicating that the overall progress is unknown.
+    Any time the denominator of pos/width is 0, rendering of a ProgressModel
+    should take this into consideration.
+
+    :ivar: _tasks. This private attribute stores the subtasks. Each is a tuple:
+        pos, width, overall_numerator, overall_denominator. The overall fields
+        store the calculated overall numerator and denominator for the state
+        that was pushed.
+    """
+
+    def __init__(self):
+        """Create a ProgressModel.
+
+        The new model has no progress data at all - it will claim a summary
+        width of zero and position of 0.
+        """
+        self._tasks = []
+        self.push()
+
+    def adjust_width(self, offset):
+        """Adjust the with of the current subtask."""
+        self._tasks[-1][1] += offset
+
+    def advance(self):
+        """Advance the current subtask."""
+        self._tasks[-1][0] += 1
+
+    def pop(self):
+        """Pop a subtask off the ProgressModel.
+
+        See push for a description of how push and pop work.
+        """
+        self._tasks.pop()
+
+    def pos(self):
+        """Return how far through the operation has progressed."""
+        if not self._tasks:
+            return 0
+        task = self._tasks[-1]
+        if len(self._tasks) > 1:
+            # scale up the overall pos by the current task or preserve it if
+            # no current width is known.
+            offset = task[2] * (task[1] or 1)
+        else:
+            offset = 0
+        return offset + task[0]
+
+    def push(self):
+        """Push a new subtask.
+
+        After pushing a new subtask, the overall progress hasn't changed. Calls
+        to adjust_width, advance and set_width will then only alter the
+        progress within the range that a single call to 'advance' would have
+        covered before - the subtask represents progressing one step in the
+        earlier task.
+
+        Call pop() to restore the progress model to the state before push was
+        called.
+        """
+        self._tasks.append([0, 0, self.pos(), self.width()])
+
+    def set_width(self, width):
+        """Set the width of the current subtask."""
+        self._tasks[-1][1] = width
+
+    def width(self):
+        """Return the total width of the operation."""
+        if not self._tasks:
+            return 0
+        task = self._tasks[-1]
+        if len(self._tasks) > 1:
+            # scale up the overall width by the current task or preserve it if
+            # no current width is known.
+            return task[3] * (task[1] or 1)
+        else:
+            return task[1]
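+
+# NOTE (editorial illustration, not upstream subunit code): nested progress,
+# where a subtask of width 2 progresses half of one step of the outer task:
+#
+#   model = ProgressModel()
+#   model.set_width(3)
+#   model.advance()      # pos() == 1, width() == 3
+#   model.push()
+#   model.set_width(2)
+#   model.advance()      # pos() == 3, width() == 6
+#   model.pop()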
diff --git a/third_party/subunit/python/subunit/run.py b/third_party/subunit/python/subunit/run.py
new file mode 100755
index 0000000..ee31fe9
--- /dev/null
+++ b/third_party/subunit/python/subunit/run.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Simple subunit testrunner for python
+# Copyright (C) Jelmer Vernooij <jelmer at samba.org> 2007
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Run a unittest testcase reporting results as Subunit.
+
+  $ python -m subunit.run mylib.tests.test_suite
+"""
+
+import io
+import os
+import sys
+
+from testtools import ExtendedToStreamDecorator
+from testtools.testsuite import iterate_tests
+
+from subunit import StreamResultToBytes, get_default_formatter
+from subunit.test_results import AutoTimingTestResultDecorator
+from testtools.run import (
+    BUFFEROUTPUT,
+    CATCHBREAK,
+    FAILFAST,
+    list_test,
+    TestProgram,
+    USAGE_AS_MAIN,
+    )
+
+
+class SubunitTestRunner(object):
+    def __init__(self, verbosity=None, failfast=None, buffer=None, stream=None,
+        stdout=None):
+        """Create a TestToolsTestRunner.
+
+        :param verbosity: Ignored.
+        :param failfast: Stop running tests at the first failure.
+        :param buffer: Ignored.
+        :param stream: Upstream unittest stream parameter.
+        :param stdout: Testtools stream parameter.
+
+        Either stream or stdout can be supplied, and stream will take
+        precedence.
+        """
+        self.failfast = failfast
+        self.stream = stream or stdout or sys.stdout
+
+    def run(self, test):
+        "Run the given test case or test suite."
+        result, _ = self._list(test)
+        result = ExtendedToStreamDecorator(result)
+        result = AutoTimingTestResultDecorator(result)
+        if self.failfast is not None:
+            result.failfast = self.failfast
+        result.startTestRun()
+        try:
+            test(result)
+        finally:
+            result.stopTestRun()
+        return result
+
+    def list(self, test):
+        "List the test."
+        result, errors = self._list(test)
+        if errors:
+            failed_descr = '\n'.join(errors).encode('utf8')
+            result.status(file_name="import errors", runnable=False,
+                file_bytes=failed_descr, mime_type="text/plain;charset=utf8")
+            sys.exit(2)
+
+    def _list(self, test):
+        test_ids, errors = list_test(test)
+        try:
+            fileno = self.stream.fileno()
+        except:
+            fileno = None
+        if fileno is not None:
+            stream = os.fdopen(fileno, 'wb', 0)
+        else:
+            stream = self.stream
+        result = StreamResultToBytes(stream)
+        for test_id in test_ids:
+            result.status(test_id=test_id, test_status='exists')
+        return result, errors
+
+
+class SubunitTestProgram(TestProgram):
+
+    USAGE = USAGE_AS_MAIN
+
+    def usageExit(self, msg=None):
+        if msg:
+            print (msg)
+        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
+                 'buffer': ''}
+        if self.failfast != False:
+            usage['failfast'] = FAILFAST
+        if self.catchbreak != False:
+            usage['catchbreak'] = CATCHBREAK
+        if self.buffer != False:
+            usage['buffer'] = BUFFEROUTPUT
+        usage_text = self.USAGE % usage
+        usage_lines = usage_text.split('\n')
+        usage_lines.insert(2, "Run a test suite with a subunit reporter.")
+        usage_lines.insert(3, "")
+        print('\n'.join(usage_lines))
+        sys.exit(2)
+
+
+def main(argv=None, stdout=None):
+    if argv is None:
+        argv = sys.argv
+    runner = SubunitTestRunner
+    # stdout is None except in unit tests.
+    if stdout is None:
+        stdout = sys.stdout
+        # XXX: This is broken code - SUBUNIT_FORMATTER is not being honoured.
+        stream = get_default_formatter()
+        # Disable the default buffering, for Python 2.x where pdb doesn't do it
+        # on non-ttys.
+        if hasattr(stdout, 'fileno'):
+            # Patch stdout to be unbuffered, so that pdb works well on 2.6/2.7.
+            binstdout = io.open(stdout.fileno(), 'wb', 0)
+            if sys.version_info[0] > 2:
+                sys.stdout = io.TextIOWrapper(binstdout, encoding=sys.stdout.encoding)
+            else:
+                sys.stdout = binstdout
+            stdout = sys.stdout
+    SubunitTestProgram(module=None, argv=argv, testRunner=runner,
+        stdout=stdout, exit=False)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/third_party/subunit/python/subunit/test_results.py b/third_party/subunit/python/subunit/test_results.py
new file mode 100644
index 0000000..b3ca968
--- /dev/null
+++ b/third_party/subunit/python/subunit/test_results.py
@@ -0,0 +1,728 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""TestResult helper classes used to by subunit."""
+
+import csv
+import datetime
+
+import testtools
+from testtools.content import (
+    text_content,
+    TracebackContent,
+    )
+from testtools import StreamResult
+
+from subunit import iso8601
+import subunit
+
+
+# NOT a TestResult, because we are implementing the interface, not inheriting
+# it.
+class TestResultDecorator(object):
+    """General pass-through decorator.
+
+    This provides a base that other TestResults can inherit from to
+    gain basic forwarding functionality. It also takes care of
+    handling the case where the target doesn't support newer methods
+    or features by degrading them.
+    """
+
+    # XXX: Since lp:testtools r250, this is in testtools. Once it's released,
+    # we should gut this and just use that.
+
+    def __init__(self, decorated):
+        """Create a TestResultDecorator forwarding to decorated."""
+        # Make every decorator degrade gracefully.
+        self.decorated = testtools.ExtendedToOriginalDecorator(decorated)
+
+    def startTest(self, test):
+        return self.decorated.startTest(test)
+
+    def startTestRun(self):
+        return self.decorated.startTestRun()
+
+    def stopTest(self, test):
+        return self.decorated.stopTest(test)
+
+    def stopTestRun(self):
+        return self.decorated.stopTestRun()
+
+    def addError(self, test, err=None, details=None):
+        return self.decorated.addError(test, err, details=details)
+
+    def addFailure(self, test, err=None, details=None):
+        return self.decorated.addFailure(test, err, details=details)
+
+    def addSuccess(self, test, details=None):
+        return self.decorated.addSuccess(test, details=details)
+
+    def addSkip(self, test, reason=None, details=None):
+        return self.decorated.addSkip(test, reason, details=details)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        return self.decorated.addExpectedFailure(test, err, details=details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        return self.decorated.addUnexpectedSuccess(test, details=details)
+
+    def _get_failfast(self):
+        return getattr(self.decorated, 'failfast', False)
+
+    def _set_failfast(self, value):
+        self.decorated.failfast = value
+    failfast = property(_get_failfast, _set_failfast)
+
+    def progress(self, offset, whence):
+        return self.decorated.progress(offset, whence)
+
+    def wasSuccessful(self):
+        return self.decorated.wasSuccessful()
+
+    @property
+    def shouldStop(self):
+        return self.decorated.shouldStop
+
+    def stop(self):
+        return self.decorated.stop()
+
+    @property
+    def testsRun(self):
+        return self.decorated.testsRun
+
+    def tags(self, new_tags, gone_tags):
+        return self.decorated.tags(new_tags, gone_tags)
+
+    def time(self, a_datetime):
+        return self.decorated.time(a_datetime)
+
+
+class HookedTestResultDecorator(TestResultDecorator):
+    """A TestResult which calls a hook on every event."""
+
+    def __init__(self, decorated):
+        self.super = super(HookedTestResultDecorator, self)
+        self.super.__init__(decorated)
+
+    def startTest(self, test):
+        self._before_event()
+        return self.super.startTest(test)
+
+    def startTestRun(self):
+        self._before_event()
+        return self.super.startTestRun()
+
+    def stopTest(self, test):
+        self._before_event()
+        return self.super.stopTest(test)
+
+    def stopTestRun(self):
+        self._before_event()
+        return self.super.stopTestRun()
+
+    def addError(self, test, err=None, details=None):
+        self._before_event()
+        return self.super.addError(test, err, details=details)
+
+    def addFailure(self, test, err=None, details=None):
+        self._before_event()
+        return self.super.addFailure(test, err, details=details)
+
+    def addSuccess(self, test, details=None):
+        self._before_event()
+        return self.super.addSuccess(test, details=details)
+
+    def addSkip(self, test, reason=None, details=None):
+        self._before_event()
+        return self.super.addSkip(test, reason, details=details)
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._before_event()
+        return self.super.addExpectedFailure(test, err, details=details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self._before_event()
+        return self.super.addUnexpectedSuccess(test, details=details)
+
+    def progress(self, offset, whence):
+        self._before_event()
+        return self.super.progress(offset, whence)
+
+    def wasSuccessful(self):
+        self._before_event()
+        return self.super.wasSuccessful()
+
+    @property
+    def shouldStop(self):
+        self._before_event()
+        return self.super.shouldStop
+
+    def stop(self):
+        self._before_event()
+        return self.super.stop()
+
+    def time(self, a_datetime):
+        self._before_event()
+        return self.super.time(a_datetime)
+
+
+class AutoTimingTestResultDecorator(HookedTestResultDecorator):
+    """Decorate a TestResult to add time events to a test run.
+
+    By default this will cause a time event before every test event,
+    but if explicit time data is being provided by the test run, then
+    this decorator will turn itself off to prevent causing confusion.
+    """
+
+    def __init__(self, decorated):
+        self._time = None
+        super(AutoTimingTestResultDecorator, self).__init__(decorated)
+
+    def _before_event(self):
+        time = self._time
+        if time is not None:
+            return
+        time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
+        self.decorated.time(time)
+
+    def progress(self, offset, whence):
+        return self.decorated.progress(offset, whence)
+
+    @property
+    def shouldStop(self):
+        return self.decorated.shouldStop
+
+    def time(self, a_datetime):
+        """Provide a timestamp for the current test activity.
+
+        :param a_datetime: If None, automatically add timestamps before every
+            event (this is the default behaviour if time() is not called at
+            all).  If not None, pass the provided time onto the decorated
+            result object and disable automatic timestamps.
+        """
+        self._time = a_datetime
+        return self.decorated.time(a_datetime)
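+
+# NOTE (editorial illustration, not upstream subunit code): wrapping a plain
+# TestResult so that every event is preceded by a UTC timestamp unless the
+# stream supplies its own:
+#
+#   import unittest
+#   timed = AutoTimingTestResultDecorator(unittest.TestResult())
+#   timed.startTestRun()
+#   timed.time(None)   # None keeps automatic timestamps enabled
+#   timed.stopTestRun()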
+
+
+class TagsMixin(object):
+
+    def __init__(self):
+        self._clear_tags()
+
+    def _clear_tags(self):
+        self._global_tags = set(), set()
+        self._test_tags = None
+
+    def _get_active_tags(self):
+        global_new, global_gone = self._global_tags
+        if self._test_tags is None:
+            return set(global_new)
+        test_new, test_gone = self._test_tags
+        return global_new.difference(test_gone).union(test_new)
+
+    def _get_current_scope(self):
+        if self._test_tags:
+            return self._test_tags
+        return self._global_tags
+
+    def _flush_current_scope(self, tag_receiver):
+        new_tags, gone_tags = self._get_current_scope()
+        if new_tags or gone_tags:
+            tag_receiver.tags(new_tags, gone_tags)
+        if self._test_tags:
+            self._test_tags = set(), set()
+        else:
+            self._global_tags = set(), set()
+
+    def startTestRun(self):
+        self._clear_tags()
+
+    def startTest(self, test):
+        self._test_tags = set(), set()
+
+    def stopTest(self, test):
+        self._test_tags = None
+
+    def tags(self, new_tags, gone_tags):
+        """Handle tag instructions.
+
+        Adds and removes tags as appropriate. If a test is currently running,
+        the tags apply only to that test and do not affect subsequent tests.
+
+        :param new_tags: Tags to add.
+        :param gone_tags: Tags to remove.
+        """
+        current_new_tags, current_gone_tags = self._get_current_scope()
+        current_new_tags.update(new_tags)
+        current_new_tags.difference_update(gone_tags)
+        current_gone_tags.update(gone_tags)
+        current_gone_tags.difference_update(new_tags)
+
+
+class TagCollapsingDecorator(HookedTestResultDecorator, TagsMixin):
+    """Collapses many 'tags' calls into one where possible."""
+
+    def __init__(self, result):
+        super(TagCollapsingDecorator, self).__init__(result)
+        self._clear_tags()
+
+    def _before_event(self):
+        self._flush_current_scope(self.decorated)
+
+    def tags(self, new_tags, gone_tags):
+        TagsMixin.tags(self, new_tags, gone_tags)
+
+
+class TimeCollapsingDecorator(HookedTestResultDecorator):
+    """Only pass on the first and last of a consecutive sequence of times."""
+
+    def __init__(self, decorated):
+        super(TimeCollapsingDecorator, self).__init__(decorated)
+        self._last_received_time = None
+        self._last_sent_time = None
+
+    def _before_event(self):
+        if self._last_received_time is None:
+            return
+        if self._last_received_time != self._last_sent_time:
+            self.decorated.time(self._last_received_time)
+            self._last_sent_time = self._last_received_time
+        self._last_received_time = None
+
+    def time(self, a_time):
+        # Don't upcall, because we don't want to call _before_event; it is
+        # only for non-time events.
+        if self._last_received_time is None:
+            self.decorated.time(a_time)
+            self._last_sent_time = a_time
+        self._last_received_time = a_time
+
+
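+# Illustrative usage (an assumption, mirroring _PredicateFilter below): the
+# collapsing decorators are typically stacked, so that redundant tags() and
+# time() calls are squashed before they reach the real result.
+#
+#   compact = TimeCollapsingDecorator(TagCollapsingDecorator(real_result))
+
+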
+def and_predicates(predicates):
+    """Return a predicate that is true iff all predicates are true."""
+    # XXX: Should probably be in testtools to be better used by matchers. jml
+    return lambda *args, **kwargs: all(p(*args, **kwargs) for p in predicates)
+
+
+def make_tag_filter(with_tags, without_tags):
+    """Make a callback that checks tests against tags."""
+
+    with_tags = set(with_tags) if with_tags else None
+    without_tags = set(without_tags) if without_tags else None
+
+    def check_tags(test, outcome, err, details, tags):
+        if with_tags and not with_tags <= tags:
+            return False
+        if without_tags and bool(without_tags & tags):
+            return False
+        return True
+
+    return check_tags
+
+
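+# Illustrative sketch of the resulting predicate (not part of upstream): keep
+# tests tagged 'smoke' unless they are also tagged 'slow'.
+#
+#   keep = make_tag_filter(['smoke'], ['slow'])
+#   keep(test, 'success', None, None, set(['smoke']))           # -> True
+#   keep(test, 'success', None, None, set(['smoke', 'slow']))   # -> False
+
+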
+class _PredicateFilter(TestResultDecorator, TagsMixin):
+
+    def __init__(self, result, predicate):
+        super(_PredicateFilter, self).__init__(result)
+        self._clear_tags()
+        self.decorated = TimeCollapsingDecorator(
+            TagCollapsingDecorator(self.decorated))
+        self._predicate = predicate
+        # The current test (for filtering tags)
+        self._current_test = None
+        # Has the current test been filtered (for outputting test tags)
+        self._current_test_filtered = None
+        # Calls to this result that we don't know whether to forward on yet.
+        self._buffered_calls = []
+
+    def filter_predicate(self, test, outcome, error, details):
+        return self._predicate(
+            test, outcome, error, details, self._get_active_tags())
+
+    def addError(self, test, err=None, details=None):
+        if self.filter_predicate(test, 'error', err, details):
+            self._buffered_calls.append(
+                ('addError', [test, err], {'details': details}))
+        else:
+            self._filtered()
+
+    def addFailure(self, test, err=None, details=None):
+        if self.filter_predicate(test, 'failure', err, details):
+            self._buffered_calls.append(
+                ('addFailure', [test, err], {'details': details}))
+        else:
+            self._filtered()
+
+    def addSkip(self, test, reason=None, details=None):
+        if self.filter_predicate(test, 'skip', reason, details):
+            self._buffered_calls.append(
+                ('addSkip', [test, reason], {'details': details}))
+        else:
+            self._filtered()
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        if self.filter_predicate(test, 'expectedfailure', err, details):
+            self._buffered_calls.append(
+                ('addExpectedFailure', [test, err], {'details': details}))
+        else:
+            self._filtered()
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self._buffered_calls.append(
+            ('addUnexpectedSuccess', [test], {'details': details}))
+
+    def addSuccess(self, test, details=None):
+        if self.filter_predicate(test, 'success', None, details):
+            self._buffered_calls.append(
+                ('addSuccess', [test], {'details': details}))
+        else:
+            self._filtered()
+
+    def _filtered(self):
+        self._current_test_filtered = True
+
+    def startTest(self, test):
+        """Start a test.
+
+        Not passed directly to the client, but used to handle tags
+        correctly.
+        """
+        TagsMixin.startTest(self, test)
+        self._current_test = test
+        self._current_test_filtered = False
+        self._buffered_calls.append(('startTest', [test], {}))
+
+    def stopTest(self, test):
+        """Stop a test.
+
+        Not passed directly to the client, but used to handle tags
+        correctly.
+        """
+        if not self._current_test_filtered:
+            for method, args, kwargs in self._buffered_calls:
+                getattr(self.decorated, method)(*args, **kwargs)
+            self.decorated.stopTest(test)
+        self._current_test = None
+        self._current_test_filtered = None
+        self._buffered_calls = []
+        TagsMixin.stopTest(self, test)
+
+    def tags(self, new_tags, gone_tags):
+        TagsMixin.tags(self, new_tags, gone_tags)
+        if self._current_test is not None:
+            self._buffered_calls.append(('tags', [new_tags, gone_tags], {}))
+        else:
+            return super(_PredicateFilter, self).tags(new_tags, gone_tags)
+
+    def time(self, a_time):
+        return self.decorated.time(a_time)
+
+    def id_to_orig_id(self, id):
+        if id.startswith("subunit.RemotedTestCase."):
+            return id[len("subunit.RemotedTestCase."):]
+        return id
+
+
+class TestResultFilter(TestResultDecorator):
+    """A pyunit TestResult interface implementation which filters tests.
+
+    Tests that pass the filter are handed on to another TestResult instance
+    for further processing/reporting. To obtain the filtered results,
+    the other instance must be interrogated.
+
+    :ivar result: The result that tests are passed to after filtering.
+    :ivar filter_predicate: The callback run to decide whether to pass
+        a result.
+    """
+
+    def __init__(self, result, filter_error=False, filter_failure=False,
+        filter_success=True, filter_skip=False, filter_xfail=False,
+        filter_predicate=None, fixup_expected_failures=None):
+        """Create a FilterResult object filtering to result.
+
+        :param filter_error: Filter out errors.
+        :param filter_failure: Filter out failures.
+        :param filter_success: Filter out successful tests.
+        :param filter_skip: Filter out skipped tests.
+        :param filter_xfail: Filter out expected failure tests.
+        :param filter_predicate: A callable taking (test, outcome, err,
+            details, tags) and returning True if the result should be passed
+            through.  err and details may be none if no error or extra
+            metadata is available. outcome is the name of the outcome such
+            as 'success' or 'failure'. tags is new in 0.0.8; 0.0.7 filters
+            are still supported but should be updated to accept the tags
+            parameter for efficiency.
+        :param fixup_expected_failures: Set of test ids to consider known
+            failing.
+        """
+        predicates = []
+        if filter_error:
+            predicates.append(
+                lambda t, outcome, e, d, tags: outcome != 'error')
+        if filter_failure:
+            predicates.append(
+                lambda t, outcome, e, d, tags: outcome != 'failure')
+        if filter_success:
+            predicates.append(
+                lambda t, outcome, e, d, tags: outcome != 'success')
+        if filter_skip:
+            predicates.append(
+                lambda t, outcome, e, d, tags: outcome != 'skip')
+        if filter_xfail:
+            predicates.append(
+                lambda t, outcome, e, d, tags: outcome != 'expectedfailure')
+        if filter_predicate is not None:
+            def compat(test, outcome, error, details, tags):
+                # 0.0.7 and earlier did not support the 'tags' parameter.
+                try:
+                    return filter_predicate(
+                        test, outcome, error, details, tags)
+                except TypeError:
+                    return filter_predicate(test, outcome, error, details)
+            predicates.append(compat)
+        predicate = and_predicates(predicates)
+        super(TestResultFilter, self).__init__(
+            _PredicateFilter(result, predicate))
+        if fixup_expected_failures is None:
+            self._fixup_expected_failures = frozenset()
+        else:
+            self._fixup_expected_failures = fixup_expected_failures
+
+    def addError(self, test, err=None, details=None):
+        if self._failure_expected(test):
+            self.addExpectedFailure(test, err=err, details=details)
+        else:
+            super(TestResultFilter, self).addError(
+                test, err=err, details=details)
+
+    def addFailure(self, test, err=None, details=None):
+        if self._failure_expected(test):
+            self.addExpectedFailure(test, err=err, details=details)
+        else:
+            super(TestResultFilter, self).addFailure(
+                test, err=err, details=details)
+
+    def addSuccess(self, test, details=None):
+        if self._failure_expected(test):
+            self.addUnexpectedSuccess(test, details=details)
+        else:
+            super(TestResultFilter, self).addSuccess(test, details=details)
+
+    def _failure_expected(self, test):
+        return (test.id() in self._fixup_expected_failures)
+
+
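+# Illustrative usage (a sketch, not part of upstream): forward only failures
+# and errors from a subunit v1 stream to a plain unittest result.
+# TestProtocolServer is assumed to be importable from the subunit package.
+#
+#   target = unittest.TestResult()
+#   filtered = TestResultFilter(target)   # filter_success=True by default
+#   subunit.TestProtocolServer(filtered).readFrom(sys.stdin)
+#   print(len(target.failures), len(target.errors))
+
+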
+class TestIdPrintingResult(testtools.TestResult):
+    """Print test ids to a stream.
+
+    Implements both TestResult and StreamResult, for compatibility.
+    """
+
+    def __init__(self, stream, show_times=False, show_exists=False):
+        """Create a FilterResult object outputting to stream."""
+        super(TestIdPrintingResult, self).__init__()
+        self._stream = stream
+        self.show_exists = show_exists
+        self.show_times = show_times
+
+    def startTestRun(self):
+        self.failed_tests = 0
+        self.__time = None
+        self._test = None
+        self._test_duration = 0
+        self._active_tests = {}
+
+    def addError(self, test, err):
+        self.failed_tests += 1
+        self._test = test
+
+    def addFailure(self, test, err):
+        self.failed_tests += 1
+        self._test = test
+
+    def addSuccess(self, test):
+        self._test = test
+
+    def addSkip(self, test, reason=None, details=None):
+        self._test = test
+
+    def addUnexpectedSuccess(self, test, details=None):
+        self.failed_tests += 1
+        self._test = test
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        self._test = test
+
+    def reportTest(self, test_id, duration):
+        if self.show_times:
+            # duration may be 0 (no timing data) rather than a timedelta.
+            if not duration:
+                seconds = 0.0
+            else:
+                seconds = duration.seconds
+                seconds += duration.days * 3600 * 24
+                seconds += duration.microseconds / 1000000.0
+            self._stream.write(test_id + ' %0.3f\n' % seconds)
+        else:
+            self._stream.write(test_id + '\n')
+
+    def startTest(self, test):
+        self._start_time = self._time()
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        if not test_id:
+            return
+        if timestamp is not None:
+            self.time(timestamp)
+        if test_status == 'exists':
+            if self.show_exists:
+                self.reportTest(test_id, 0)
+        elif test_status in ('inprogress', None):
+            self._active_tests[test_id] = self._time()
+        else:
+            self._end_test(test_id)
+
+    def _end_test(self, test_id):
+        test_start = self._active_tests.pop(test_id, None)
+        if not test_start:
+            test_duration = 0
+        else:
+            test_duration = self._time() - test_start
+        self.reportTest(test_id, test_duration)
+
+    def stopTest(self, test):
+        test_duration = self._time() - self._start_time
+        self.reportTest(self._test.id(), test_duration)
+
+    def time(self, time):
+        self.__time = time
+
+    def _time(self):
+        return self.__time
+
+    def wasSuccessful(self):
+        "Tells whether or not this result was a success"
+        return self.failed_tests == 0
+
+    def stopTestRun(self):
+        for test_id in list(self._active_tests.keys()):
+            self._end_test(test_id)
+
+
+class TestByTestResult(testtools.TestResult):
+    """Call something every time a test completes."""
+
+# XXX: In testtools since lp:testtools r249.  Once that's released, just
+# import that.
+
+    def __init__(self, on_test):
+        """Construct a ``TestByTestResult``.
+
+        :param on_test: A callable that takes a test case, a status (one of
+            "success", "failure", "error", "skip", or "xfail"), a start time
+            (a ``datetime`` with timezone), a stop time, an iterable of tags,
+            and a details dict. It is called at the end of each test (i.e. on
+            ``stopTest``) with the accumulated values for that test.
+        """
+        super(TestByTestResult, self).__init__()
+        self._on_test = on_test
+
+    def startTest(self, test):
+        super(TestByTestResult, self).startTest(test)
+        self._start_time = self._now()
+        # There's no supported (i.e. tested) behaviour that relies on these
+        # being set, but it makes me more comfortable all the same. -- jml
+        self._status = None
+        self._details = None
+        self._stop_time = None
+
+    def stopTest(self, test):
+        self._stop_time = self._now()
+        super(TestByTestResult, self).stopTest(test)
+        self._on_test(
+            test=test,
+            status=self._status,
+            start_time=self._start_time,
+            stop_time=self._stop_time,
+            # current_tags is new in testtools 0.9.13.
+            tags=getattr(self, 'current_tags', None),
+            details=self._details)
+
+    def _err_to_details(self, test, err, details):
+        if details:
+            return details
+        return {'traceback': TracebackContent(err, test)}
+
+    def addSuccess(self, test, details=None):
+        super(TestByTestResult, self).addSuccess(test)
+        self._status = 'success'
+        self._details = details
+
+    def addFailure(self, test, err=None, details=None):
+        super(TestByTestResult, self).addFailure(test, err, details)
+        self._status = 'failure'
+        self._details = self._err_to_details(test, err, details)
+
+    def addError(self, test, err=None, details=None):
+        super(TestByTestResult, self).addError(test, err, details)
+        self._status = 'error'
+        self._details = self._err_to_details(test, err, details)
+
+    def addSkip(self, test, reason=None, details=None):
+        super(TestByTestResult, self).addSkip(test, reason, details)
+        self._status = 'skip'
+        if details is None:
+            details = {'reason': text_content(reason)}
+        elif reason:
+            # XXX: What if details already has 'reason' key?
+            details['reason'] = text_content(reason)
+        self._details = details
+
+    def addExpectedFailure(self, test, err=None, details=None):
+        super(TestByTestResult, self).addExpectedFailure(test, err, details)
+        self._status = 'xfail'
+        self._details = self._err_to_details(test, err, details)
+
+    def addUnexpectedSuccess(self, test, details=None):
+        super(TestByTestResult, self).addUnexpectedSuccess(test, details)
+        self._status = 'success'
+        self._details = details
+
+
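+# Illustrative usage (not part of upstream): print one line per completed
+# test.  The callback is invoked with keyword arguments, so its parameter
+# names must match those used in stopTest() above; CsvResult below is a
+# fuller example of the same pattern.
+#
+#   result = TestByTestResult(
+#       lambda test, status, start_time, stop_time, tags, details:
+#           print(test.id(), status))
+
+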
+class CsvResult(TestByTestResult):
+
+    def __init__(self, stream):
+        super(CsvResult, self).__init__(self._on_test)
+        self._write_row = csv.writer(stream).writerow
+
+    def _on_test(self, test, status, start_time, stop_time, tags, details):
+        self._write_row([test.id(), status, start_time, stop_time])
+
+    def startTestRun(self):
+        super(CsvResult, self).startTestRun()
+        self._write_row(['test', 'status', 'start_time', 'stop_time'])
+
+
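+# Illustrative usage (a sketch): write a CSV summary of a suite run.
+#
+#   with open('results.csv', 'w') as out:
+#       result = CsvResult(out)
+#       result.startTestRun()
+#       suite.run(result)       # 'suite' is any unittest TestSuite
+#       result.stopTestRun()
+
+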
+class CatFiles(StreamResult):
+    """Cat file attachments received to a stream."""
+
+    def __init__(self, byte_stream):
+        self.stream = subunit.make_stream_binary(byte_stream)
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        if file_name is not None:
+            self.stream.write(file_bytes)
+            self.stream.flush()
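+
+
+# Illustrative usage (a sketch; ByteStreamToStreamResult is assumed to be the
+# subunit v2 parser exported by the subunit package): dump every file
+# attachment in a v2 stream to stdout.
+#
+#   subunit.ByteStreamToStreamResult(sys.stdin).run(CatFiles(sys.stdout))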
diff --git a/third_party/subunit/python/subunit/tests/__init__.py b/third_party/subunit/python/subunit/tests/__init__.py
new file mode 100644
index 0000000..29aed8d
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/__init__.py
@@ -0,0 +1,69 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import sys
+from unittest import TestLoader
+
+from testscenarios import generate_scenarios
+
+
+# Defined before the test module imports below to avoid a circular import.
+# For testing: different Python versions have different str() implementations.
+if sys.version_info > (3, 0):
+    _remote_exception_repr = "testtools.testresult.real._StringException"
+    _remote_exception_str = "Traceback (most recent call last):\ntesttools.testresult.real._StringException"
+    _remote_exception_str_chunked = "57\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+else:
+    _remote_exception_repr = "_StringException"
+    _remote_exception_str = "Traceback (most recent call last):\n_StringException"
+    _remote_exception_str_chunked = "3D\r\n" + _remote_exception_str + ": boo qux\n0\r\n"
+
+
+from subunit.tests import (
+    test_chunked,
+    test_details,
+    test_filters,
+    test_output_filter,
+    test_progress_model,
+    test_run,
+    test_subunit_filter,
+    test_subunit_stats,
+    test_subunit_tags,
+    test_tap2subunit,
+    test_test_protocol,
+    test_test_protocol2,
+    test_test_results,
+    )
+
+
+def test_suite():
+    loader = TestLoader()
+    result = loader.loadTestsFromModule(test_chunked)
+    result.addTest(loader.loadTestsFromModule(test_details))
+    result.addTest(loader.loadTestsFromModule(test_filters))
+    result.addTest(loader.loadTestsFromModule(test_progress_model))
+    result.addTest(loader.loadTestsFromModule(test_test_results))
+    result.addTest(loader.loadTestsFromModule(test_test_protocol))
+    result.addTest(loader.loadTestsFromModule(test_test_protocol2))
+    result.addTest(loader.loadTestsFromModule(test_tap2subunit))
+    result.addTest(loader.loadTestsFromModule(test_subunit_filter))
+    result.addTest(loader.loadTestsFromModule(test_subunit_tags))
+    result.addTest(loader.loadTestsFromModule(test_subunit_stats))
+    result.addTest(loader.loadTestsFromModule(test_run))
+    result.addTests(
+        generate_scenarios(loader.loadTestsFromModule(test_output_filter))
+    )
+    return result
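+
+
+# The suite above is normally run through testtools, e.g. (an assumption
+# based on the usual subunit development workflow):
+#
+#   python -m testtools.run subunit.tests.test_suite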
diff --git a/third_party/subunit/python/subunit/tests/sample-script.py b/third_party/subunit/python/subunit/tests/sample-script.py
new file mode 100755
index 0000000..ee59ffb
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/sample-script.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+import sys
+if sys.platform == "win32":
+    import msvcrt, os
+    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+if len(sys.argv) == 2:
+    # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args
+    # uses this code path to be sure that the arguments were passed to
+    # sample-script.py
+    print("test fail")
+    print("error fail")
+    sys.exit(0)
+print("test old mcdonald")
+print("success old mcdonald")
+print("test bing crosby")
+print("failure bing crosby [")
+print("foo.c:53:ERROR invalid state")
+print("]")
+print("test an error")
+print("error an error")
+sys.exit(0)
diff --git a/third_party/subunit/python/subunit/tests/sample-two-script.py b/third_party/subunit/python/subunit/tests/sample-two-script.py
new file mode 100755
index 0000000..fc73dfc
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/sample-two-script.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+import sys
+print("test old mcdonald")
+print("success old mcdonald")
+print("test bing crosby")
+print("success bing crosby")
+sys.exit(0)
diff --git a/third_party/subunit/python/subunit/tests/test_chunked.py b/third_party/subunit/python/subunit/tests/test_chunked.py
new file mode 100644
index 0000000..3fe7bba
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_chunked.py
@@ -0,0 +1,146 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#  Copyright (C) 2011  Martin Pool <mbp at sourcefrog.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import unittest
+
+from testtools.compat import _b, BytesIO
+
+import subunit.chunked
+
+
+class TestDecode(unittest.TestCase):
+
+    def setUp(self):
+        unittest.TestCase.setUp(self)
+        self.output = BytesIO()
+        self.decoder = subunit.chunked.Decoder(self.output)
+
+    def test_close_read_length_short_errors(self):
+        self.assertRaises(ValueError, self.decoder.close)
+
+    def test_close_body_short_errors(self):
+        self.assertEqual(None, self.decoder.write(_b('2\r\na')))
+        self.assertRaises(ValueError, self.decoder.close)
+
+    def test_close_body_buffered_data_errors(self):
+        self.assertEqual(None, self.decoder.write(_b('2\r')))
+        self.assertRaises(ValueError, self.decoder.close)
+
+    def test_close_after_finished_stream_safe(self):
+        self.assertEqual(None, self.decoder.write(_b('2\r\nab')))
+        self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+        self.decoder.close()
+
+    def test_decode_nothing(self):
+        self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+        self.assertEqual(_b(''), self.output.getvalue())
+
+    def test_decode_serialised_form(self):
+        self.assertEqual(None, self.decoder.write(_b("F\r\n")))
+        self.assertEqual(None, self.decoder.write(_b("serialised\n")))
+        self.assertEqual(_b(''), self.decoder.write(_b("form0\r\n")))
+
+    def test_decode_short(self):
+        self.assertEqual(_b(''), self.decoder.write(_b('3\r\nabc0\r\n')))
+        self.assertEqual(_b('abc'), self.output.getvalue())
+
+    def test_decode_combines_short(self):
+        self.assertEqual(_b(''), self.decoder.write(_b('6\r\nabcdef0\r\n')))
+        self.assertEqual(_b('abcdef'), self.output.getvalue())
+
+    def test_decode_excess_bytes_from_write(self):
+        self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
+        self.assertEqual(_b('abc'), self.output.getvalue())
+
+    def test_decode_write_after_finished_errors(self):
+        self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
+        self.assertRaises(ValueError, self.decoder.write, _b(''))
+
+    def test_decode_hex(self):
+        self.assertEqual(_b(''), self.decoder.write(_b('A\r\n12345678900\r\n')))
+        self.assertEqual(_b('1234567890'), self.output.getvalue())
+
+    def test_decode_long_ranges(self):
+        self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
+        self.assertEqual(None, self.decoder.write(_b('1' * 65536)))
+        self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
+        self.assertEqual(None, self.decoder.write(_b('2' * 65536)))
+        self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
+        self.assertEqual(_b('1' * 65536 + '2' * 65536), self.output.getvalue())
+
+    def test_decode_newline_nonstrict(self):
+        """Tolerate chunk markers with no CR character."""
+        # From <http://pad.lv/505078>
+        self.decoder = subunit.chunked.Decoder(self.output, strict=False)
+        self.assertEqual(None, self.decoder.write(_b('a\n')))
+        self.assertEqual(None, self.decoder.write(_b('abcdeabcde')))
+        self.assertEqual(_b(''), self.decoder.write(_b('0\n')))
+        self.assertEqual(_b('abcdeabcde'), self.output.getvalue())
+
+    def test_decode_strict_newline_only(self):
+        """Reject chunk markers with no CR character in strict mode."""
+        # From <http://pad.lv/505078>
+        self.assertRaises(ValueError,
+            self.decoder.write, _b('a\n'))
+
+    def test_decode_strict_multiple_crs(self):
+        self.assertRaises(ValueError,
+            self.decoder.write, _b('a\r\r\n'))
+
+    def test_decode_short_header(self):
+        self.assertRaises(ValueError,
+            self.decoder.write, _b('\n'))
+
+
+class TestEncode(unittest.TestCase):
+
+    def setUp(self):
+        unittest.TestCase.setUp(self)
+        self.output = BytesIO()
+        self.encoder = subunit.chunked.Encoder(self.output)
+
+    def test_encode_nothing(self):
+        self.encoder.close()
+        self.assertEqual(_b('0\r\n'), self.output.getvalue())
+
+    def test_encode_empty(self):
+        self.encoder.write(_b(''))
+        self.encoder.close()
+        self.assertEqual(_b('0\r\n'), self.output.getvalue())
+
+    def test_encode_short(self):
+        self.encoder.write(_b('abc'))
+        self.encoder.close()
+        self.assertEqual(_b('3\r\nabc0\r\n'), self.output.getvalue())
+
+    def test_encode_combines_short(self):
+        self.encoder.write(_b('abc'))
+        self.encoder.write(_b('def'))
+        self.encoder.close()
+        self.assertEqual(_b('6\r\nabcdef0\r\n'), self.output.getvalue())
+
+    def test_encode_over_9_is_in_hex(self):
+        self.encoder.write(_b('1234567890'))
+        self.encoder.close()
+        self.assertEqual(_b('A\r\n12345678900\r\n'), self.output.getvalue())
+
+    def test_encode_long_ranges_not_combined(self):
+        self.encoder.write(_b('1' * 65536))
+        self.encoder.write(_b('2' * 65536))
+        self.encoder.close()
+        self.assertEqual(_b('10000\r\n' + '1' * 65536 + '10000\r\n' +
+            '2' * 65536 + '0\r\n'), self.output.getvalue())
diff --git a/third_party/subunit/python/subunit/tests/test_details.py b/third_party/subunit/python/subunit/tests/test_details.py
new file mode 100644
index 0000000..41ad852
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_details.py
@@ -0,0 +1,106 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import unittest
+
+from testtools.compat import _b, StringIO
+
+import subunit.tests
+from subunit import content, content_type, details
+
+
+class TestSimpleDetails(unittest.TestCase):
+
+    def test_lineReceived(self):
+        parser = details.SimpleDetailsParser(None)
+        parser.lineReceived(_b("foo\n"))
+        parser.lineReceived(_b("bar\n"))
+        self.assertEqual(_b("foo\nbar\n"), parser._message)
+
+    def test_lineReceived_escaped_bracket(self):
+        parser = details.SimpleDetailsParser(None)
+        parser.lineReceived(_b("foo\n"))
+        parser.lineReceived(_b(" ]are\n"))
+        parser.lineReceived(_b("bar\n"))
+        self.assertEqual(_b("foo\n]are\nbar\n"), parser._message)
+
+    def test_get_message(self):
+        parser = details.SimpleDetailsParser(None)
+        self.assertEqual(_b(""), parser.get_message())
+
+    def test_get_details(self):
+        parser = details.SimpleDetailsParser(None)
+        traceback = ""
+        expected = {}
+        expected['traceback'] = content.Content(
+            content_type.ContentType("text", "x-traceback",
+                {'charset': 'utf8'}),
+            lambda:[_b("")])
+        found = parser.get_details()
+        self.assertEqual(expected.keys(), found.keys())
+        self.assertEqual(expected['traceback'].content_type,
+            found['traceback'].content_type)
+        self.assertEqual(_b('').join(expected['traceback'].iter_bytes()),
+            _b('').join(found['traceback'].iter_bytes()))
+
+    def test_get_details_skip(self):
+        parser = details.SimpleDetailsParser(None)
+        traceback = ""
+        expected = {}
+        expected['reason'] = content.Content(
+            content_type.ContentType("text", "plain"),
+            lambda:[_b("")])
+        found = parser.get_details("skip")
+        self.assertEqual(expected, found)
+
+    def test_get_details_success(self):
+        parser = details.SimpleDetailsParser(None)
+        traceback = ""
+        expected = {}
+        expected['message'] = content.Content(
+            content_type.ContentType("text", "plain"),
+            lambda:[_b("")])
+        found = parser.get_details("success")
+        self.assertEqual(expected, found)
+
+
+class TestMultipartDetails(unittest.TestCase):
+
+    def test_get_message_is_None(self):
+        parser = details.MultipartDetailsParser(None)
+        self.assertEqual(None, parser.get_message())
+
+    def test_get_details(self):
+        parser = details.MultipartDetailsParser(None)
+        self.assertEqual({}, parser.get_details())
+
+    def test_parts(self):
+        parser = details.MultipartDetailsParser(None)
+        parser.lineReceived(_b("Content-Type: text/plain\n"))
+        parser.lineReceived(_b("something\n"))
+        parser.lineReceived(_b("F\r\n"))
+        parser.lineReceived(_b("serialised\n"))
+        parser.lineReceived(_b("form0\r\n"))
+        expected = {}
+        expected['something'] = content.Content(
+            content_type.ContentType("text", "plain"),
+            lambda:[_b("serialised\nform")])
+        found = parser.get_details()
+        self.assertEqual(expected.keys(), found.keys())
+        self.assertEqual(expected['something'].content_type,
+            found['something'].content_type)
+        self.assertEqual(_b('').join(expected['something'].iter_bytes()),
+            _b('').join(found['something'].iter_bytes()))
diff --git a/third_party/subunit/python/subunit/tests/test_filters.py b/third_party/subunit/python/subunit/tests/test_filters.py
new file mode 100644
index 0000000..0a5e7c7
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_filters.py
@@ -0,0 +1,35 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import sys
+from tempfile import NamedTemporaryFile
+
+from testtools import TestCase
+
+from subunit.filters import find_stream
+
+
+class TestFindStream(TestCase):
+
+    def test_no_argv(self):
+        self.assertEqual('foo', find_stream('foo', []))
+
+    def test_opens_file(self):
+        f = NamedTemporaryFile()
+        f.write(b'foo')
+        f.flush()
+        stream = find_stream('bar', [f.name])
+        self.assertEqual(b'foo', stream.read())
diff --git a/third_party/subunit/python/subunit/tests/test_output_filter.py b/third_party/subunit/python/subunit/tests/test_output_filter.py
new file mode 100644
index 0000000..0f61ac5
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_output_filter.py
@@ -0,0 +1,596 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2013 Subunit Contributors
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import datetime
+from functools import partial
+from io import BytesIO, StringIO, TextIOWrapper
+import optparse
+import sys
+from tempfile import NamedTemporaryFile
+
+from contextlib import contextmanager
+from testtools import TestCase
+from testtools.compat import _u
+from testtools.matchers import (
+    Equals,
+    Matcher,
+    MatchesAny,
+    MatchesListwise,
+    Mismatch,
+    raises,
+)
+from testtools.testresult.doubles import StreamResult
+
+from subunit.iso8601 import UTC
+from subunit.v2 import StreamResultToBytes, ByteStreamToStreamResult
+from subunit._output import (
+    _ALL_ACTIONS,
+    _FINAL_ACTIONS,
+    generate_stream_results,
+    parse_arguments,
+)
+import subunit._output as _o
+
+
+class SafeOptionParser(optparse.OptionParser):
+    """An ArgumentParser class that doesn't call sys.exit."""
+
+    def exit(self, status=0, message=""):
+        raise RuntimeError(message)
+
+    def error(self, message):
+        raise RuntimeError(message)
+
+
+safe_parse_arguments = partial(parse_arguments, ParserClass=SafeOptionParser)
+
+
+class TestStatusArgParserTests(TestCase):
+
+    scenarios = [
+        (cmd, dict(command=cmd, option='--' + cmd)) for cmd in _ALL_ACTIONS
+    ]
+
+    def test_can_parse_all_commands_with_test_id(self):
+        test_id = self.getUniqueString()
+        args = safe_parse_arguments(args=[self.option, test_id])
+
+        self.assertThat(args.action, Equals(self.command))
+        self.assertThat(args.test_id, Equals(test_id))
+
+    def test_all_commands_parse_file_attachment(self):
+        with NamedTemporaryFile() as tmp_file:
+            args = safe_parse_arguments(
+                args=[self.option, 'foo', '--attach-file', tmp_file.name]
+            )
+            self.assertThat(args.attach_file.name, Equals(tmp_file.name))
+
+    def test_all_commands_accept_mimetype_argument(self):
+        with NamedTemporaryFile() as tmp_file:
+            args = safe_parse_arguments(
+                args=[self.option, 'foo', '--attach-file', tmp_file.name, '--mimetype', "text/plain"]
+            )
+            self.assertThat(args.mimetype, Equals("text/plain"))
+
+    def test_all_commands_accept_file_name_argument(self):
+        with NamedTemporaryFile() as tmp_file:
+            args = safe_parse_arguments(
+                args=[self.option, 'foo', '--attach-file', tmp_file.name, '--file-name', "foo"]
+            )
+            self.assertThat(args.file_name, Equals("foo"))
+
+    def test_all_commands_accept_tags_argument(self):
+        args = safe_parse_arguments(
+            args=[self.option, 'foo', '--tag', "foo", "--tag", "bar", "--tag", "baz"]
+        )
+        self.assertThat(args.tags, Equals(["foo", "bar", "baz"]))
+
+    def test_attach_file_with_hyphen_opens_stdin(self):
+        self.patch(_o.sys, 'stdin', TextIOWrapper(BytesIO(b"Hello")))
+        args = safe_parse_arguments(
+            args=[self.option, "foo", "--attach-file", "-"]
+        )
+
+        self.assertThat(args.attach_file.read(), Equals(b"Hello"))
+
+    def test_attach_file_with_hyphen_sets_filename_to_stdin(self):
+        args = safe_parse_arguments(
+            args=[self.option, "foo", "--attach-file", "-"]
+        )
+
+        self.assertThat(args.file_name, Equals("stdin"))
+
+    def test_can_override_stdin_filename(self):
+        args = safe_parse_arguments(
+            args=[self.option, "foo", "--attach-file", "-", '--file-name', 'foo']
+        )
+
+        self.assertThat(args.file_name, Equals("foo"))
+
+    def test_requires_test_id(self):
+        fn = lambda: safe_parse_arguments(args=[self.option])
+        self.assertThat(
+            fn,
+            raises(RuntimeError('argument %s: must specify a single TEST_ID.' % self.option))
+        )
+
+
+class ArgParserTests(TestCase):
+
+    def test_can_parse_attach_file_without_test_id(self):
+        with NamedTemporaryFile() as tmp_file:
+            args = safe_parse_arguments(
+                args=["--attach-file", tmp_file.name]
+            )
+            self.assertThat(args.attach_file.name, Equals(tmp_file.name))
+
+    def test_can_run_without_args(self):
+        args = safe_parse_arguments([])
+
+    def test_cannot_specify_more_than_one_status_command(self):
+        fn = lambda: safe_parse_arguments(['--fail', 'foo', '--skip', 'bar'])
+        self.assertThat(
+            fn,
+            raises(RuntimeError('argument --skip: Only one status may be specified at once.'))
+        )
+
+    def test_cannot_specify_mimetype_without_attach_file(self):
+        fn = lambda: safe_parse_arguments(['--mimetype', 'foo'])
+        self.assertThat(
+            fn,
+            raises(RuntimeError('Cannot specify --mimetype without --attach-file'))
+        )
+
+    def test_cannot_specify_filename_without_attach_file(self):
+        fn = lambda: safe_parse_arguments(['--file-name', 'foo'])
+        self.assertThat(
+            fn,
+            raises(RuntimeError('Cannot specify --file-name without --attach-file'))
+        )
+
+    def test_can_specify_tags_without_status_command(self):
+        args = safe_parse_arguments(['--tag', 'foo'])
+        self.assertEqual(['foo'], args.tags)
+
+    def test_must_specify_tags_with_tags_options(self):
+        fn = lambda: safe_parse_arguments(['--fail', 'foo', '--tag'])
+        self.assertThat(
+            fn,
+            MatchesAny(
+                raises(RuntimeError('--tag option requires 1 argument')),
+                raises(RuntimeError('--tag option requires an argument')),
+            )
+        )
+
+def get_result_for(commands):
+    """Get a result object from *commands.
+
+    Runs the 'generate_stream_results' function from subunit._output after
+    parsing *commands as if they were specified on the command line. The
+    resulting bytestream is then converted back into a result object and
+    returned.
+    """
+    result = StreamResult()
+    args = safe_parse_arguments(commands)
+    generate_stream_results(args, result)
+    return result
+
+
+@contextmanager
+def temp_file_contents(data):
+    """Create a temporary file on disk containing 'data'."""
+    with NamedTemporaryFile() as f:
+        f.write(data)
+        f.seek(0)
+        yield f
+
+
+class StatusStreamResultTests(TestCase):
+
+    scenarios = [
+        (s, dict(status=s, option='--' + s)) for s in _ALL_ACTIONS
+    ]
+
+    _dummy_timestamp = datetime.datetime(2013, 1, 1, 0, 0, 0, 0, UTC)
+
+    def setUp(self):
+        super(StatusStreamResultTests, self).setUp()
+        self.patch(_o, 'create_timestamp', lambda: self._dummy_timestamp)
+        self.test_id = self.getUniqueString()
+
+    def test_only_one_packet_is_generated(self):
+        result = get_result_for([self.option, self.test_id])
+        self.assertThat(
+            len(result._events),
+            Equals(3) # startTestRun and stopTestRun are also called, making 3 total.
+        )
+
+    def test_correct_status_is_generated(self):
+        result = get_result_for([self.option, self.test_id])
+
+        self.assertThat(
+            result._events[1],
+            MatchesStatusCall(test_status=self.status)
+        )
+
+    def test_all_commands_generate_tags(self):
+        result = get_result_for([self.option, self.test_id, '--tag', 'hello', '--tag', 'world'])
+        self.assertThat(
+            result._events[1],
+            MatchesStatusCall(test_tags=set(['hello', 'world']))
+        )
+
+    def test_all_commands_generate_timestamp(self):
+        result = get_result_for([self.option, self.test_id])
+
+        self.assertThat(
+            result._events[1],
+            MatchesStatusCall(timestamp=self._dummy_timestamp)
+        )
+
+    def test_all_commands_generate_correct_test_id(self):
+        result = get_result_for([self.option, self.test_id])
+
+        self.assertThat(
+            result._events[1],
+            MatchesStatusCall(test_id=self.test_id)
+        )
+
+    def test_file_is_sent_in_single_packet(self):
+        with temp_file_contents(b"Hello") as f:
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_bytes=b'Hello', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_can_read_binary_files(self):
+        with temp_file_contents(b"\xDE\xAD\xBE\xEF") as f:
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_bytes=b"\xDE\xAD\xBE\xEF", eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_can_read_empty_files(self):
+        with temp_file_contents(b"") as f:
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_bytes=b"", file_name=f.name, eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_can_read_stdin(self):
+        self.patch(_o.sys, 'stdin', TextIOWrapper(BytesIO(b"\xFE\xED\xFA\xCE")))
+        result = get_result_for([self.option, self.test_id, '--attach-file', '-'])
+
+        self.assertThat(
+            result._events,
+            MatchesListwise([
+                MatchesStatusCall(call='startTestRun'),
+                MatchesStatusCall(file_bytes=b"\xFE\xED\xFA\xCE", file_name='stdin', eof=True),
+                MatchesStatusCall(call='stopTestRun'),
+            ])
+        )
+
+    def test_file_is_sent_with_test_id(self):
+        with temp_file_contents(b"Hello") as f:
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'Hello', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_file_is_sent_with_test_status(self):
+        with temp_file_contents(b"Hello") as f:
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_status=self.status, file_bytes=b'Hello', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_file_chunk_size_is_honored(self):
+        with temp_file_contents(b"Hello") as f:
+            self.patch(_o, '_CHUNK_SIZE', 1)
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'H', eof=False),
+                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'e', eof=False),
+                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'l', eof=False),
+                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'l', eof=False),
+                    MatchesStatusCall(test_id=self.test_id, file_bytes=b'o', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_file_mimetype_specified_once_only(self):
+        with temp_file_contents(b"Hi") as f:
+            self.patch(_o, '_CHUNK_SIZE', 1)
+            result = get_result_for([
+                self.option,
+                self.test_id,
+                '--attach-file',
+                f.name,
+                '--mimetype',
+                'text/plain',
+            ])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_id=self.test_id, mime_type='text/plain', file_bytes=b'H', eof=False),
+                    MatchesStatusCall(test_id=self.test_id, mime_type=None, file_bytes=b'i', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_tags_specified_once_only(self):
+        with temp_file_contents(b"Hi") as f:
+            self.patch(_o, '_CHUNK_SIZE', 1)
+            result = get_result_for([
+                self.option,
+                self.test_id,
+                '--attach-file',
+                f.name,
+                '--tag',
+                'foo',
+                '--tag',
+                'bar',
+            ])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_id=self.test_id, test_tags=set(['foo', 'bar'])),
+                    MatchesStatusCall(test_id=self.test_id, test_tags=None),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_timestamp_specified_once_only(self):
+        with temp_file_contents(b"Hi") as f:
+            self.patch(_o, '_CHUNK_SIZE', 1)
+            result = get_result_for([
+                self.option,
+                self.test_id,
+                '--attach-file',
+                f.name,
+            ])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_id=self.test_id, timestamp=self._dummy_timestamp),
+                    MatchesStatusCall(test_id=self.test_id, timestamp=None),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_test_status_specified_once_only(self):
+        with temp_file_contents(b"Hi") as f:
+            self.patch(_o, '_CHUNK_SIZE', 1)
+            result = get_result_for([
+                self.option,
+                self.test_id,
+                '--attach-file',
+                f.name,
+            ])
+
+            # The 'inprogress' status should appear on the first packet only;
+            # all other statuses should appear on the last packet.
+            if self.status in _FINAL_ACTIONS:
+                first_call = MatchesStatusCall(test_id=self.test_id, test_status=None)
+                last_call = MatchesStatusCall(test_id=self.test_id, test_status=self.status)
+            else:
+                first_call = MatchesStatusCall(test_id=self.test_id, test_status=self.status)
+                last_call = MatchesStatusCall(test_id=self.test_id, test_status=None)
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    first_call,
+                    last_call,
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_filename_can_be_overridden(self):
+        with temp_file_contents(b"Hello") as f:
+            specified_file_name = self.getUniqueString()
+            result = get_result_for([
+                self.option,
+                self.test_id,
+                '--attach-file',
+                f.name,
+                '--file-name',
+                specified_file_name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_name=specified_file_name, file_bytes=b'Hello'),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_file_name_is_used_by_default(self):
+        with temp_file_contents(b"Hello") as f:
+            result = get_result_for([self.option, self.test_id, '--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_name=f.name, file_bytes=b'Hello', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+
+class FileDataTests(TestCase):
+
+    def test_can_attach_file_without_test_id(self):
+        with temp_file_contents(b"Hello") as f:
+            result = get_result_for(['--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(test_id=None, file_bytes=b'Hello', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_file_name_is_used_by_default(self):
+        with temp_file_contents(b"Hello") as f:
+            result = get_result_for(['--attach-file', f.name])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_name=f.name, file_bytes=b'Hello', eof=True),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_filename_can_be_overridden(self):
+        with temp_file_contents(b"Hello") as f:
+            specified_file_name = self.getUniqueString()
+            result = get_result_for([
+                '--attach-file',
+                f.name,
+                '--file-name',
+                specified_file_name
+            ])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_name=specified_file_name, file_bytes=b'Hello'),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_files_have_timestamp(self):
+        _dummy_timestamp = datetime.datetime(2013, 1, 1, 0, 0, 0, 0, UTC)
+        self.patch(_o, 'create_timestamp', lambda: _dummy_timestamp)
+
+        with temp_file_contents(b"Hello") as f:
+            specified_file_name = self.getUniqueString()
+            result = get_result_for([
+                '--attach-file',
+                f.name,
+            ])
+
+            self.assertThat(
+                result._events,
+                MatchesListwise([
+                    MatchesStatusCall(call='startTestRun'),
+                    MatchesStatusCall(file_bytes=b'Hello', timestamp=_dummy_timestamp),
+                    MatchesStatusCall(call='stopTestRun'),
+                ])
+            )
+
+    def test_can_specify_tags_without_test_status(self):
+        result = get_result_for([
+            '--tag',
+            'foo',
+        ])
+
+        self.assertThat(
+            result._events,
+            MatchesListwise([
+                MatchesStatusCall(call='startTestRun'),
+                MatchesStatusCall(test_tags=set(['foo'])),
+                MatchesStatusCall(call='stopTestRun'),
+            ])
+        )
+
+
+class MatchesStatusCall(Matcher):
+
+    _position_lookup = {
+        'call': 0,
+        'test_id': 1,
+        'test_status': 2,
+        'test_tags': 3,
+        'runnable': 4,
+        'file_name': 5,
+        'file_bytes': 6,
+        'eof': 7,
+        'mime_type': 8,
+        'route_code': 9,
+        'timestamp': 10,
+    }
+
+    def __init__(self, **kwargs):
+        unknown_kwargs = list(filter(
+            lambda k: k not in self._position_lookup,
+            kwargs
+        ))
+        if unknown_kwargs:
+            raise ValueError("Unknown keywords: %s" % ','.join(unknown_kwargs))
+        self._filters = kwargs
+
+    def match(self, call_tuple):
+        for k, v in self._filters.items():
+            try:
+                pos = self._position_lookup[k]
+                if call_tuple[pos] != v:
+                    return Mismatch(
+                        "Value for key is %r, not %r" % (call_tuple[pos], v)
+                    )
+            except IndexError:
+                return Mismatch("Key %s is not present." % k)
+
+    def __str__(self):
+        return "<MatchesStatusCall %r>" % self._filters
diff --git a/third_party/subunit/python/subunit/tests/test_progress_model.py b/third_party/subunit/python/subunit/tests/test_progress_model.py
new file mode 100644
index 0000000..ac03120
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_progress_model.py
@@ -0,0 +1,112 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import unittest
+
+import subunit
+from subunit.progress_model import ProgressModel
+
+
+class TestProgressModel(unittest.TestCase):
+
+    def assertProgressSummary(self, pos, total, progress):
+        """Assert that a progress model has reached a particular point."""
+        self.assertEqual(pos, progress.pos())
+        self.assertEqual(total, progress.width())
+
+    def test_new_progress_0_0(self):
+        progress = ProgressModel()
+        self.assertProgressSummary(0, 0, progress)
+
+    def test_advance_0_0(self):
+        progress = ProgressModel()
+        progress.advance()
+        self.assertProgressSummary(1, 0, progress)
+
+    def test_advance_1_0(self):
+        progress = ProgressModel()
+        progress.advance()
+        self.assertProgressSummary(1, 0, progress)
+
+    def test_set_width_absolute(self):
+        progress = ProgressModel()
+        progress.set_width(10)
+        self.assertProgressSummary(0, 10, progress)
+
+    def test_set_width_absolute_preserves_pos(self):
+        progress = ProgressModel()
+        progress.advance()
+        progress.set_width(2)
+        self.assertProgressSummary(1, 2, progress)
+
+    def test_adjust_width(self):
+        progress = ProgressModel()
+        progress.adjust_width(10)
+        self.assertProgressSummary(0, 10, progress)
+        progress.adjust_width(-10)
+        self.assertProgressSummary(0, 0, progress)
+
+    def test_adjust_width_preserves_pos(self):
+        progress = ProgressModel()
+        progress.advance()
+        progress.adjust_width(10)
+        self.assertProgressSummary(1, 10, progress)
+        progress.adjust_width(-10)
+        self.assertProgressSummary(1, 0, progress)
+
+    def test_push_preserves_progress(self):
+        progress = ProgressModel()
+        progress.adjust_width(3)
+        progress.advance()
+        progress.push()
+        self.assertProgressSummary(1, 3, progress)
+
+    def test_advance_advances_substack(self):
+        progress = ProgressModel()
+        progress.adjust_width(3)
+        progress.advance()
+        progress.push()
+        progress.adjust_width(1)
+        progress.advance()
+        self.assertProgressSummary(2, 3, progress)
+
+    def test_adjust_width_adjusts_substack(self):
+        progress = ProgressModel()
+        progress.adjust_width(3)
+        progress.advance()
+        progress.push()
+        progress.adjust_width(2)
+        progress.advance()
+        self.assertProgressSummary(3, 6, progress)
+
+    def test_set_width_adjusts_substack(self):
+        progress = ProgressModel()
+        progress.adjust_width(3)
+        progress.advance()
+        progress.push()
+        progress.set_width(2)
+        progress.advance()
+        self.assertProgressSummary(3, 6, progress)
+
+    def test_pop_restores_progress(self):
+        progress = ProgressModel()
+        progress.adjust_width(3)
+        progress.advance()
+        progress.push()
+        progress.adjust_width(1)
+        progress.advance()
+        progress.pop()
+        self.assertProgressSummary(1, 3, progress)
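+
+
+# A minimal sketch (not part of the original module) of how a consumer might
+# drive ProgressModel using only the API exercised by the tests above:
+#
+#   progress = ProgressModel()
+#   progress.adjust_width(10)   # ten tests expected
+#   for _ in range(3):
+#       progress.advance()      # three tests completed so far
+#   print("%d/%d" % (progress.pos(), progress.width()))  # -> 3/10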
diff --git a/third_party/subunit/python/subunit/tests/test_run.py b/third_party/subunit/python/subunit/tests/test_run.py
new file mode 100644
index 0000000..1c823da
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_run.py
@@ -0,0 +1,88 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2011  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import io
+import unittest
+
+from testtools import PlaceHolder, TestCase
+from testtools.compat import _b
+from testtools.matchers import StartsWith
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+from subunit import run
+from subunit.run import SubunitTestRunner
+
+
+class TestSubunitTestRunner(TestCase):
+
+    def test_includes_timing_output(self):
+        bytestream = io.BytesIO()
+        runner = SubunitTestRunner(stream=bytestream)
+        test = PlaceHolder('name')
+        runner.run(test)
+        bytestream.seek(0)
+        eventstream = StreamResult()
+        subunit.ByteStreamToStreamResult(bytestream).run(eventstream)
+        timestamps = [event[-1] for event in eventstream._events
+            if event is not None]
+        self.assertNotEqual([], timestamps)
+
+    def test_enumerates_tests_before_run(self):
+        bytestream = io.BytesIO()
+        runner = SubunitTestRunner(stream=bytestream)
+        test1 = PlaceHolder('name1')
+        test2 = PlaceHolder('name2')
+        case = unittest.TestSuite([test1, test2])
+        runner.run(case)
+        bytestream.seek(0)
+        eventstream = StreamResult()
+        subunit.ByteStreamToStreamResult(bytestream).run(eventstream)
+        self.assertEqual([
+            ('status', 'name1', 'exists'),
+            ('status', 'name2', 'exists'),
+            ], [event[:3] for event in eventstream._events[:2]])
+
+    def test_list_errors_if_errors_from_list_test(self):
+        bytestream = io.BytesIO()
+        runner = SubunitTestRunner(stream=bytestream)
+        def list_test(test):
+            return [], ['failed import']
+        self.patch(run, 'list_test', list_test)
+        exc = self.assertRaises(SystemExit, runner.list, None)
+        self.assertEqual((2,), exc.args)
+
+    class FailingTest(TestCase):
+        def test_fail(self):
+            1/0
+
+    def test_exits_zero_when_tests_fail(self):
+        bytestream = io.BytesIO()
+        stream = io.TextIOWrapper(bytestream, encoding="utf8")
+        try:
+            self.assertEqual(None, run.main(
+                argv=["progName", "subunit.tests.test_run.TestSubunitTestRunner.FailingTest"],
+                stdout=stream))
+        except SystemExit:
+            self.fail("SystemExit raised")
+        self.assertThat(bytestream.getvalue(), StartsWith(_b('\xb3')))
+
+    def test_exits_nonzero_when_execution_errors(self):
+        bytestream = io.BytesIO()
+        stream = io.TextIOWrapper(bytestream, encoding="utf8")
+        exc = self.assertRaises(Exception, run.main,
+                argv=["progName", "subunit.tests.test_run.TestSubunitTestRunner.MissingTest"],
+                stdout=stream)
diff --git a/third_party/subunit/python/subunit/tests/test_subunit_filter.py b/third_party/subunit/python/subunit/tests/test_subunit_filter.py
new file mode 100644
index 0000000..5f34b3b
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_subunit_filter.py
@@ -0,0 +1,346 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Tests for subunit.TestResultFilter."""
+
+from datetime import datetime
+import os
+import subprocess
+import sys
+from subunit import iso8601
+import unittest
+
+from testtools import TestCase
+from testtools.compat import _b, BytesIO
+from testtools.testresult.doubles import ExtendedTestResult, StreamResult
+
+import subunit
+from subunit.test_results import make_tag_filter, TestResultFilter
+from subunit import ByteStreamToStreamResult, StreamResultToBytes
+
+
+class TestTestResultFilter(TestCase):
+    """Test for TestResultFilter, a TestResult object which filters tests."""
+
+    # While TestResultFilter works on python objects, using a subunit stream
+    # is an easy, pithy way of getting a series of test objects to call into
+    # the TestResult, and as TestResultFilter is intended for use with subunit
+    # it also has the benefit of detecting any interface skew issues.
+    example_subunit_stream = _b("""\
+tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error [
+error details
+]
+test skipped
+skip skipped
+test todo
+xfail todo
+""")
+
+    def run_tests(self, result_filter, input_stream=None):
+        """Run tests through the given filter.
+
+        :param result_filter: A filtering TestResult object.
+        :param input_stream: Bytes of subunit stream data. If not provided,
+            uses TestTestResultFilter.example_subunit_stream.
+        """
+        if input_stream is None:
+            input_stream = self.example_subunit_stream
+        test = subunit.ProtocolTestCase(BytesIO(input_stream))
+        test.run(result_filter)
+
+    def test_default(self):
+        """The default is to exclude success and include everything else."""
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result)
+        self.run_tests(result_filter)
+        # skips are seen as success by default python TestResult.
+        self.assertEqual(['error'],
+            [error[0].id() for error in filtered_result.errors])
+        self.assertEqual(['failed'],
+            [failure[0].id() for failure in
+            filtered_result.failures])
+        self.assertEqual(4, filtered_result.testsRun)
+
+    def test_tag_filter(self):
+        tag_filter = make_tag_filter(['global'], ['local'])
+        result = ExtendedTestResult()
+        result_filter = TestResultFilter(
+            result, filter_success=False, filter_predicate=tag_filter)
+        self.run_tests(result_filter)
+        tests_included = [
+            event[1] for event in result._events if event[0] == 'startTest']
+        tests_expected = list(map(
+            subunit.RemotedTestCase,
+            ['passed', 'error', 'skipped', 'todo']))
+        self.assertEquals(tests_expected, tests_included)
+
+    def test_tags_tracked_correctly(self):
+        tag_filter = make_tag_filter(['a'], [])
+        result = ExtendedTestResult()
+        result_filter = TestResultFilter(
+            result, filter_success=False, filter_predicate=tag_filter)
+        input_stream = _b(
+            "test: foo\n"
+            "tags: a\n"
+            "successful: foo\n"
+            "test: bar\n"
+            "successful: bar\n")
+        self.run_tests(result_filter, input_stream)
+        foo = subunit.RemotedTestCase('foo')
+        self.assertEquals(
+            [('startTest', foo),
+             ('tags', set(['a']), set()),
+             ('addSuccess', foo),
+             ('stopTest', foo),
+             ],
+            result._events)
+
+    def test_exclude_errors(self):
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result, filter_error=True)
+        self.run_tests(result_filter)
+        # skips are seen as errors by default python TestResult.
+        self.assertEqual([], filtered_result.errors)
+        self.assertEqual(['failed'],
+            [failure[0].id() for failure in
+            filtered_result.failures])
+        self.assertEqual(3, filtered_result.testsRun)
+
+    def test_fixup_expected_failures(self):
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result,
+            fixup_expected_failures=set(["failed"]))
+        self.run_tests(result_filter)
+        self.assertEqual(['failed', 'todo'],
+            [failure[0].id() for failure in filtered_result.expectedFailures])
+        self.assertEqual([], filtered_result.failures)
+        self.assertEqual(4, filtered_result.testsRun)
+
+    def test_fixup_expected_errors(self):
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result,
+            fixup_expected_failures=set(["error"]))
+        self.run_tests(result_filter)
+        self.assertEqual(['error', 'todo'],
+            [failure[0].id() for failure in filtered_result.expectedFailures])
+        self.assertEqual([], filtered_result.errors)
+        self.assertEqual(4, filtered_result.testsRun)
+
+    def test_fixup_unexpected_success(self):
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result, filter_success=False,
+            fixup_expected_failures=set(["passed"]))
+        self.run_tests(result_filter)
+        self.assertEqual(['passed'],
+            [passed.id() for passed in filtered_result.unexpectedSuccesses])
+        self.assertEqual(5, filtered_result.testsRun)
+
+    def test_exclude_failure(self):
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result, filter_failure=True)
+        self.run_tests(result_filter)
+        self.assertEqual(['error'],
+            [error[0].id() for error in filtered_result.errors])
+        self.assertEqual([],
+            [failure[0].id() for failure in
+            filtered_result.failures])
+        self.assertEqual(3, filtered_result.testsRun)
+
+    def test_exclude_skips(self):
+        filtered_result = subunit.TestResultStats(None)
+        result_filter = TestResultFilter(filtered_result, filter_skip=True)
+        self.run_tests(result_filter)
+        self.assertEqual(0, filtered_result.skipped_tests)
+        self.assertEqual(2, filtered_result.failed_tests)
+        self.assertEqual(3, filtered_result.testsRun)
+
+    def test_include_success(self):
+        """Successes can be included if requested."""
+        filtered_result = unittest.TestResult()
+        result_filter = TestResultFilter(filtered_result,
+            filter_success=False)
+        self.run_tests(result_filter)
+        self.assertEqual(['error'],
+            [error[0].id() for error in filtered_result.errors])
+        self.assertEqual(['failed'],
+            [failure[0].id() for failure in
+            filtered_result.failures])
+        self.assertEqual(5, filtered_result.testsRun)
+
+    def test_filter_predicate(self):
+        """You can filter by predicate callbacks"""
+        # 0.0.7 and earlier did not support the 'tags' parameter, so we need
+        # to test that we still support behaviour without it.
+        filtered_result = unittest.TestResult()
+        def filter_cb(test, outcome, err, details):
+            return outcome == 'success'
+        result_filter = TestResultFilter(filtered_result,
+            filter_predicate=filter_cb,
+            filter_success=False)
+        self.run_tests(result_filter)
+        # Only success should pass
+        self.assertEqual(1, filtered_result.testsRun)
+
+    def test_filter_predicate_with_tags(self):
+        """You can filter by predicate callbacks that accept tags"""
+        filtered_result = unittest.TestResult()
+        def filter_cb(test, outcome, err, details, tags):
+            return outcome == 'success'
+        result_filter = TestResultFilter(filtered_result,
+            filter_predicate=filter_cb,
+            filter_success=False)
+        self.run_tests(result_filter)
+        # Only success should pass
+        self.assertEqual(1, filtered_result.testsRun)
+
+    def test_time_ordering_preserved(self):
+        # Passing a subunit stream through TestResultFilter preserves the
+        # relative ordering of 'time' directives and any other subunit
+        # directives that are still included.
+        date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
+        date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
+        date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
+        subunit_stream = _b('\n'.join([
+            "time: %s",
+            "test: foo",
+            "time: %s",
+            "error: foo",
+            "time: %s",
+            ""]) % (date_a, date_b, date_c))
+        result = ExtendedTestResult()
+        result_filter = TestResultFilter(result)
+        self.run_tests(result_filter, subunit_stream)
+        foo = subunit.RemotedTestCase('foo')
+        self.maxDiff = None
+        self.assertEqual(
+            [('time', date_a),
+             ('time', date_b),
+             ('startTest', foo),
+             ('addError', foo, {}),
+             ('stopTest', foo),
+             ('time', date_c)], result._events)
+
+    def test_time_passes_through_filtered_tests(self):
+        # Passing a subunit stream through TestResultFilter preserves 'time'
+        # directives even if a specific test is filtered out.
+        date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
+        date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
+        date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
+        subunit_stream = _b('\n'.join([
+            "time: %s",
+            "test: foo",
+            "time: %s",
+            "success: foo",
+            "time: %s",
+            ""]) % (date_a, date_b, date_c))
+        result = ExtendedTestResult()
+        result_filter = TestResultFilter(result)
+        result_filter.startTestRun()
+        self.run_tests(result_filter, subunit_stream)
+        result_filter.stopTestRun()
+        foo = subunit.RemotedTestCase('foo')
+        self.maxDiff = None
+        self.assertEqual(
+            [('startTestRun',),
+             ('time', date_a),
+             ('time', date_c),
+             ('stopTestRun',),], result._events)
+
+    def test_skip_preserved(self):
+        subunit_stream = _b('\n'.join([
+            "test: foo",
+            "skip: foo",
+            ""]))
+        result = ExtendedTestResult()
+        result_filter = TestResultFilter(result)
+        self.run_tests(result_filter, subunit_stream)
+        foo = subunit.RemotedTestCase('foo')
+        self.assertEquals(
+            [('startTest', foo),
+             ('addSkip', foo, {}),
+             ('stopTest', foo), ], result._events)
+
+    if sys.version_info < (2, 7):
+        # These tests require Python >=2.7.
+        del test_fixup_expected_failures, test_fixup_expected_errors, test_fixup_unexpected_success
+
+
+class TestFilterCommand(TestCase):
+
+    def run_command(self, args, stream):
+        root = os.path.dirname(
+            os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+        script_path = os.path.join(root, 'filters', 'subunit-filter')
+        command = [sys.executable, script_path] + list(args)
+        ps = subprocess.Popen(
+            command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        out, err = ps.communicate(stream)
+        if ps.returncode != 0:
+            raise RuntimeError("%s failed: %s" % (command, err))
+        return out
+
+    def test_default(self):
+        byte_stream = BytesIO()
+        stream = StreamResultToBytes(byte_stream)
+        stream.status(test_id="foo", test_status="inprogress")
+        stream.status(test_id="foo", test_status="skip")
+        output = self.run_command([], byte_stream.getvalue())
+        events = StreamResult()
+        ByteStreamToStreamResult(BytesIO(output)).run(events)
+        ids = set(event[1] for event in events._events)
+        self.assertEqual([
+            ('status', 'foo', 'inprogress'),
+            ('status', 'foo', 'skip'),
+            ], [event[:3] for event in events._events])
+
+    def test_tags(self):
+        byte_stream = BytesIO()
+        stream = StreamResultToBytes(byte_stream)
+        stream.status(
+            test_id="foo", test_status="inprogress", test_tags=set(["a"]))
+        stream.status(
+            test_id="foo", test_status="success", test_tags=set(["a"]))
+        stream.status(test_id="bar", test_status="inprogress")
+        stream.status(test_id="bar", test_status="inprogress")
+        stream.status(
+            test_id="baz", test_status="inprogress", test_tags=set(["a"]))
+        stream.status(
+            test_id="baz", test_status="success", test_tags=set(["a"]))
+        output = self.run_command(
+            ['-s', '--with-tag', 'a'], byte_stream.getvalue())
+        events = StreamResult()
+        ByteStreamToStreamResult(BytesIO(output)).run(events)
+        ids = set(event[1] for event in events._events)
+        self.assertEqual(set(['foo', 'baz']), ids)
+
+    def test_no_passthrough(self):
+        output = self.run_command(['--no-passthrough'], b'hi thar')
+        self.assertEqual(b'', output)
+
+    def test_passthrough(self):
+        output = self.run_command([], b'hi thar')
+        byte_stream = BytesIO()
+        stream = StreamResultToBytes(byte_stream)
+        stream.status(file_name="stdout", file_bytes=b'hi thar')
+        self.assertEqual(byte_stream.getvalue(), output)
diff --git a/third_party/subunit/python/subunit/tests/test_subunit_stats.py b/third_party/subunit/python/subunit/tests/test_subunit_stats.py
new file mode 100644
index 0000000..0da132d
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_subunit_stats.py
@@ -0,0 +1,78 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Tests for subunit.TestResultStats."""
+
+import unittest
+
+from testtools.compat import _b, BytesIO, StringIO
+
+import subunit
+
+
+class TestTestResultStats(unittest.TestCase):
+    """Test for TestResultStats, a TestResult object that generates stats."""
+
+    def setUp(self):
+        self.output = StringIO()
+        self.result = subunit.TestResultStats(self.output)
+        self.input_stream = BytesIO()
+        self.test = subunit.ProtocolTestCase(self.input_stream)
+
+    def test_stats_empty(self):
+        self.test.run(self.result)
+        self.assertEqual(0, self.result.total_tests)
+        self.assertEqual(0, self.result.passed_tests)
+        self.assertEqual(0, self.result.failed_tests)
+        self.assertEqual(set(), self.result.seen_tags)
+
+    def setUpUsedStream(self):
+        self.input_stream.write(_b("""tags: global
+test passed
+success passed
+test failed
+tags: local
+failure failed
+test error
+error error
+test skipped
+skip skipped
+test todo
+xfail todo
+"""))
+        self.input_stream.seek(0)
+        self.test.run(self.result)
+
+    def test_stats_smoke_everything(self):
+        # Statistics are calculated usefully.
+        self.setUpUsedStream()
+        self.assertEqual(5, self.result.total_tests)
+        self.assertEqual(2, self.result.passed_tests)
+        self.assertEqual(2, self.result.failed_tests)
+        self.assertEqual(1, self.result.skipped_tests)
+        self.assertEqual(set(["global", "local"]), self.result.seen_tags)
+
+    def test_stat_formatting(self):
+        expected = ("""
+Total tests:       5
+Passed tests:      2
+Failed tests:      2
+Skipped tests:     1
+Seen tags: global, local
+""")[1:]
+        self.setUpUsedStream()
+        self.result.formatStats()
+        self.assertEqual(expected, self.output.getvalue())
diff --git a/third_party/subunit/python/subunit/tests/test_subunit_tags.py b/third_party/subunit/python/subunit/tests/test_subunit_tags.py
new file mode 100644
index 0000000..9bc3cf6
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_subunit_tags.py
@@ -0,0 +1,85 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Tests for subunit.tag_stream."""
+
+from io import BytesIO
+
+import testtools
+from testtools.matchers import Contains
+
+import subunit
+import subunit.test_results
+
+
+class TestSubUnitTags(testtools.TestCase):
+
+    def setUp(self):
+        super(TestSubUnitTags, self).setUp()
+        self.original = BytesIO()
+        self.filtered = BytesIO()
+
+    def test_add_tag(self):
+        # Literal values to avoid set sort-order dependencies. The Python code
+        # below shows the derivation.
+        # reference = BytesIO()
+        # stream = subunit.StreamResultToBytes(reference)
+        # stream.status(
+        #     test_id='test', test_status='inprogress', test_tags=set(['quux', 'foo']))
+        # stream.status(
+        #     test_id='test', test_status='success', test_tags=set(['bar', 'quux', 'foo']))
+        reference = [
+            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+                b'\x83\x1b\x04test\x03\x03bar\x04quux\x03fooqn\xab)',
+            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+                b'\x83\x1b\x04test\x03\x04quux\x03foo\x03bar\xaf\xbd\x9d\xd6',
+            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+                b'\x83\x1b\x04test\x03\x04quux\x03bar\x03foo\x03\x04b\r',
+            b'\xb3)\x82\x17\x04test\x02\x04quux\x03foo\x05\x97n\x86\xb3)'
+                b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
+            b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+                b'\x83\x1b\x04test\x03\x03foo\x04quux\x03bar\x08\xc2X\x83',
+            b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+                b'\x83\x1b\x04test\x03\x03bar\x03foo\x04quux\xd2\x18\x1bC',
+            b'\xb3)\x82\x17\x04test\x02\x03foo\x04quux\xa6\xe1\xde\xec\xb3)'
+                b'\x83\x1b\x04test\x03\x03foo\x03bar\x04quux:\x05e\x80',
+            ]
+        stream = subunit.StreamResultToBytes(self.original)
+        stream.status(
+            test_id='test', test_status='inprogress', test_tags=set(['foo']))
+        stream.status(
+            test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
+        self.original.seek(0)
+        self.assertEqual(
+            0, subunit.tag_stream(self.original, self.filtered, ["quux"]))
+        self.assertThat(reference, Contains(self.filtered.getvalue()))
+
+    def test_remove_tag(self):
+        reference = BytesIO()
+        stream = subunit.StreamResultToBytes(reference)
+        stream.status(
+            test_id='test', test_status='inprogress', test_tags=set(['foo']))
+        stream.status(
+            test_id='test', test_status='success', test_tags=set(['foo']))
+        stream = subunit.StreamResultToBytes(self.original)
+        stream.status(
+            test_id='test', test_status='inprogress', test_tags=set(['foo']))
+        stream.status(
+            test_id='test', test_status='success', test_tags=set(['foo', 'bar']))
+        self.original.seek(0)
+        self.assertEqual(
+            0, subunit.tag_stream(self.original, self.filtered, ["-bar"]))
+        self.assertEqual(reference.getvalue(), self.filtered.getvalue())
diff --git a/third_party/subunit/python/subunit/tests/test_tap2subunit.py b/third_party/subunit/python/subunit/tests/test_tap2subunit.py
new file mode 100644
index 0000000..9f1f5d4
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_tap2subunit.py
@@ -0,0 +1,387 @@
+#
+#  subunit: extensions to python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+"""Tests for TAP2SubUnit."""
+
+from io import BytesIO, StringIO
+import unittest
+
+from testtools import TestCase
+from testtools.compat import _u
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+
+UTF8_TEXT = 'text/plain; charset=UTF8'
+
+
+class TestTAP2SubUnit(TestCase):
+    """Tests for TAP2SubUnit.
+
+    These tests test TAP string data in, and subunit string data out.
+    This is ok because the subunit protocol is intended to be stable,
+    but it might be easier/pithier to write tests against TAP string in,
+    parsed subunit objects out (by hooking the subunit stream to a subunit
+    protocol server).
+    """
+
+    def setUp(self):
+        super(TestTAP2SubUnit, self).setUp()
+        self.tap = StringIO()
+        self.subunit = BytesIO()
+
+    def test_skip_entire_file(self):
+        # A file
+        # 1..0 # Skipped: comment
+        # results in a single skipped test.
+        self.tap.write(_u("1..0 # Skipped: entire file skipped\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'file skip', 'skip', None, True,
+            'tap comment', b'Skipped: entire file skipped', True, None, None,
+            None)])
+
+    def test_ok_test_pass(self):
+        # A file
+        # ok
+        # results in a passed test with name 'test 1' (a synthetic name as tap
+        # does not require named fixtures - it is the first test in the tap
+        # stream).
+        self.tap.write(_u("ok\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'success', None, False, None,
+            None, True, None, None, None)])
+
+    def test_ok_test_number_pass(self):
+        # A file
+        # ok 1
+        # results in a passed test with name 'test 1'
+        self.tap.write(_u("ok 1\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'success', None, False, None,
+            None, True, None, None, None)])
+
+    def test_ok_test_number_description_pass(self):
+        # A file
+        # ok 1 - There is a description
+        # results in a passed test with name 'test 1 - There is a description'
+        self.tap.write(_u("ok 1 - There is a description\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1 - There is a description',
+            'success', None, False, None, None, True, None, None, None)])
+
+    def test_ok_test_description_pass(self):
+        # A file
+        # ok There is a description
+        # results in a passed test with name 'test 1 There is a description'
+        self.tap.write(_u("ok There is a description\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1 There is a description',
+            'success', None, False, None, None, True, None, None, None)])
+
+    def test_ok_SKIP_skip(self):
+        # A file
+        # ok # SKIP
+        # results in a skip test with name 'test 1'
+        self.tap.write(_u("ok # SKIP\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'skip', None, False, None,
+            None, True, None, None, None)])
+
+    def test_ok_skip_number_comment_lowercase(self):
+        self.tap.write(_u("ok 1 # skip no samba environment available, skipping compilation\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'skip', None, False, 'tap comment',
+            b'no samba environment available, skipping compilation', True,
+            'text/plain; charset=UTF8', None, None)])
+
+    def test_ok_number_description_SKIP_skip_comment(self):
+        # A file
+        # ok 1 foo  # SKIP Not done yet
+        # results in a skip test with name 'test 1 foo' and a log of
+        # Not done yet
+        self.tap.write(_u("ok 1 foo  # SKIP Not done yet\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1 foo', 'skip', None, False,
+            'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+            None, None)])
+
+    def test_ok_SKIP_skip_comment(self):
+        # A file
+        # ok # SKIP Not done yet
+        # results in a skip test with name 'test 1' and a log of Not done yet
+        self.tap.write(_u("ok # SKIP Not done yet\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'skip', None, False,
+            'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+            None, None)])
+
+    def test_ok_TODO_xfail(self):
+        # A file
+        # ok # TODO
+        # results in a xfail test with name 'test 1'
+        self.tap.write(_u("ok # TODO\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'xfail', None, False, None,
+            None, True, None, None, None)])
+
+    def test_ok_TODO_xfail_comment(self):
+        # A file
+        # ok # TODO Not done yet
+        # results in a xfail test with name 'test 1' and a log of Not done yet
+        self.tap.write(_u("ok # TODO Not done yet\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([('status', 'test 1', 'xfail', None, False,
+            'tap comment', b'Not done yet', True, 'text/plain; charset=UTF8',
+            None, None)])
+
+    def test_bail_out_errors(self):
+        # A file with line in it
+        # Bail out! COMMENT
+        # is treated as an error
+        self.tap.write(_u("ok 1 foo\n"))
+        self.tap.write(_u("Bail out! Lifejacket engaged\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 foo', 'success', None, False, None, None, True,
+             None, None, None),
+            ('status', 'Bail out! Lifejacket engaged', 'fail', None, False,
+             None, None, True, None, None, None)])
+
+    def test_missing_test_at_end_with_plan_adds_error(self):
+        # A file
+        # 1..3
+        # ok first test
+        # not ok second test
+        # results in three tests, with the third being created
+        self.tap.write(_u('1..3\n'))
+        self.tap.write(_u('ok first test\n'))
+        self.tap.write(_u('not ok second test\n'))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 first test', 'success', None, False, None,
+             None, True, None, None, None),
+            ('status', 'test 2 second test', 'fail', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 3', 'fail', None, False, 'tap meta',
+             b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+             None, None)])
+
+    def test_missing_test_with_plan_adds_error(self):
+        # A file
+        # 1..3
+        # ok first test
+        # not ok 3 third test
+        # results in three tests, with the second being created
+        self.tap.write(_u('1..3\n'))
+        self.tap.write(_u('ok first test\n'))
+        self.tap.write(_u('not ok 3 third test\n'))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 first test', 'success', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 2', 'fail', None, False, 'tap meta',
+             b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+             None, None),
+            ('status', 'test 3 third test', 'fail', None, False, None, None,
+             True, None, None, None)])
+
+    def test_missing_test_no_plan_adds_error(self):
+        # A file
+        # ok first test
+        # not ok 3 third test
+        # results in three tests, with the second being created
+        self.tap.write(_u('ok first test\n'))
+        self.tap.write(_u('not ok 3 third test\n'))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 first test', 'success', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 2', 'fail', None, False, 'tap meta',
+             b'test missing from TAP output', True, 'text/plain; charset=UTF8',
+             None, None),
+            ('status', 'test 3 third test', 'fail', None, False, None, None,
+             True, None, None, None)])
+
+    def test_four_tests_in_a_row_trailing_plan(self):
+        # A file
+        # ok 1 - first test in a script with trailing plan
+        # not ok 2 - second
+        # ok 3 - third
+        # not ok 4 - fourth
+        # 1..4
+        # results in four tests numbered and named
+        self.tap.write(_u('ok 1 - first test in a script with trailing plan\n'))
+        self.tap.write(_u('not ok 2 - second\n'))
+        self.tap.write(_u('ok 3 - third\n'))
+        self.tap.write(_u('not ok 4 - fourth\n'))
+        self.tap.write(_u('1..4\n'))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 - first test in a script with trailing plan',
+             'success', None, False, None, None, True, None, None, None),
+            ('status', 'test 2 - second', 'fail', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 3 - third', 'success', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+             True, None, None, None)])
+
+    def test_four_tests_in_a_row_with_plan(self):
+        # A file
+        # 1..4
+        # ok 1 - first test in a script with a plan
+        # not ok 2 - second
+        # ok 3 - third
+        # not ok 4 - fourth
+        # results in four tests numbered and named
+        self.tap.write(_u('1..4\n'))
+        self.tap.write(_u('ok 1 - first test in a script with a plan\n'))
+        self.tap.write(_u('not ok 2 - second\n'))
+        self.tap.write(_u('ok 3 - third\n'))
+        self.tap.write(_u('not ok 4 - fourth\n'))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 - first test in a script with a plan',
+             'success', None, False, None, None, True, None, None, None),
+            ('status', 'test 2 - second', 'fail', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 3 - third', 'success', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+             True, None, None, None)])
+
+    def test_four_tests_in_a_row_no_plan(self):
+        # A file
+        # ok 1 - first test in a script with no plan at all
+        # not ok 2 - second
+        # ok 3 - third
+        # not ok 4 - fourth
+        # results in four tests numbered and named
+        self.tap.write(_u('ok 1 - first test in a script with no plan at all\n'))
+        self.tap.write(_u('not ok 2 - second\n'))
+        self.tap.write(_u('ok 3 - third\n'))
+        self.tap.write(_u('not ok 4 - fourth\n'))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1 - first test in a script with no plan at all',
+             'success', None, False, None, None, True, None, None, None),
+            ('status', 'test 2 - second', 'fail', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 3 - third', 'success', None, False, None, None,
+             True, None, None, None),
+            ('status', 'test 4 - fourth', 'fail', None, False, None, None,
+             True, None, None, None)])
+
+    def test_todo_and_skip(self):
+        # A file
+        # not ok 1 - a fail but # TODO but is TODO
+        # not ok 2 - another fail # SKIP instead
+        # results in two tests, numbered and commented.
+        self.tap.write(_u("not ok 1 - a fail but # TODO but is TODO\n"))
+        self.tap.write(_u("not ok 2 - another fail # SKIP instead\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.subunit.seek(0)
+        events = StreamResult()
+        subunit.ByteStreamToStreamResult(self.subunit).run(events)
+        self.check_events([
+            ('status', 'test 1 - a fail but', 'xfail', None, False,
+             'tap comment', b'but is TODO', True, 'text/plain; charset=UTF8',
+             None, None),
+            ('status', 'test 2 - another fail', 'skip', None, False,
+             'tap comment', b'instead', True, 'text/plain; charset=UTF8',
+             None, None)])
+
+    def test_leading_comments_add_to_next_test_log(self):
+        # A file
+        # # comment
+        # ok
+        # ok
+        # results in two tests, with the comment included
+        # in the first test's log and not the second's.
+        self.tap.write(_u("# comment\n"))
+        self.tap.write(_u("ok\n"))
+        self.tap.write(_u("ok\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1', 'success', None, False, 'tap comment',
+             b'# comment', True, 'text/plain; charset=UTF8', None, None),
+            ('status', 'test 2', 'success', None, False, None, None, True,
+             None, None, None)])
+
+    def test_trailing_comments_are_included_in_last_test_log(self):
+        # A file
+        # ok
+        # ok
+        # # comment
+        # results in two tests, with the second having the comment
+        # attached to its log.
+        self.tap.write(_u("ok\n"))
+        self.tap.write(_u("ok\n"))
+        self.tap.write(_u("# comment\n"))
+        self.tap.seek(0)
+        result = subunit.TAP2SubUnit(self.tap, self.subunit)
+        self.assertEqual(0, result)
+        self.check_events([
+            ('status', 'test 1', 'success', None, False, None, None, True,
+             None, None, None),
+            ('status', 'test 2', 'success', None, False, 'tap comment',
+             b'# comment', True, 'text/plain; charset=UTF8', None, None)])
+
+    def check_events(self, events):
+        self.subunit.seek(0)
+        eventstream = StreamResult()
+        subunit.ByteStreamToStreamResult(self.subunit).run(eventstream)
+        self.assertEqual(events, eventstream._events)
diff --git a/third_party/subunit/python/subunit/tests/test_test_protocol.py b/third_party/subunit/python/subunit/tests/test_test_protocol.py
new file mode 100644
index 0000000..c6008f4
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_test_protocol.py
@@ -0,0 +1,1362 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2005  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import datetime
+import unittest
+import os
+
+from testtools import PlaceHolder, skipIf, TestCase, TestResult
+from testtools.compat import _b, _u, BytesIO
+from testtools.content import Content, TracebackContent, text_content
+from testtools.content_type import ContentType
+try:
+    from testtools.testresult.doubles import (
+        Python26TestResult,
+        Python27TestResult,
+        ExtendedTestResult,
+        )
+except ImportError:
+    from testtools.tests.helpers import (
+        Python26TestResult,
+        Python27TestResult,
+        ExtendedTestResult,
+        )
+from testtools.matchers import Contains
+
+import subunit
+from subunit.tests import (
+    _remote_exception_repr,
+    _remote_exception_str,
+    _remote_exception_str_chunked,
+    )
+import subunit.iso8601 as iso8601
+
+
+def details_to_str(details):
+    return TestResult()._err_details_to_string(None, details=details)
+
+
+class TestTestImports(unittest.TestCase):
+
+    def test_imports(self):
+        from subunit import DiscardStream
+        from subunit import TestProtocolServer
+        from subunit import RemotedTestCase
+        from subunit import RemoteError
+        from subunit import ExecTestCase
+        from subunit import IsolatedTestCase
+        from subunit import TestProtocolClient
+        from subunit import ProtocolTestCase
+
+
+class TestDiscardStream(unittest.TestCase):
+
+    def test_write(self):
+        subunit.DiscardStream().write("content")
+
+
+class TestProtocolServerForward(unittest.TestCase):
+
+    def test_story(self):
+        client = unittest.TestResult()
+        out = BytesIO()
+        protocol = subunit.TestProtocolServer(client, forward_stream=out)
+        pipe = BytesIO(_b("test old mcdonald\n"
+                        "success old mcdonald\n"))
+        protocol.readFrom(pipe)
+        self.assertEqual(client.testsRun, 1)
+        self.assertEqual(pipe.getvalue(), out.getvalue())
+
+    def test_not_command(self):
+        client = unittest.TestResult()
+        out = BytesIO()
+        protocol = subunit.TestProtocolServer(client,
+            stream=subunit.DiscardStream(), forward_stream=out)
+        pipe = BytesIO(_b("success old mcdonald\n"))
+        protocol.readFrom(pipe)
+        self.assertEqual(client.testsRun, 0)
+        self.assertEqual(_b(""), out.getvalue())
+
+
+class TestTestProtocolServerPipe(unittest.TestCase):
+
+    def test_story(self):
+        client = unittest.TestResult()
+        protocol = subunit.TestProtocolServer(client)
+        traceback = "foo.c:53:ERROR invalid state\n"
+        pipe = BytesIO(_b("test old mcdonald\n"
+                        "success old mcdonald\n"
+                        "test bing crosby\n"
+                        "failure bing crosby [\n"
+                        +  traceback +
+                        "]\n"
+                        "test an error\n"
+                        "error an error\n"))
+        protocol.readFrom(pipe)
+        bing = subunit.RemotedTestCase("bing crosby")
+        an_error = subunit.RemotedTestCase("an error")
+        self.assertEqual(client.errors,
+                         [(an_error, _remote_exception_repr + '\n')])
+        self.assertEqual(
+            client.failures,
+            [(bing, _remote_exception_repr + ": "
+              + details_to_str({'traceback': text_content(traceback)}) + "\n")])
+        self.assertEqual(client.testsRun, 3)
+
+    def test_non_test_characters_forwarded_immediately(self):
+        pass
+
+
+class TestTestProtocolServerStartTest(unittest.TestCase):
+
+    def setUp(self):
+        self.client = Python26TestResult()
+        self.stream = BytesIO()
+        self.protocol = subunit.TestProtocolServer(self.client, self.stream)
+
+    def test_start_test(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.assertEqual(self.client._events,
+            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+    def test_start_testing(self):
+        self.protocol.lineReceived(_b("testing old mcdonald\n"))
+        self.assertEqual(self.client._events,
+            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+    def test_start_test_colon(self):
+        self.protocol.lineReceived(_b("test: old mcdonald\n"))
+        self.assertEqual(self.client._events,
+            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+    def test_indented_test_colon_ignored(self):
+        ignored_line = _b(" test: old mcdonald\n")
+        self.protocol.lineReceived(ignored_line)
+        self.assertEqual([], self.client._events)
+        self.assertEqual(self.stream.getvalue(), ignored_line)
+
+    def test_start_testing_colon(self):
+        self.protocol.lineReceived(_b("testing: old mcdonald\n"))
+        self.assertEqual(self.client._events,
+            [('startTest', subunit.RemotedTestCase("old mcdonald"))])
+
+
+class TestTestProtocolServerPassThrough(unittest.TestCase):
+
+    def setUp(self):
+        self.stdout = BytesIO()
+        self.test = subunit.RemotedTestCase("old mcdonald")
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client, self.stdout)
+
+    def keywords_before_test(self):
+        self.protocol.lineReceived(_b("failure a\n"))
+        self.protocol.lineReceived(_b("failure: a\n"))
+        self.protocol.lineReceived(_b("error a\n"))
+        self.protocol.lineReceived(_b("error: a\n"))
+        self.protocol.lineReceived(_b("success a\n"))
+        self.protocol.lineReceived(_b("success: a\n"))
+        self.protocol.lineReceived(_b("successful a\n"))
+        self.protocol.lineReceived(_b("successful: a\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.assertEqual(self.stdout.getvalue(), _b("failure a\n"
+                                                 "failure: a\n"
+                                                 "error a\n"
+                                                 "error: a\n"
+                                                 "success a\n"
+                                                 "success: a\n"
+                                                 "successful a\n"
+                                                 "successful: a\n"
+                                                 "]\n"))
+
+    def test_keywords_before_test(self):
+        self.keywords_before_test()
+        self.assertEqual(self.client._events, [])
+
+    def test_keywords_after_error(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("error old mcdonald\n"))
+        self.keywords_before_test()
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, {}),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_keywords_after_failure(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("failure old mcdonald\n"))
+        self.keywords_before_test()
+        self.assertEqual(self.client._events, [
+            ('startTest', self.test),
+            ('addFailure', self.test, {}),
+            ('stopTest', self.test),
+            ])
+
+    def test_keywords_after_success(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("success old mcdonald\n"))
+        self.keywords_before_test()
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addSuccess', self.test),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_keywords_after_test(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("failure a\n"))
+        self.protocol.lineReceived(_b("failure: a\n"))
+        self.protocol.lineReceived(_b("error a\n"))
+        self.protocol.lineReceived(_b("error: a\n"))
+        self.protocol.lineReceived(_b("success a\n"))
+        self.protocol.lineReceived(_b("success: a\n"))
+        self.protocol.lineReceived(_b("successful a\n"))
+        self.protocol.lineReceived(_b("successful: a\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.protocol.lineReceived(_b("failure old mcdonald\n"))
+        self.assertEqual(self.stdout.getvalue(), _b("test old mcdonald\n"
+                                                 "failure a\n"
+                                                 "failure: a\n"
+                                                 "error a\n"
+                                                 "error: a\n"
+                                                 "success a\n"
+                                                 "success: a\n"
+                                                 "successful a\n"
+                                                 "successful: a\n"
+                                                 "]\n"))
+        self.assertEqual(self.client._events, [
+            ('startTest', self.test),
+            ('addFailure', self.test, {}),
+            ('stopTest', self.test),
+            ])
+
+    def test_keywords_during_failure(self):
+        # A smoke test to make sure that the details parser is handed control
+        # appropriately: while the failure details block is open, keyword
+        # lines are captured as details rather than acted on or passed through.
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("failure: old mcdonald [\n"))
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("failure a\n"))
+        self.protocol.lineReceived(_b("failure: a\n"))
+        self.protocol.lineReceived(_b("error a\n"))
+        self.protocol.lineReceived(_b("error: a\n"))
+        self.protocol.lineReceived(_b("success a\n"))
+        self.protocol.lineReceived(_b("success: a\n"))
+        self.protocol.lineReceived(_b("successful a\n"))
+        self.protocol.lineReceived(_b("successful: a\n"))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.assertEqual(self.stdout.getvalue(), _b(""))
+        details = {}
+        details['traceback'] = Content(ContentType("text", "x-traceback",
+            {'charset': 'utf8'}),
+            lambda:[_b(
+            "test old mcdonald\n"
+            "failure a\n"
+            "failure: a\n"
+            "error a\n"
+            "error: a\n"
+            "success a\n"
+            "success: a\n"
+            "successful a\n"
+            "successful: a\n"
+            "]\n")])
+        self.assertEqual(self.client._events, [
+            ('startTest', self.test),
+            ('addFailure', self.test, details),
+            ('stopTest', self.test),
+            ])
+
+    def test_stdout_passthrough(self):
+        """Lines received which cannot be interpreted as any protocol action
+        should be passed through to sys.stdout.
+        """
+        bytes = _b("randombytes\n")
+        self.protocol.lineReceived(bytes)
+        self.assertEqual(self.stdout.getvalue(), bytes)
+
+
+class TestTestProtocolServerLostConnection(unittest.TestCase):
+
+    def setUp(self):
+        self.client = Python26TestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.test = subunit.RemotedTestCase("old mcdonald")
+
+    def test_lost_connection_no_input(self):
+        self.protocol.lostConnection()
+        self.assertEqual([], self.client._events)
+
+    def test_lost_connection_after_start(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lostConnection()
+        failure = subunit.RemoteError(
+            _u("lost connection during test 'old mcdonald'"))
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, failure),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_lost_connected_after_error(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("error old mcdonald\n"))
+        self.protocol.lostConnection()
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, subunit.RemoteError(_u(""))),
+            ('stopTest', self.test),
+            ], self.client._events)
+
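+    # Helper: start a test, open an outcome block with the given keyword and
+    # opening (e.g. "[\n" or "[ multipart\n"), then drop the connection; the
+    # client should see an error describing where the connection was lost.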
+    def do_connection_lost(self, outcome, opening):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("%s old mcdonald %s" % (outcome, opening)))
+        self.protocol.lostConnection()
+        failure = subunit.RemoteError(
+            _u("lost connection during %s report of test 'old mcdonald'") %
+            outcome)
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, failure),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_lost_connection_during_error(self):
+        self.do_connection_lost("error", "[\n")
+
+    def test_lost_connection_during_error_details(self):
+        self.do_connection_lost("error", "[ multipart\n")
+
+    def test_lost_connected_after_failure(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("failure old mcdonald\n"))
+        self.protocol.lostConnection()
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addFailure', self.test, subunit.RemoteError(_u(""))),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_lost_connection_during_failure(self):
+        self.do_connection_lost("failure", "[\n")
+
+    def test_lost_connection_during_failure_details(self):
+        self.do_connection_lost("failure", "[ multipart\n")
+
+    def test_lost_connection_after_success(self):
+        self.protocol.lineReceived(_b("test old mcdonald\n"))
+        self.protocol.lineReceived(_b("success old mcdonald\n"))
+        self.protocol.lostConnection()
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addSuccess', self.test),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_lost_connection_during_success(self):
+        self.do_connection_lost("success", "[\n")
+
+    def test_lost_connection_during_success_details(self):
+        self.do_connection_lost("success", "[ multipart\n")
+
+    def test_lost_connection_during_skip(self):
+        self.do_connection_lost("skip", "[\n")
+
+    def test_lost_connection_during_skip_details(self):
+        self.do_connection_lost("skip", "[ multipart\n")
+
+    def test_lost_connection_during_xfail(self):
+        self.do_connection_lost("xfail", "[\n")
+
+    def test_lost_connection_during_xfail_details(self):
+        self.do_connection_lost("xfail", "[ multipart\n")
+
+    def test_lost_connection_during_uxsuccess(self):
+        self.do_connection_lost("uxsuccess", "[\n")
+
+    def test_lost_connection_during_uxsuccess_details(self):
+        self.do_connection_lost("uxsuccess", "[ multipart\n")
+
+
+class TestInTestMultipart(unittest.TestCase):
+
+    def setUp(self):
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = subunit.RemotedTestCase(_u("mcdonalds farm"))
+
+    def test__outcome_sets_details_parser(self):
+        self.protocol._reading_success_details.details_parser = None
+        self.protocol._state._outcome(0, _b("mcdonalds farm [ multipart\n"),
+            None, self.protocol._reading_success_details)
+        parser = self.protocol._reading_success_details.details_parser
+        self.assertNotEqual(None, parser)
+        self.assertTrue(isinstance(parser,
+            subunit.details.MultipartDetailsParser))
+
+
+class TestTestProtocolServerAddError(unittest.TestCase):
+
+    def setUp(self):
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+    def simple_error_keyword(self, keyword):
+        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+        details = {}
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, details),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_simple_error(self):
+        self.simple_error_keyword("error")
+
+    def test_simple_error_colon(self):
+        self.simple_error_keyword("error:")
+
+    def test_error_empty_message(self):
+        self.protocol.lineReceived(_b("error mcdonalds farm [\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        details = {}
+        details['traceback'] = Content(ContentType("text", "x-traceback",
+            {'charset': 'utf8'}), lambda:[_b("")])
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, details),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def error_quoted_bracket(self, keyword):
+        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        details = {}
+        details['traceback'] = Content(ContentType("text", "x-traceback",
+            {'charset': 'utf8'}), lambda:[_b("]\n")])
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addError', self.test, details),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_error_quoted_bracket(self):
+        self.error_quoted_bracket("error")
+
+    def test_error_colon_quoted_bracket(self):
+        self.error_quoted_bracket("error:")
+
+
+class TestTestProtocolServerAddFailure(unittest.TestCase):
+
+    def setUp(self):
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+    def assertFailure(self, details):
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addFailure', self.test, details),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def simple_failure_keyword(self, keyword):
+        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+        details = {}
+        self.assertFailure(details)
+
+    def test_simple_failure(self):
+        self.simple_failure_keyword("failure")
+
+    def test_simple_failure_colon(self):
+        self.simple_failure_keyword("failure:")
+
+    def test_failure_empty_message(self):
+        self.protocol.lineReceived(_b("failure mcdonalds farm [\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        details = {}
+        details['traceback'] = Content(ContentType("text", "x-traceback",
+            {'charset': 'utf8'}), lambda:[_b("")])
+        self.assertFailure(details)
+
+    def failure_quoted_bracket(self, keyword):
+        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        details = {}
+        details['traceback'] = Content(ContentType("text", "x-traceback",
+            {'charset': 'utf8'}), lambda:[_b("]\n")])
+        self.assertFailure(details)
+
+    def test_failure_quoted_bracket(self):
+        self.failure_quoted_bracket("failure")
+
+    def test_failure_colon_quoted_bracket(self):
+        self.failure_quoted_bracket("failure:")
+
+
+class TestTestProtocolServerAddxFail(unittest.TestCase):
+    """Tests for the xfail keyword.
+
+    In Python this can thunk through to Success due to stdlib limitations (see
+    README).
+    """
+
+    def capture_expected_failure(self, test, err):
+        self._events.append((test, err))
+
+    def setup_python26(self):
+        """Setup a test object ready to be xfailed and thunk to success."""
+        self.client = Python26TestResult()
+        self.setup_protocol()
+
+    def setup_python27(self):
+        """Setup a test object ready to be xfailed."""
+        self.client = Python27TestResult()
+        self.setup_protocol()
+
+    def setup_python_ex(self):
+        """Setup a test object ready to be xfailed with details."""
+        self.client = ExtendedTestResult()
+        self.setup_protocol()
+
+    def setup_protocol(self):
+        """Setup the protocol based on self.client."""
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = self.client._events[-1][-1]
+
+    def simple_xfail_keyword(self, keyword, as_success):
+        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+        self.check_success_or_xfail(as_success)
+
+    def check_success_or_xfail(self, as_success, error_message=None):
+        if as_success:
+            self.assertEqual([
+                ('startTest', self.test),
+                ('addSuccess', self.test),
+                ('stopTest', self.test),
+                ], self.client._events)
+        else:
+            details = {}
+            if error_message is not None:
+                details['traceback'] = Content(
+                    ContentType("text", "x-traceback", {'charset': 'utf8'}),
+                    lambda:[_b(error_message)])
+            if isinstance(self.client, ExtendedTestResult):
+                value = details
+            else:
+                if error_message is not None:
+                    value = subunit.RemoteError(details_to_str(details))
+                else:
+                    value = subunit.RemoteError()
+            self.assertEqual([
+                ('startTest', self.test),
+                ('addExpectedFailure', self.test, value),
+                ('stopTest', self.test),
+                ], self.client._events)
+
+    def test_simple_xfail(self):
+        self.setup_python26()
+        self.simple_xfail_keyword("xfail", True)
+        self.setup_python27()
+        self.simple_xfail_keyword("xfail",  False)
+        self.setup_python_ex()
+        self.simple_xfail_keyword("xfail",  False)
+
+    def test_simple_xfail_colon(self):
+        self.setup_python26()
+        self.simple_xfail_keyword("xfail:", True)
+        self.setup_python27()
+        self.simple_xfail_keyword("xfail:", False)
+        self.setup_python_ex()
+        self.simple_xfail_keyword("xfail:", False)
+
+    def test_xfail_empty_message(self):
+        self.setup_python26()
+        self.empty_message(True)
+        self.setup_python27()
+        self.empty_message(False)
+        self.setup_python_ex()
+        self.empty_message(False, error_message="")
+
+    def empty_message(self, as_success, error_message="\n"):
+        self.protocol.lineReceived(_b("xfail mcdonalds farm [\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.check_success_or_xfail(as_success, error_message)
+
+    def xfail_quoted_bracket(self, keyword, as_success):
+        # This tests that the quoted bracket is accepted; it cannot yet test
+        # that it is used, because there is no way to expose it in Python so
+        # far.
+        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.check_success_or_xfail(as_success, "]\n")
+
+    def test_xfail_quoted_bracket(self):
+        self.setup_python26()
+        self.xfail_quoted_bracket("xfail", True)
+        self.setup_python27()
+        self.xfail_quoted_bracket("xfail", False)
+        self.setup_python_ex()
+        self.xfail_quoted_bracket("xfail", False)
+
+    def test_xfail_colon_quoted_bracket(self):
+        self.setup_python26()
+        self.xfail_quoted_bracket("xfail:", True)
+        self.setup_python27()
+        self.xfail_quoted_bracket("xfail:", False)
+        self.setup_python_ex()
+        self.xfail_quoted_bracket("xfail:", False)
+
+
+class TestTestProtocolServerAddunexpectedSuccess(TestCase):
+    """Tests for the uxsuccess keyword."""
+
+    def capture_expected_failure(self, test, err):
+        self._events.append((test, err))
+
+    def setup_python26(self):
+        """Setup a test object ready to be xfailed and thunk to success."""
+        self.client = Python26TestResult()
+        self.setup_protocol()
+
+    def setup_python27(self):
+        """Setup a test object ready to be xfailed."""
+        self.client = Python27TestResult()
+        self.setup_protocol()
+
+    def setup_python_ex(self):
+        """Setup a test object ready to be xfailed with details."""
+        self.client = ExtendedTestResult()
+        self.setup_protocol()
+
+    def setup_protocol(self):
+        """Setup the protocol based on self.client."""
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = self.client._events[-1][-1]
+
+    def simple_uxsuccess_keyword(self, keyword, as_fail):
+        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+        self.check_fail_or_uxsuccess(as_fail)
+
+    def check_fail_or_uxsuccess(self, as_fail, error_message=None):
+        details = {}
+        if error_message is not None:
+            details['traceback'] = Content(
+                ContentType("text", "x-traceback", {'charset': 'utf8'}),
+                lambda:[_b(error_message)])
+        if isinstance(self.client, ExtendedTestResult):
+            value = details
+        else:
+            value = None
+        if as_fail:
+            self.client._events[1] = self.client._events[1][:2]
+            # The failure value is generated inside the extended-to-original
+            # decorator, so it is stripped from the event above before
+            # comparing. TODO: use a testtools matcher to check it instead.
+            self.assertEqual([
+                ('startTest', self.test),
+                ('addFailure', self.test),
+                ('stopTest', self.test),
+                ], self.client._events)
+        elif value:
+            self.assertEqual([
+                ('startTest', self.test),
+                ('addUnexpectedSuccess', self.test, value),
+                ('stopTest', self.test),
+                ], self.client._events)
+        else:
+            self.assertEqual([
+                ('startTest', self.test),
+                ('addUnexpectedSuccess', self.test),
+                ('stopTest', self.test),
+                ], self.client._events)
+
+    def test_simple_uxsuccess(self):
+        self.setup_python26()
+        self.simple_uxsuccess_keyword("uxsuccess", True)
+        self.setup_python27()
+        self.simple_uxsuccess_keyword("uxsuccess",  False)
+        self.setup_python_ex()
+        self.simple_uxsuccess_keyword("uxsuccess",  False)
+
+    def test_simple_uxsuccess_colon(self):
+        self.setup_python26()
+        self.simple_uxsuccess_keyword("uxsuccess:", True)
+        self.setup_python27()
+        self.simple_uxsuccess_keyword("uxsuccess:", False)
+        self.setup_python_ex()
+        self.simple_uxsuccess_keyword("uxsuccess:", False)
+
+    def test_uxsuccess_empty_message(self):
+        self.setup_python26()
+        self.empty_message(True)
+        self.setup_python27()
+        self.empty_message(False)
+        self.setup_python_ex()
+        self.empty_message(False, error_message="")
+
+    def empty_message(self, as_fail, error_message="\n"):
+        self.protocol.lineReceived(_b("uxsuccess mcdonalds farm [\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.check_fail_or_uxsuccess(as_fail, error_message)
+
+    def uxsuccess_quoted_bracket(self, keyword, as_fail):
+        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.check_fail_or_uxsuccess(as_fail, "]\n")
+
+    def test_uxsuccess_quoted_bracket(self):
+        self.setup_python26()
+        self.uxsuccess_quoted_bracket("uxsuccess", True)
+        self.setup_python27()
+        self.uxsuccess_quoted_bracket("uxsuccess", False)
+        self.setup_python_ex()
+        self.uxsuccess_quoted_bracket("uxsuccess", False)
+
+    def test_uxsuccess_colon_quoted_bracket(self):
+        self.setup_python26()
+        self.uxsuccess_quoted_bracket("uxsuccess:", True)
+        self.setup_python27()
+        self.uxsuccess_quoted_bracket("uxsuccess:", False)
+        self.setup_python_ex()
+        self.uxsuccess_quoted_bracket("uxsuccess:", False)
+
+
+class TestTestProtocolServerAddSkip(unittest.TestCase):
+    """Tests for the skip keyword.
+
+    In Python this meets the testtools extended TestResult contract.
+    (See https://launchpad.net/testtools).
+    """
+
+    def setUp(self):
+        """Setup a test object ready to be skipped."""
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = self.client._events[-1][-1]
+
+    def assertSkip(self, reason):
+        details = {}
+        if reason is not None:
+            details['reason'] = Content(
+                ContentType("text", "plain"), lambda:[reason])
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addSkip', self.test, details),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def simple_skip_keyword(self, keyword):
+        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+        self.assertSkip(None)
+
+    def test_simple_skip(self):
+        self.simple_skip_keyword("skip")
+
+    def test_simple_skip_colon(self):
+        self.simple_skip_keyword("skip:")
+
+    def test_skip_empty_message(self):
+        self.protocol.lineReceived(_b("skip mcdonalds farm [\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.assertSkip(_b(""))
+
+    def skip_quoted_bracket(self, keyword):
+        # This tests that the quoted bracket is accepted; it cannot yet test
+        # that it is used, because there is no way to expose it in Python so
+        # far.
+        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        self.assertSkip(_b("]\n"))
+
+    def test_skip_quoted_bracket(self):
+        self.skip_quoted_bracket("skip")
+
+    def test_skip_colon_quoted_bracket(self):
+        self.skip_quoted_bracket("skip:")
+
+
+class TestTestProtocolServerAddSuccess(unittest.TestCase):
+
+    def setUp(self):
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        self.test = subunit.RemotedTestCase("mcdonalds farm")
+
+    def simple_success_keyword(self, keyword):
+        self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addSuccess', self.test),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_simple_success(self):
+        self.simple_success_keyword("successful")
+
+    def test_simple_success_colon(self):
+        self.simple_success_keyword("successful:")
+
+    def assertSuccess(self, details):
+        self.assertEqual([
+            ('startTest', self.test),
+            ('addSuccess', self.test, details),
+            ('stopTest', self.test),
+            ], self.client._events)
+
+    def test_success_empty_message(self):
+        self.protocol.lineReceived(_b("success mcdonalds farm [\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        details = {}
+        details['message'] = Content(ContentType("text", "plain"),
+            lambda:[_b("")])
+        self.assertSuccess(details)
+
+    def success_quoted_bracket(self, keyword):
+        # This tests that the quoted bracket is accepted; it cannot yet test
+        # that it is used, because there is no way to expose it in Python so
+        # far.
+        self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
+        self.protocol.lineReceived(_b(" ]\n"))
+        self.protocol.lineReceived(_b("]\n"))
+        details = {}
+        details['message'] = Content(ContentType("text", "plain"),
+            lambda:[_b("]\n")])
+        self.assertSuccess(details)
+
+    def test_success_quoted_bracket(self):
+        self.success_quoted_bracket("success")
+
+    def test_success_colon_quoted_bracket(self):
+        self.success_quoted_bracket("success:")
+
+
+class TestTestProtocolServerProgress(unittest.TestCase):
+    """Test receipt of progress: directives."""
+
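+    # A bare number sets the expected total (PROGRESS_SET), +N/-N adjusts it
+    # relative to the current value (PROGRESS_CUR), and push/pop enter and
+    # leave a nested progress scope (PROGRESS_PUSH/PROGRESS_POP).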
+    def test_progress_accepted_stdlib(self):
+        self.result = Python26TestResult()
+        self.stream = BytesIO()
+        self.protocol = subunit.TestProtocolServer(self.result,
+            stream=self.stream)
+        self.protocol.lineReceived(_b("progress: 23"))
+        self.protocol.lineReceived(_b("progress: -2"))
+        self.protocol.lineReceived(_b("progress: +4"))
+        self.assertEqual(_b(""), self.stream.getvalue())
+
+    def test_progress_accepted_extended(self):
+        # With a progress capable TestResult, progress events are emitted.
+        self.result = ExtendedTestResult()
+        self.stream = BytesIO()
+        self.protocol = subunit.TestProtocolServer(self.result,
+            stream=self.stream)
+        self.protocol.lineReceived(_b("progress: 23"))
+        self.protocol.lineReceived(_b("progress: push"))
+        self.protocol.lineReceived(_b("progress: -2"))
+        self.protocol.lineReceived(_b("progress: pop"))
+        self.protocol.lineReceived(_b("progress: +4"))
+        self.assertEqual(_b(""), self.stream.getvalue())
+        self.assertEqual([
+            ('progress', 23, subunit.PROGRESS_SET),
+            ('progress', None, subunit.PROGRESS_PUSH),
+            ('progress', -2, subunit.PROGRESS_CUR),
+            ('progress', None, subunit.PROGRESS_POP),
+            ('progress', 4, subunit.PROGRESS_CUR),
+            ], self.result._events)
+
+
+class TestTestProtocolServerStreamTags(unittest.TestCase):
+    """Test managing tags on the protocol level."""
+
+    def setUp(self):
+        self.client = ExtendedTestResult()
+        self.protocol = subunit.TestProtocolServer(self.client)
+
+    def test_initial_tags(self):
+        self.protocol.lineReceived(_b("tags: foo bar:baz  quux\n"))
+        self.assertEqual([
+            ('tags', set(["foo", "bar:baz", "quux"]), set()),
+            ], self.client._events)
+
+    def test_minus_removes_tags(self):
+        self.protocol.lineReceived(_b("tags: -bar quux\n"))
+        self.assertEqual([
+            ('tags', set(["quux"]), set(["bar"])),
+            ], self.client._events)
+
+    def test_tags_do_not_get_set_on_test(self):
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        test = self.client._events[0][-1]
+        self.assertEqual(None, getattr(test, 'tags', None))
+
+    def test_tags_do_not_get_set_on_global_tags(self):
+        self.protocol.lineReceived(_b("tags: foo bar\n"))
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        test = self.client._events[-1][-1]
+        self.assertEqual(None, getattr(test, 'tags', None))
+
+    def test_tags_get_set_on_test_tags(self):
+        self.protocol.lineReceived(_b("test mcdonalds farm\n"))
+        test = self.client._events[-1][-1]
+        self.protocol.lineReceived(_b("tags: foo bar\n"))
+        self.protocol.lineReceived(_b("success mcdonalds farm\n"))
+        self.assertEqual(None, getattr(test, 'tags', None))
+
+
+class TestTestProtocolServerStreamTime(unittest.TestCase):
+    """Test managing time information at the protocol level."""
+
+    def test_time_accepted_stdlib(self):
+        self.result = Python26TestResult()
+        self.stream = BytesIO()
+        self.protocol = subunit.TestProtocolServer(self.result,
+            stream=self.stream)
+        self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
+        self.assertEqual(_b(""), self.stream.getvalue())
+
+    def test_time_accepted_extended(self):
+        self.result = ExtendedTestResult()
+        self.stream = BytesIO()
+        self.protocol = subunit.TestProtocolServer(self.result,
+            stream=self.stream)
+        self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
+        self.assertEqual(_b(""), self.stream.getvalue())
+        self.assertEqual([
+            ('time', datetime.datetime(2001, 12, 12, 12, 59, 59, 0,
+            iso8601.Utc()))
+            ], self.result._events)
+
+
+class TestRemotedTestCase(unittest.TestCase):
+
+    def test_simple(self):
+        test = subunit.RemotedTestCase("A test description")
+        self.assertRaises(NotImplementedError, test.setUp)
+        self.assertRaises(NotImplementedError, test.tearDown)
+        self.assertEqual("A test description",
+                         test.shortDescription())
+        self.assertEqual("A test description",
+                         test.id())
+        self.assertEqual("A test description (subunit.RemotedTestCase)", "%s" % test)
+        self.assertEqual("<subunit.RemotedTestCase description="
+                         "'A test description'>", "%r" % test)
+        result = unittest.TestResult()
+        test.run(result)
+        self.assertEqual([(test, _remote_exception_repr + ": "
+                                 "Cannot run RemotedTestCases.\n\n")],
+                         result.errors)
+        self.assertEqual(1, result.testsRun)
+        another_test = subunit.RemotedTestCase("A test description")
+        self.assertEqual(test, another_test)
+        different_test = subunit.RemotedTestCase("ofo")
+        self.assertNotEqual(test, different_test)
+        self.assertNotEqual(another_test, different_test)
+
+
+class TestRemoteError(unittest.TestCase):
+
+    def test_eq(self):
+        error = subunit.RemoteError(_u("Something went wrong"))
+        another_error = subunit.RemoteError(_u("Something went wrong"))
+        different_error = subunit.RemoteError(_u("boo!"))
+        self.assertEqual(error, another_error)
+        self.assertNotEqual(error, different_error)
+        self.assertNotEqual(different_error, another_error)
+
+    def test_empty_constructor(self):
+        self.assertEqual(subunit.RemoteError(), subunit.RemoteError(_u("")))
+
+
+class TestExecTestCase(unittest.TestCase):
+
+    class SampleExecTestCase(subunit.ExecTestCase):
+
+        def test_sample_method(self):
+            """sample-script.py"""
+            # the sample script runs three tests: one that succeeds, one that
+            # fails and one that errors
+
+        def test_sample_method_args(self):
+            """sample-script.py foo"""
+            # sample that will run just one test.
+
+    def test_construct(self):
+        test = self.SampleExecTestCase("test_sample_method")
+        self.assertEqual(test.script,
+                         subunit.join_dir(__file__, 'sample-script.py'))
+
+    def test_args(self):
+        result = unittest.TestResult()
+        test = self.SampleExecTestCase("test_sample_method_args")
+        test.run(result)
+        self.assertEqual(1, result.testsRun)
+
+    def test_run(self):
+        result = ExtendedTestResult()
+        test = self.SampleExecTestCase("test_sample_method")
+        test.run(result)
+        mcdonald = subunit.RemotedTestCase("old mcdonald")
+        bing = subunit.RemotedTestCase("bing crosby")
+        bing_details = {}
+        bing_details['traceback'] = Content(ContentType("text", "x-traceback",
+            {'charset': 'utf8'}), lambda:[_b("foo.c:53:ERROR invalid state\n")])
+        an_error = subunit.RemotedTestCase("an error")
+        error_details = {}
+        self.assertEqual([
+            ('startTest', mcdonald),
+            ('addSuccess', mcdonald),
+            ('stopTest', mcdonald),
+            ('startTest', bing),
+            ('addFailure', bing, bing_details),
+            ('stopTest', bing),
+            ('startTest', an_error),
+            ('addError', an_error, error_details),
+            ('stopTest', an_error),
+            ], result._events)
+
+    def test_debug(self):
+        test = self.SampleExecTestCase("test_sample_method")
+        test.debug()
+
+    def test_count_test_cases(self):
+        """TODO run the child process and count responses to determine the count."""
+
+    def test_join_dir(self):
+        sibling = subunit.join_dir(__file__, 'foo')
+        filedir = os.path.abspath(os.path.dirname(__file__))
+        expected = os.path.join(filedir, 'foo')
+        self.assertEqual(sibling, expected)
+
+
+class DoExecTestCase(subunit.ExecTestCase):
+
+    def test_working_script(self):
+        """sample-two-script.py"""
+
+
+class TestIsolatedTestCase(TestCase):
+
+    class SampleIsolatedTestCase(subunit.IsolatedTestCase):
+
+        SETUP = False
+        TEARDOWN = False
+        TEST = False
+
+        def setUp(self):
+            TestIsolatedTestCase.SampleIsolatedTestCase.SETUP = True
+
+        def tearDown(self):
+            TestIsolatedTestCase.SampleIsolatedTestCase.TEARDOWN = True
+
+        def test_sets_global_state(self):
+            TestIsolatedTestCase.SampleIsolatedTestCase.TEST = True
+
+
+    def test_construct(self):
+        self.SampleIsolatedTestCase("test_sets_global_state")
+
+    @skipIf(os.name != "posix", "Need a posix system for forking tests")
+    def test_run(self):
+        result = unittest.TestResult()
+        test = self.SampleIsolatedTestCase("test_sets_global_state")
+        test.run(result)
+        self.assertEqual(result.testsRun, 1)
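+        # The class-level flags stay False in this process: the isolation is
+        # done by forking (hence the POSIX requirement above), so setUp, the
+        # test body and tearDown only ran in the child process.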
+        self.assertEqual(self.SampleIsolatedTestCase.SETUP, False)
+        self.assertEqual(self.SampleIsolatedTestCase.TEARDOWN, False)
+        self.assertEqual(self.SampleIsolatedTestCase.TEST, False)
+
+    def test_debug(self):
+        pass
+        #test = self.SampleExecTestCase("test_sample_method")
+        #test.debug()
+
+
+class TestIsolatedTestSuite(TestCase):
+
+    class SampleTestToIsolate(unittest.TestCase):
+
+        SETUP = False
+        TEARDOWN = False
+        TEST = False
+
+        def setUp(self):
+            TestIsolatedTestSuite.SampleTestToIsolate.SETUP = True
+
+        def tearDown(self):
+            TestIsolatedTestSuite.SampleTestToIsolate.TEARDOWN = True
+
+        def test_sets_global_state(self):
+            TestIsolatedTestSuite.SampleTestToIsolate.TEST = True
+
+
+    def test_construct(self):
+        subunit.IsolatedTestSuite()
+
+    @skipIf(os.name != "posix", "Need a posix system for forking tests")
+    def test_run(self):
+        result = unittest.TestResult()
+        suite = subunit.IsolatedTestSuite()
+        sub_suite = unittest.TestSuite()
+        sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+        sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+        suite.addTest(sub_suite)
+        suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
+        suite.run(result)
+        self.assertEqual(result.testsRun, 3)
+        self.assertEqual(self.SampleTestToIsolate.SETUP, False)
+        self.assertEqual(self.SampleTestToIsolate.TEARDOWN, False)
+        self.assertEqual(self.SampleTestToIsolate.TEST, False)
+
+
+class TestTestProtocolClient(TestCase):
+
+    def setUp(self):
+        super(TestTestProtocolClient, self).setUp()
+        self.io = BytesIO()
+        self.protocol = subunit.TestProtocolClient(self.io)
+        self.unicode_test = PlaceHolder(_u('\u2603'))
+        self.test = TestTestProtocolClient("test_start_test")
+        self.sample_details = {'something':Content(
+            ContentType('text', 'plain'), lambda:[_b('serialised\nform')])}
+        self.sample_tb_details = dict(self.sample_details)
+        self.sample_tb_details['traceback'] = TracebackContent(
+            subunit.RemoteError(_u("boo qux")), self.test)
+
+    def test_start_test(self):
+        """Test startTest on a TestProtocolClient."""
+        self.protocol.startTest(self.test)
+        self.assertEqual(self.io.getvalue(), _b("test: %s\n" % self.test.id()))
+
+    def test_start_test_unicode_id(self):
+        """Test startTest on a TestProtocolClient."""
+        self.protocol.startTest(self.unicode_test)
+        expected = _b("test: ") + _u('\u2603').encode('utf8') + _b("\n")
+        self.assertEqual(expected, self.io.getvalue())
+
+    def test_stop_test(self):
+        # stopTest doesn't output anything.
+        self.protocol.stopTest(self.test)
+        self.assertEqual(self.io.getvalue(), _b(""))
+
+    def test_add_success(self):
+        """Test addSuccess on a TestProtocolClient."""
+        self.protocol.addSuccess(self.test)
+        self.assertEqual(
+            self.io.getvalue(), _b("successful: %s\n" % self.test.id()))
+
+    def test_add_outcome_unicode_id(self):
+        """Test addSuccess on a TestProtocolClient."""
+        self.protocol.addSuccess(self.unicode_test)
+        expected = _b("successful: ") + _u('\u2603').encode('utf8') + _b("\n")
+        self.assertEqual(expected, self.io.getvalue())
+
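+    # Details are emitted as a "[ multipart" block: each part contributes a
+    # Content-Type line, its name, and its bytes in chunked form (hex length,
+    # CRLF, data, then a zero length chunk), with a closing "]" line.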
+    def test_add_success_details(self):
+        """Test addSuccess on a TestProtocolClient with details."""
+        self.protocol.addSuccess(self.test, details=self.sample_details)
+        self.assertEqual(
+            self.io.getvalue(), _b("successful: %s [ multipart\n"
+                "Content-Type: text/plain\n"
+                "something\n"
+                "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
+
+    def test_add_failure(self):
+        """Test addFailure on a TestProtocolClient."""
+        self.protocol.addFailure(
+            self.test, subunit.RemoteError(_u("boo qux")))
+        self.assertEqual(
+            self.io.getvalue(),
+            _b(('failure: %s [\n' + _remote_exception_str + ': boo qux\n]\n')
+            % self.test.id()))
+
+    def test_add_failure_details(self):
+        """Test addFailure on a TestProtocolClient with details."""
+        self.protocol.addFailure(
+            self.test, details=self.sample_tb_details)
+        self.assertThat([
+            _b(("failure: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "something\n"
+            "F\r\nserialised\nform0\r\n"
+            "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+            "traceback\n" + _remote_exception_str_chunked +
+            "]\n") % self.test.id()),
+            _b(("failure: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "something\n"
+            "F\r\nserialised\nform0\r\n"
+            "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+            "traceback\n" + _remote_exception_str_chunked +
+            "]\n") % self.test.id()),
+            ],
+            Contains(self.io.getvalue()))
+
+    def test_add_error(self):
+        """Test stopTest on a TestProtocolClient."""
+        self.protocol.addError(
+            self.test, subunit.RemoteError(_u("phwoar crikey")))
+        self.assertEqual(
+            self.io.getvalue(),
+            _b(('error: %s [\n' +
+            _remote_exception_str + ": phwoar crikey\n"
+            "]\n") % self.test.id()))
+
+    def test_add_error_details(self):
+        """Test stopTest on a TestProtocolClient with details."""
+        self.protocol.addError(
+            self.test, details=self.sample_tb_details)
+        self.assertThat([
+            _b(("error: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "something\n"
+            "F\r\nserialised\nform0\r\n"
+            "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+            "traceback\n" + _remote_exception_str_chunked +
+            "]\n") % self.test.id()),
+            _b(("error: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "something\n"
+            "F\r\nserialised\nform0\r\n"
+            "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+            "traceback\n" + _remote_exception_str_chunked +
+            "]\n") % self.test.id()),
+            ],
+            Contains(self.io.getvalue()))
+
+    def test_add_expected_failure(self):
+        """Test addExpectedFailure on a TestProtocolClient."""
+        self.protocol.addExpectedFailure(
+            self.test, subunit.RemoteError(_u("phwoar crikey")))
+        self.assertEqual(
+            self.io.getvalue(),
+            _b(('xfail: %s [\n' +
+            _remote_exception_str + ": phwoar crikey\n"
+            "]\n") % self.test.id()))
+
+    def test_add_expected_failure_details(self):
+        """Test addExpectedFailure on a TestProtocolClient with details."""
+        self.protocol.addExpectedFailure(
+            self.test, details=self.sample_tb_details)
+        self.assertThat([
+            _b(("xfail: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "something\n"
+            "F\r\nserialised\nform0\r\n"
+            "Content-Type: text/x-traceback;charset=utf8,language=python\n"
+            "traceback\n" + _remote_exception_str_chunked +
+            "]\n") % self.test.id()),
+            _b(("xfail: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "something\n"
+            "F\r\nserialised\nform0\r\n"
+            "Content-Type: text/x-traceback;language=python,charset=utf8\n"
+            "traceback\n" + _remote_exception_str_chunked +
+            "]\n") % self.test.id()),
+            ],
+            Contains(self.io.getvalue()))
+
+    def test_add_skip(self):
+        """Test addSkip on a TestProtocolClient."""
+        self.protocol.addSkip(
+            self.test, "Has it really?")
+        self.assertEqual(
+            self.io.getvalue(),
+            _b('skip: %s [\nHas it really?\n]\n' % self.test.id()))
+
+    def test_add_skip_details(self):
+        """Test addSkip on a TestProtocolClient with details."""
+        details = {'reason':Content(
+            ContentType('text', 'plain'), lambda:[_b('Has it really?')])}
+        self.protocol.addSkip(self.test, details=details)
+        self.assertEqual(
+            self.io.getvalue(),
+            _b("skip: %s [ multipart\n"
+            "Content-Type: text/plain\n"
+            "reason\n"
+            "E\r\nHas it really?0\r\n"
+            "]\n" % self.test.id()))
+
+    def test_progress_set(self):
+        self.protocol.progress(23, subunit.PROGRESS_SET)
+        self.assertEqual(self.io.getvalue(), _b('progress: 23\n'))
+
+    def test_progress_neg_cur(self):
+        self.protocol.progress(-23, subunit.PROGRESS_CUR)
+        self.assertEqual(self.io.getvalue(), _b('progress: -23\n'))
+
+    def test_progress_pos_cur(self):
+        self.protocol.progress(23, subunit.PROGRESS_CUR)
+        self.assertEqual(self.io.getvalue(), _b('progress: +23\n'))
+
+    def test_progress_pop(self):
+        self.protocol.progress(1234, subunit.PROGRESS_POP)
+        self.assertEqual(self.io.getvalue(), _b('progress: pop\n'))
+
+    def test_progress_push(self):
+        self.protocol.progress(1234, subunit.PROGRESS_PUSH)
+        self.assertEqual(self.io.getvalue(), _b('progress: push\n'))
+
+    def test_time(self):
+        # Calling time() outputs a time signal immediately.
+        self.protocol.time(
+            datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc()))
+        self.assertEqual(
+            _b("time: 2009-10-11 12:13:14.000015Z\n"),
+            self.io.getvalue())
+
+    def test_add_unexpected_success(self):
+        """Test addUnexpectedSuccess on a TestProtocolClient."""
+        self.protocol.addUnexpectedSuccess(self.test)
+        self.assertEqual(
+            self.io.getvalue(), _b("uxsuccess: %s\n" % self.test.id()))
+
+    def test_add_unexpected_success_details(self):
+        """Test addUnexpectedSuccess on a TestProtocolClient with details."""
+        self.protocol.addUnexpectedSuccess(self.test, details=self.sample_details)
+        self.assertEqual(
+            self.io.getvalue(), _b("uxsuccess: %s [ multipart\n"
+                "Content-Type: text/plain\n"
+                "something\n"
+                "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
+
+    def test_tags_empty(self):
+        self.protocol.tags(set(), set())
+        self.assertEqual(_b(""), self.io.getvalue())
+
+    def test_tags_add(self):
+        self.protocol.tags(set(['foo']), set())
+        self.assertEqual(_b("tags: foo\n"), self.io.getvalue())
+
+    def test_tags_both(self):
+        self.protocol.tags(set(['quux']), set(['bar']))
+        self.assertThat(
+            [b"tags: quux -bar\n", b"tags: -bar quux\n"],
+            Contains(self.io.getvalue()))
+
+    def test_tags_gone(self):
+        self.protocol.tags(set(), set(['bar']))
+        self.assertEqual(_b("tags: -bar\n"), self.io.getvalue())
diff --git a/third_party/subunit/python/subunit/tests/test_test_protocol2.py b/third_party/subunit/python/subunit/tests/test_test_protocol2.py
new file mode 100644
index 0000000..c21392c
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_test_protocol2.py
@@ -0,0 +1,436 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+from io import BytesIO
+import datetime
+
+from testtools import TestCase
+from testtools.matchers import Contains, HasLength
+from testtools.tests.test_testresult import TestStreamResultContract
+from testtools.testresult.doubles import StreamResult
+
+import subunit
+import subunit.iso8601 as iso8601
+
+CONSTANT_ENUM = b'\xb3)\x01\x0c\x03foo\x08U_\x1b'
+CONSTANT_INPROGRESS = b'\xb3)\x02\x0c\x03foo\x8e\xc1-\xb5'
+CONSTANT_SUCCESS = b'\xb3)\x03\x0c\x03fooE\x9d\xfe\x10'
+CONSTANT_UXSUCCESS = b'\xb3)\x04\x0c\x03fooX\x98\xce\xa8'
+CONSTANT_SKIP = b'\xb3)\x05\x0c\x03foo\x93\xc4\x1d\r'
+CONSTANT_FAIL = b'\xb3)\x06\x0c\x03foo\x15Po\xa3'
+CONSTANT_XFAIL = b'\xb3)\x07\x0c\x03foo\xde\x0c\xbc\x06'
+CONSTANT_EOF = b'\xb3!\x10\x08S\x15\x88\xdc'
+CONSTANT_FILE_CONTENT = b'\xb3!@\x13\x06barney\x03wooA5\xe3\x8c'
+CONSTANT_MIME = b'\xb3! #\x1aapplication/foo; charset=1x3Q\x15'
+CONSTANT_TIMESTAMP = b'\xb3+\x03\x13<\x17T\xcf\x80\xaf\xc8\x03barI\x96>-'
+CONSTANT_ROUTE_CODE = b'\xb3-\x03\x13\x03bar\x06source\x9cY9\x19'
+CONSTANT_RUNNABLE = b'\xb3(\x03\x0c\x03foo\xe3\xea\xf5\xa4'
+CONSTANT_TAGS = [
+    b'\xb3)\x80\x15\x03bar\x02\x03foo\x03barTHn\xb4',
+    b'\xb3)\x80\x15\x03bar\x02\x03bar\x03foo\xf8\xf1\x91o',
+    ]
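+# The CONSTANT_* byte strings are pre-computed subunit v2 packets used as
+# golden values: the encoder tests check the bytes emitted for a single event
+# against them, and the parser tests feed them back in. CONSTANT_TAGS holds
+# two encodings because the tag set may be serialised in either order.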
+
+
+class TestStreamResultToBytesContract(TestCase, TestStreamResultContract):
+    """Check that StreamResult behaves as testtools expects."""
+
+    def _make_result(self):
+        return subunit.StreamResultToBytes(BytesIO())
+
+
+class TestStreamResultToBytes(TestCase):
+
+    def _make_result(self):
+        output = BytesIO()
+        return subunit.StreamResultToBytes(output), output
+
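+    # _write_number uses the protocol's variable length integer encoding: the
+    # top two bits of the first byte select a 1, 2, 3 or 4 byte field and the
+    # remaining bits hold the value big-endian, so the largest representable
+    # value is 2**30 - 1; negatives and anything larger are rejected.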
+    def test_numbers(self):
+        result = subunit.StreamResultToBytes(BytesIO())
+        packet = []
+        self.assertRaises(Exception, result._write_number, -1, packet)
+        self.assertEqual([], packet)
+        result._write_number(0, packet)
+        self.assertEqual([b'\x00'], packet)
+        del packet[:]
+        result._write_number(63, packet)
+        self.assertEqual([b'\x3f'], packet)
+        del packet[:]
+        result._write_number(64, packet)
+        self.assertEqual([b'\x40\x40'], packet)
+        del packet[:]
+        result._write_number(16383, packet)
+        self.assertEqual([b'\x7f\xff'], packet)
+        del packet[:]
+        result._write_number(16384, packet)
+        self.assertEqual([b'\x80\x40', b'\x00'], packet)
+        del packet[:]
+        result._write_number(4194303, packet)
+        self.assertEqual([b'\xbf\xff', b'\xff'], packet)
+        del packet[:]
+        result._write_number(4194304, packet)
+        self.assertEqual([b'\xc0\x40\x00\x00'], packet)
+        del packet[:]
+        result._write_number(1073741823, packet)
+        self.assertEqual([b'\xff\xff\xff\xff'], packet)
+        del packet[:]
+        self.assertRaises(Exception, result._write_number, 1073741824, packet)
+        self.assertEqual([], packet)
+
+    def test_volatile_length(self):
+        # If the length of the packet, before the length field itself is
+        # counted, sits right on the boundary of the length field's variable
+        # length encoding, it is easy to get the length wrong by failing to
+        # account for the length field: the encoder has to ensure that
+        # length == length_of_rest + length_of_length.
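+        # e.g. with no file bytes the rest of the packet is 9 bytes, so a one
+        # byte length field gives the 10 byte packet checked below; with 54
+        # file bytes the rest is 63 bytes and adding a one byte length would
+        # exceed that encoding's maximum of 63, so a two byte length is used
+        # and the packet becomes 65 bytes.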
+        result, output = self._make_result()
+        # 1 byte short:
+        result.status(file_name="", file_bytes=b'\xff'*0)
+        self.assertThat(output.getvalue(), HasLength(10))
+        self.assertEqual(b'\x0a', output.getvalue()[3:4])
+        output.seek(0)
+        output.truncate()
+        # 1 byte long:
+        result.status(file_name="", file_bytes=b'\xff'*53)
+        self.assertThat(output.getvalue(), HasLength(63))
+        self.assertEqual(b'\x3f', output.getvalue()[3:4])
+        output.seek(0)
+        output.truncate()
+        # 2 bytes short
+        result.status(file_name="", file_bytes=b'\xff'*54)
+        self.assertThat(output.getvalue(), HasLength(65))
+        self.assertEqual(b'\x40\x41', output.getvalue()[3:5])
+        output.seek(0)
+        output.truncate()
+        # 2 bytes long
+        result.status(file_name="", file_bytes=b'\xff'*16371)
+        self.assertThat(output.getvalue(), HasLength(16383))
+        self.assertEqual(b'\x7f\xff', output.getvalue()[3:5])
+        output.seek(0)
+        output.truncate()
+        # 3 bytes short
+        result.status(file_name="", file_bytes=b'\xff'*16372)
+        self.assertThat(output.getvalue(), HasLength(16385))
+        self.assertEqual(b'\x80\x40\x01', output.getvalue()[3:6])
+        output.seek(0)
+        output.truncate()
+        # 3 bytes long
+        result.status(file_name="", file_bytes=b'\xff'*4194289)
+        self.assertThat(output.getvalue(), HasLength(4194303))
+        self.assertEqual(b'\xbf\xff\xff', output.getvalue()[3:6])
+        output.seek(0)
+        output.truncate()
+        self.assertRaises(Exception, result.status, file_name="",
+            file_bytes=b'\xff'*4194290)
+
+    def test_trivial_enumeration(self):
+        result, output = self._make_result()
+        result.status("foo", 'exists')
+        self.assertEqual(CONSTANT_ENUM, output.getvalue())
+
+    def test_inprogress(self):
+        result, output = self._make_result()
+        result.status("foo", 'inprogress')
+        self.assertEqual(CONSTANT_INPROGRESS, output.getvalue())
+
+    def test_success(self):
+        result, output = self._make_result()
+        result.status("foo", 'success')
+        self.assertEqual(CONSTANT_SUCCESS, output.getvalue())
+
+    def test_uxsuccess(self):
+        result, output = self._make_result()
+        result.status("foo", 'uxsuccess')
+        self.assertEqual(CONSTANT_UXSUCCESS, output.getvalue())
+
+    def test_skip(self):
+        result, output = self._make_result()
+        result.status("foo", 'skip')
+        self.assertEqual(CONSTANT_SKIP, output.getvalue())
+
+    def test_fail(self):
+        result, output = self._make_result()
+        result.status("foo", 'fail')
+        self.assertEqual(CONSTANT_FAIL, output.getvalue())
+
+    def test_xfail(self):
+        result, output = self._make_result()
+        result.status("foo", 'xfail')
+        self.assertEqual(CONSTANT_XFAIL, output.getvalue())
+
+    def test_unknown_status(self):
+        result, output = self._make_result()
+        self.assertRaises(Exception, result.status, "foo", 'boo')
+        self.assertEqual(b'', output.getvalue())
+
+    def test_eof(self):
+        result, output = self._make_result()
+        result.status(eof=True)
+        self.assertEqual(CONSTANT_EOF, output.getvalue())
+
+    def test_file_content(self):
+        result, output = self._make_result()
+        result.status(file_name="barney", file_bytes=b"woo")
+        self.assertEqual(CONSTANT_FILE_CONTENT, output.getvalue())
+
+    def test_mime(self):
+        result, output = self._make_result()
+        result.status(mime_type="application/foo; charset=1")
+        self.assertEqual(CONSTANT_MIME, output.getvalue())
+
+    def test_route_code(self):
+        result, output = self._make_result()
+        result.status(test_id="bar", test_status='success',
+            route_code="source")
+        self.assertEqual(CONSTANT_ROUTE_CODE, output.getvalue())
+
+    def test_runnable(self):
+        result, output = self._make_result()
+        result.status("foo", 'success', runnable=False)
+        self.assertEqual(CONSTANT_RUNNABLE, output.getvalue())
+
+    def test_tags(self):
+        result, output = self._make_result()
+        result.status(test_id="bar", test_tags=set(['foo', 'bar']))
+        self.assertThat(CONSTANT_TAGS, Contains(output.getvalue()))
+
+    def test_timestamp(self):
+        timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
+            iso8601.Utc())
+        result, output = self._make_result()
+        result.status(test_id="bar", test_status='success', timestamp=timestamp)
+        self.assertEqual(CONSTANT_TIMESTAMP, output.getvalue())
+
+
+class TestByteStreamToStreamResult(TestCase):
+
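+    # Bytes that are not subunit packets are forwarded one byte at a time as
+    # file events routed to the stream named by non_subunit_name; without a
+    # non_subunit_name such bytes raise an error instead.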
+    def test_non_subunit_encapsulated(self):
+        source = BytesIO(b"foo\nbar\n")
+        result = StreamResult()
+        subunit.ByteStreamToStreamResult(
+            source, non_subunit_name="stdout").run(result)
+        self.assertEqual([
+            ('status', None, None, None, True, 'stdout', b'f', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'o', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'b', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'a', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'r', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'\n', False, None, None, None),
+            ], result._events)
+        self.assertEqual(b'', source.read())
+
+    def test_signature_middle_utf8_char(self):
+        utf8_bytes = b'\xe3\xb3\x8a'
+        source = BytesIO(utf8_bytes)
+        # The middle byte (0xb3) is the packet signature, but it falls inside
+        # a UTF-8 character (u'\u3cca'), so it must not start a packet; the
+        # bytes are forwarded as non-subunit output instead.
+        result = StreamResult()
+        subunit.ByteStreamToStreamResult(
+            source, non_subunit_name="stdout").run(
+            result)
+        self.assertEqual([
+            ('status', None, None, None, True, 'stdout', b'\xe3', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'\xb3', False, None, None, None),
+            ('status', None, None, None, True, 'stdout', b'\x8a', False, None, None, None),
+            ], result._events)
+
+    def test_non_subunit_disabled_raises(self):
+        source = BytesIO(b"foo\nbar\n")
+        result = StreamResult()
+        case = subunit.ByteStreamToStreamResult(source)
+        e = self.assertRaises(Exception, case.run, result)
+        self.assertEqual(b'f', e.args[1])
+        self.assertEqual(b'oo\nbar\n', source.read())
+        self.assertEqual([], result._events)
+
+    def test_trivial_enumeration(self):
+        source = BytesIO(CONSTANT_ENUM)
+        result = StreamResult()
+        subunit.ByteStreamToStreamResult(
+            source, non_subunit_name="stdout").run(result)
+        self.assertEqual(b'', source.read())
+        self.assertEqual([
+            ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+            ], result._events)
+
+    def test_multiple_events(self):
+        source = BytesIO(CONSTANT_ENUM + CONSTANT_ENUM)
+        result = StreamResult()
+        subunit.ByteStreamToStreamResult(
+            source, non_subunit_name="stdout").run(result)
+        self.assertEqual(b'', source.read())
+        self.assertEqual([
+            ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+            ('status', 'foo', 'exists', None, True, None, None, False, None, None, None),
+            ], result._events)
+
+    def test_inprogress(self):
+        self.check_event(CONSTANT_INPROGRESS, 'inprogress')
+
+    def test_success(self):
+        self.check_event(CONSTANT_SUCCESS, 'success')
+
+    def test_uxsuccess(self):
+        self.check_event(CONSTANT_UXSUCCESS, 'uxsuccess')
+
+    def test_skip(self):
+        self.check_event(CONSTANT_SKIP, 'skip')
+
+    def test_fail(self):
+        self.check_event(CONSTANT_FAIL, 'fail')
+
+    def test_xfail(self):
+        self.check_event(CONSTANT_XFAIL, 'xfail')
+
+    def check_events(self, source_bytes, events):
+        source = BytesIO(source_bytes)
+        result = StreamResult()
+        subunit.ByteStreamToStreamResult(
+            source, non_subunit_name="stdout").run(result)
+        self.assertEqual(b'', source.read())
+        self.assertEqual(events, result._events)
+        # Any file attachments should be byte contents, as users assume they will be.
+        for event in result._events:
+            if event[5] is not None:
+                self.assertIsInstance(event[6], bytes)
+
+    def check_event(self, source_bytes, test_status=None, test_id="foo",
+        route_code=None, timestamp=None, tags=None, mime_type=None,
+        file_name=None, file_bytes=None, eof=False, runnable=True):
+        event = self._event(test_id=test_id, test_status=test_status,
+            tags=tags, runnable=runnable, file_name=file_name,
+            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+            route_code=route_code, timestamp=timestamp)
+        self.check_events(source_bytes, [event])
+
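+    # Orientation note (added commentary, not from upstream): the tuple built
+    # below appears to mirror the positional event record that the testtools
+    # StreamResult test double appends for each status() call, which is why
+    # the literal tuples in the earlier tests follow the same field order.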
+    def _event(self, test_status=None, test_id=None, route_code=None,
+        timestamp=None, tags=None, mime_type=None, file_name=None,
+        file_bytes=None, eof=False, runnable=True):
+        return ('status', test_id, test_status, tags, runnable, file_name,
+            file_bytes, eof, mime_type, route_code, timestamp)
+
+    def test_eof(self):
+        self.check_event(CONSTANT_EOF, test_id=None, eof=True)
+
+    def test_file_content(self):
+        self.check_event(CONSTANT_FILE_CONTENT,
+            test_id=None, file_name="barney", file_bytes=b"woo")
+
+    def test_file_content_length_into_checksum(self):
+        # A bad file content length which creeps into the checksum.
+        bad_file_length_content = b'\xb3!@\x13\x06barney\x04woo\xdc\xe2\xdb\x35'
+        self.check_events(bad_file_length_content, [
+            self._event(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=bad_file_length_content,
+                mime_type="application/octet-stream"),
+            self._event(test_id="subunit.parser", test_status="fail", eof=True,
+                file_name="Parser Error",
+                file_bytes=b"File content extends past end of packet: claimed 4 bytes, 3 available",
+                mime_type="text/plain;charset=utf8"),
+            ])
+
+    def test_packet_length_4_word_varint(self):
+        packet_data = b'\xb3!@\xc0\x00\x11'
+        self.check_events(packet_data, [
+            self._event(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=packet_data,
+                mime_type="application/octet-stream"),
+            self._event(test_id="subunit.parser", test_status="fail", eof=True,
+                file_name="Parser Error",
+                file_bytes=b"3 byte maximum given but 4 byte value found.",
+                mime_type="text/plain;charset=utf8"),
+            ])
+
+    def test_mime(self):
+        self.check_event(CONSTANT_MIME,
+            test_id=None, mime_type='application/foo; charset=1')
+
+    def test_route_code(self):
+        self.check_event(CONSTANT_ROUTE_CODE,
+            'success', route_code="source", test_id="bar")
+
+    def test_runnable(self):
+        self.check_event(CONSTANT_RUNNABLE,
+            test_status='success', runnable=False)
+
+    def test_tags(self):
+        self.check_event(CONSTANT_TAGS[0],
+            None, tags=set(['foo', 'bar']), test_id="bar")
+
+    def test_timestamp(self):
+        timestamp = datetime.datetime(2001, 12, 12, 12, 59, 59, 45,
+            iso8601.Utc())
+        self.check_event(CONSTANT_TIMESTAMP,
+            'success', test_id='bar', timestamp=timestamp)
+
+    def test_bad_crc_errors_via_status(self):
+        file_bytes = CONSTANT_MIME[:-1] + b'\x00'
+        self.check_events( file_bytes, [
+            self._event(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=file_bytes,
+                mime_type="application/octet-stream"),
+            self._event(test_id="subunit.parser", test_status="fail", eof=True,
+                file_name="Parser Error",
+                file_bytes=b'Bad checksum - calculated (0x78335115), '
+                    b'stored (0x78335100)',
+                mime_type="text/plain;charset=utf8"),
+            ])
+
+    def test_not_utf8_in_string(self):
+        file_bytes = CONSTANT_ROUTE_CODE[:5] + b'\xb4' + CONSTANT_ROUTE_CODE[6:-4] + b'\xce\x56\xc6\x17'
+        self.check_events(file_bytes, [
+            self._event(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=file_bytes,
+                mime_type="application/octet-stream"),
+            self._event(test_id="subunit.parser", test_status="fail", eof=True,
+                file_name="Parser Error",
+                file_bytes=b'UTF8 string at offset 2 is not UTF8',
+                mime_type="text/plain;charset=utf8"),
+            ])
+
+    def test_NULL_in_string(self):
+        file_bytes = CONSTANT_ROUTE_CODE[:6] + b'\x00' + CONSTANT_ROUTE_CODE[7:-4] + b'\xd7\x41\xac\xfe'
+        self.check_events(file_bytes, [
+            self._event(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=file_bytes,
+                mime_type="application/octet-stream"),
+            self._event(test_id="subunit.parser", test_status="fail", eof=True,
+                file_name="Parser Error",
+                file_bytes=b'UTF8 string at offset 2 contains NUL byte',
+                mime_type="text/plain;charset=utf8"),
+            ])
+
+    def test_bad_utf8_stringlength(self):
+        file_bytes = CONSTANT_ROUTE_CODE[:4] + b'\x3f' + CONSTANT_ROUTE_CODE[5:-4] + b'\xbe\x29\xe0\xc2'
+        self.check_events(file_bytes, [
+            self._event(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=file_bytes,
+                mime_type="application/octet-stream"),
+            self._event(test_id="subunit.parser", test_status="fail", eof=True,
+                file_name="Parser Error",
+                file_bytes=b'UTF8 string at offset 2 extends past end of '
+                    b'packet: claimed 63 bytes, 10 available',
+                mime_type="text/plain;charset=utf8"),
+            ])
+
+    def test_route_code_and_file_content(self):
+        content = BytesIO()
+        subunit.StreamResultToBytes(content).status(
+            route_code='0', mime_type='text/plain', file_name='bar',
+            file_bytes=b'foo')
+        self.check_event(content.getvalue(), test_id=None, file_name='bar',
+            route_code='0', mime_type='text/plain', file_bytes=b'foo')
diff --git a/third_party/subunit/python/subunit/tests/test_test_results.py b/third_party/subunit/python/subunit/tests/test_test_results.py
new file mode 100644
index 0000000..44f95b3
--- /dev/null
+++ b/third_party/subunit/python/subunit/tests/test_test_results.py
@@ -0,0 +1,566 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2009  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import csv
+import datetime
+import sys
+import unittest
+
+from testtools import TestCase
+from testtools.compat import StringIO
+from testtools.content import (
+    text_content,
+    TracebackContent,
+    )
+from testtools.testresult.doubles import ExtendedTestResult
+
+import subunit
+import subunit.iso8601 as iso8601
+import subunit.test_results
+
+import testtools
+
+
+class LoggingDecorator(subunit.test_results.HookedTestResultDecorator):
+
+    def __init__(self, decorated):
+        self._calls = 0
+        super(LoggingDecorator, self).__init__(decorated)
+
+    def _before_event(self):
+        self._calls += 1
+
+
+class AssertBeforeTestResult(LoggingDecorator):
+    """A TestResult for checking preconditions."""
+
+    def __init__(self, decorated, test):
+        self.test = test
+        super(AssertBeforeTestResult, self).__init__(decorated)
+
+    def _before_event(self):
+        self.test.assertEqual(1, self.earlier._calls)
+        super(AssertBeforeTestResult, self)._before_event()
+
+
+class TimeCapturingResult(unittest.TestResult):
+
+    def __init__(self):
+        super(TimeCapturingResult, self).__init__()
+        self._calls = []
+        self.failfast = False
+
+    def time(self, a_datetime):
+        self._calls.append(a_datetime)
+
+
+class TestHookedTestResultDecorator(unittest.TestCase):
+
+    def setUp(self):
+        # An end to the chain
+        terminal = unittest.TestResult()
+        # Asserts that the call was made to self.result before asserter was
+        # called.
+        asserter = AssertBeforeTestResult(terminal, self)
+        # The result object we call, which must increase its call count.
+        self.result = LoggingDecorator(asserter)
+        asserter.earlier = self.result
+        self.decorated = asserter
+
+    def tearDown(self):
+        # The hook in self.result must have been called
+        self.assertEqual(1, self.result._calls)
+        # The hook in asserter must have been called too, otherwise the
+        # assertion about ordering won't have completed.
+        self.assertEqual(1, self.decorated._calls)
+
+    def test_startTest(self):
+        self.result.startTest(self)
+
+    def test_startTestRun(self):
+        self.result.startTestRun()
+
+    def test_stopTest(self):
+        self.result.stopTest(self)
+
+    def test_stopTestRun(self):
+        self.result.stopTestRun()
+
+    def test_addError(self):
+        self.result.addError(self, subunit.RemoteError())
+
+    def test_addError_details(self):
+        self.result.addError(self, details={})
+
+    def test_addFailure(self):
+        self.result.addFailure(self, subunit.RemoteError())
+
+    def test_addFailure_details(self):
+        self.result.addFailure(self, details={})
+
+    def test_addSuccess(self):
+        self.result.addSuccess(self)
+
+    def test_addSuccess_details(self):
+        self.result.addSuccess(self, details={})
+
+    def test_addSkip(self):
+        self.result.addSkip(self, "foo")
+
+    def test_addSkip_details(self):
+        self.result.addSkip(self, details={})
+
+    def test_addExpectedFailure(self):
+        self.result.addExpectedFailure(self, subunit.RemoteError())
+
+    def test_addExpectedFailure_details(self):
+        self.result.addExpectedFailure(self, details={})
+
+    def test_addUnexpectedSuccess(self):
+        self.result.addUnexpectedSuccess(self)
+
+    def test_addUnexpectedSuccess_details(self):
+        self.result.addUnexpectedSuccess(self, details={})
+
+    def test_progress(self):
+        self.result.progress(1, subunit.PROGRESS_SET)
+
+    def test_wasSuccessful(self):
+        self.result.wasSuccessful()
+
+    def test_shouldStop(self):
+        self.result.shouldStop
+
+    def test_stop(self):
+        self.result.stop()
+
+    def test_time(self):
+        self.result.time(None)
+
+
+class TestAutoTimingTestResultDecorator(unittest.TestCase):
+
+    def setUp(self):
+        # An end to the chain which captures time events.
+        terminal = TimeCapturingResult()
+        # The result object under test.
+        self.result = subunit.test_results.AutoTimingTestResultDecorator(
+            terminal)
+        self.decorated = terminal
+
+    def test_without_time_calls_time_is_called_and_not_None(self):
+        self.result.startTest(self)
+        self.assertEqual(1, len(self.decorated._calls))
+        self.assertNotEqual(None, self.decorated._calls[0])
+
+    def test_no_time_from_progress(self):
+        self.result.progress(1, subunit.PROGRESS_CUR)
+        self.assertEqual(0, len(self.decorated._calls))
+
+    def test_no_time_from_shouldStop(self):
+        self.decorated.stop()
+        self.result.shouldStop
+        self.assertEqual(0, len(self.decorated._calls))
+
+    def test_calling_time_inhibits_automatic_time(self):
+        # Calling time() outputs a time signal immediately and prevents
+        # automatically adding one when other methods are called.
+        time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+        self.result.time(time)
+        self.result.startTest(self)
+        self.result.stopTest(self)
+        self.assertEqual(1, len(self.decorated._calls))
+        self.assertEqual(time, self.decorated._calls[0])
+
+    def test_calling_time_None_enables_automatic_time(self):
+        time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
+        self.result.time(time)
+        self.assertEqual(1, len(self.decorated._calls))
+        self.assertEqual(time, self.decorated._calls[0])
+        # Calling None passes the None through, in case other results care.
+        self.result.time(None)
+        self.assertEqual(2, len(self.decorated._calls))
+        self.assertEqual(None, self.decorated._calls[1])
+        # Calling other methods doesn't generate an automatic time event.
+        self.result.startTest(self)
+        self.assertEqual(3, len(self.decorated._calls))
+        self.assertNotEqual(None, self.decorated._calls[2])
+
+    def test_set_failfast_True(self):
+        self.assertFalse(self.decorated.failfast)
+        self.result.failfast = True
+        self.assertTrue(self.decorated.failfast)
+
+
+class TestTagCollapsingDecorator(TestCase):
+
+    def test_tags_collapsed_outside_of_tests(self):
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+        tag_collapser.tags(set(['a']), set())
+        tag_collapser.tags(set(['b']), set())
+        tag_collapser.startTest(self)
+        self.assertEquals(
+            [('tags', set(['a', 'b']), set([])),
+             ('startTest', self),
+             ], result._events)
+
+    def test_tags_collapsed_outside_of_tests_are_flushed(self):
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+        tag_collapser.startTestRun()
+        tag_collapser.tags(set(['a']), set())
+        tag_collapser.tags(set(['b']), set())
+        tag_collapser.startTest(self)
+        tag_collapser.addSuccess(self)
+        tag_collapser.stopTest(self)
+        tag_collapser.stopTestRun()
+        self.assertEquals(
+            [('startTestRun',),
+             ('tags', set(['a', 'b']), set([])),
+             ('startTest', self),
+             ('addSuccess', self),
+             ('stopTest', self),
+             ('stopTestRun',),
+             ], result._events)
+
+    def test_tags_forwarded_after_tests(self):
+        test = subunit.RemotedTestCase('foo')
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+        tag_collapser.startTestRun()
+        tag_collapser.startTest(test)
+        tag_collapser.addSuccess(test)
+        tag_collapser.stopTest(test)
+        tag_collapser.tags(set(['a']), set(['b']))
+        tag_collapser.stopTestRun()
+        self.assertEqual(
+            [('startTestRun',),
+             ('startTest', test),
+             ('addSuccess', test),
+             ('stopTest', test),
+             ('tags', set(['a']), set(['b'])),
+             ('stopTestRun',),
+             ],
+            result._events)
+
+    def test_tags_collapsed_inside_of_tests(self):
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+        test = subunit.RemotedTestCase('foo')
+        tag_collapser.startTest(test)
+        tag_collapser.tags(set(['a']), set())
+        tag_collapser.tags(set(['b']), set(['a']))
+        tag_collapser.tags(set(['c']), set())
+        tag_collapser.stopTest(test)
+        self.assertEquals(
+            [('startTest', test),
+             ('tags', set(['b', 'c']), set(['a'])),
+             ('stopTest', test)],
+            result._events)
+
+    def test_tags_collapsed_inside_of_tests_different_ordering(self):
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+        test = subunit.RemotedTestCase('foo')
+        tag_collapser.startTest(test)
+        tag_collapser.tags(set(), set(['a']))
+        tag_collapser.tags(set(['a', 'b']), set())
+        tag_collapser.tags(set(['c']), set())
+        tag_collapser.stopTest(test)
+        self.assertEquals(
+            [('startTest', test),
+             ('tags', set(['a', 'b', 'c']), set()),
+             ('stopTest', test)],
+            result._events)
+
+    def test_tags_sent_before_result(self):
+        # Because addSuccess and friends tend to send subunit output
+        # immediately, and because 'tags:' before a result line means
+        # something different to 'tags:' after a result line, we need to be
+        # sure that tags are emitted before 'addSuccess' (or whatever).
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
+        test = subunit.RemotedTestCase('foo')
+        tag_collapser.startTest(test)
+        tag_collapser.tags(set(['a']), set())
+        tag_collapser.addSuccess(test)
+        tag_collapser.stopTest(test)
+        self.assertEquals(
+            [('startTest', test),
+             ('tags', set(['a']), set()),
+             ('addSuccess', test),
+             ('stopTest', test)],
+            result._events)
+
+
+class TestTimeCollapsingDecorator(TestCase):
+
+    def make_time(self):
+        # Heh heh.
+        return datetime.datetime(
+            2000, 1, self.getUniqueInteger(), tzinfo=iso8601.UTC)
+
+    def test_initial_time_forwarded(self):
+        # We always forward the first time event we see.
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+        a_time = self.make_time()
+        tag_collapser.time(a_time)
+        self.assertEquals([('time', a_time)], result._events)
+
+    def test_time_collapsed_to_first_and_last(self):
+        # If there are many consecutive time events, only the first and last
+        # are sent through.
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+        times = [self.make_time() for i in range(5)]
+        for a_time in times:
+            tag_collapser.time(a_time)
+        tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+        self.assertEquals(
+            [('time', times[0]), ('time', times[-1])], result._events[:-1])
+
+    def test_only_one_time_sent(self):
+        # If we receive a single time event followed by a non-time event, we
+        # send exactly one time event.
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+        a_time = self.make_time()
+        tag_collapser.time(a_time)
+        tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+        self.assertEquals([('time', a_time)], result._events[:-1])
+
+    def test_duplicate_times_not_sent(self):
+        # Many time events with the exact same time are collapsed into one
+        # time event.
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+        a_time = self.make_time()
+        for i in range(5):
+            tag_collapser.time(a_time)
+        tag_collapser.startTest(subunit.RemotedTestCase('foo'))
+        self.assertEquals([('time', a_time)], result._events[:-1])
+
+    def test_no_times_inserted(self):
+        result = ExtendedTestResult()
+        tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
+        a_time = self.make_time()
+        tag_collapser.time(a_time)
+        foo = subunit.RemotedTestCase('foo')
+        tag_collapser.startTest(foo)
+        tag_collapser.addSuccess(foo)
+        tag_collapser.stopTest(foo)
+        self.assertEquals(
+            [('time', a_time),
+             ('startTest', foo),
+             ('addSuccess', foo),
+             ('stopTest', foo)], result._events)
+
+
+class TestByTestResultTests(testtools.TestCase):
+
+    def setUp(self):
+        super(TestByTestResultTests, self).setUp()
+        self.log = []
+        self.result = subunit.test_results.TestByTestResult(self.on_test)
+        if sys.version_info >= (3, 0):
+            self.result._now = iter(range(5)).__next__
+        else:
+            self.result._now = iter(range(5)).next
+
+    def assertCalled(self, **kwargs):
+        defaults = {
+            'test': self,
+            'tags': set(),
+            'details': None,
+            'start_time': 0,
+            'stop_time': 1,
+            }
+        defaults.update(kwargs)
+        self.assertEqual([defaults], self.log)
+
+    def on_test(self, **kwargs):
+        self.log.append(kwargs)
+
+    def test_no_tests_nothing_reported(self):
+        self.result.startTestRun()
+        self.result.stopTestRun()
+        self.assertEqual([], self.log)
+
+    def test_add_success(self):
+        self.result.startTest(self)
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertCalled(status='success')
+
+    def test_add_success_details(self):
+        self.result.startTest(self)
+        details = {'foo': 'bar'}
+        self.result.addSuccess(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', details=details)
+
+    def test_tags(self):
+        if not getattr(self.result, 'tags', None):
+            self.skipTest("No tags in testtools")
+        self.result.tags(['foo'], [])
+        self.result.startTest(self)
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', tags=set(['foo']))
+
+    def test_add_error(self):
+        self.result.startTest(self)
+        try:
+            1/0
+        except ZeroDivisionError:
+            error = sys.exc_info()
+        self.result.addError(self, error)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='error',
+            details={'traceback': TracebackContent(error, self)})
+
+    def test_add_error_details(self):
+        self.result.startTest(self)
+        details = {"foo": text_content("bar")}
+        self.result.addError(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='error', details=details)
+
+    def test_add_failure(self):
+        self.result.startTest(self)
+        try:
+            self.fail("intentional failure")
+        except self.failureException:
+            failure = sys.exc_info()
+        self.result.addFailure(self, failure)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='failure',
+            details={'traceback': TracebackContent(failure, self)})
+
+    def test_add_failure_details(self):
+        self.result.startTest(self)
+        details = {"foo": text_content("bar")}
+        self.result.addFailure(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='failure', details=details)
+
+    def test_add_xfail(self):
+        self.result.startTest(self)
+        try:
+            1/0
+        except ZeroDivisionError:
+            error = sys.exc_info()
+        self.result.addExpectedFailure(self, error)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='xfail',
+            details={'traceback': TracebackContent(error, self)})
+
+    def test_add_xfail_details(self):
+        self.result.startTest(self)
+        details = {"foo": text_content("bar")}
+        self.result.addExpectedFailure(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='xfail', details=details)
+
+    def test_add_unexpected_success(self):
+        self.result.startTest(self)
+        details = {'foo': 'bar'}
+        self.result.addUnexpectedSuccess(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='success', details=details)
+
+    def test_add_skip_reason(self):
+        self.result.startTest(self)
+        reason = self.getUniqueString()
+        self.result.addSkip(self, reason)
+        self.result.stopTest(self)
+        self.assertCalled(
+            status='skip', details={'reason': text_content(reason)})
+
+    def test_add_skip_details(self):
+        self.result.startTest(self)
+        details = {'foo': 'bar'}
+        self.result.addSkip(self, details=details)
+        self.result.stopTest(self)
+        self.assertCalled(status='skip', details=details)
+
+    def test_twice(self):
+        self.result.startTest(self)
+        self.result.addSuccess(self, details={'foo': 'bar'})
+        self.result.stopTest(self)
+        self.result.startTest(self)
+        self.result.addSuccess(self)
+        self.result.stopTest(self)
+        self.assertEqual(
+            [{'test': self,
+              'status': 'success',
+              'start_time': 0,
+              'stop_time': 1,
+              'tags': set(),
+              'details': {'foo': 'bar'}},
+             {'test': self,
+              'status': 'success',
+              'start_time': 2,
+              'stop_time': 3,
+              'tags': set(),
+              'details': None},
+             ],
+            self.log)
+
+
+class TestCsvResult(testtools.TestCase):
+
+    def parse_stream(self, stream):
+        stream.seek(0)
+        reader = csv.reader(stream)
+        return list(reader)
+
+    def test_csv_output(self):
+        stream = StringIO()
+        result = subunit.test_results.CsvResult(stream)
+        if sys.version_info >= (3, 0):
+            result._now = iter(range(5)).__next__
+        else:
+            result._now = iter(range(5)).next
+        result.startTestRun()
+        result.startTest(self)
+        result.addSuccess(self)
+        result.stopTest(self)
+        result.stopTestRun()
+        self.assertEqual(
+            [['test', 'status', 'start_time', 'stop_time'],
+             [self.id(), 'success', '0', '1'],
+             ],
+            self.parse_stream(stream))
+
+    def test_just_header_when_no_tests(self):
+        stream = StringIO()
+        result = subunit.test_results.CsvResult(stream)
+        result.startTestRun()
+        result.stopTestRun()
+        self.assertEqual(
+            [['test', 'status', 'start_time', 'stop_time']],
+            self.parse_stream(stream))
+
+    def test_no_output_before_events(self):
+        stream = StringIO()
+        subunit.test_results.CsvResult(stream)
+        self.assertEqual([], self.parse_stream(stream))
diff --git a/third_party/subunit/python/subunit/v2.py b/third_party/subunit/python/subunit/v2.py
new file mode 100644
index 0000000..b1d508d
--- /dev/null
+++ b/third_party/subunit/python/subunit/v2.py
@@ -0,0 +1,494 @@
+#
+#  subunit: extensions to Python unittest to get test results from subprocesses.
+#  Copyright (C) 2013  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+import codecs
+utf_8_decode = codecs.utf_8_decode
+import datetime
+from io import UnsupportedOperation
+import os
+import select
+import struct
+import zlib
+
+from extras import safe_hasattr, try_imports
+builtins = try_imports(['__builtin__', 'builtins'])
+
+import subunit
+import subunit.iso8601 as iso8601
+
+__all__ = [
+    'ByteStreamToStreamResult',
+    'StreamResultToBytes',
+    ]
+
+SIGNATURE = b'\xb3'
+FMT_8  = '>B'
+FMT_16 = '>H'
+FMT_24 = '>HB'
+FMT_32 = '>I'
+FMT_TIMESTAMP = '>II'
+FLAG_TEST_ID = 0x0800
+FLAG_ROUTE_CODE = 0x0400
+FLAG_TIMESTAMP = 0x0200
+FLAG_RUNNABLE = 0x0100
+FLAG_TAGS = 0x0080
+FLAG_MIME_TYPE = 0x0020
+FLAG_EOF = 0x0010
+FLAG_FILE_CONTENT = 0x0040
+EPOCH = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=iso8601.Utc())
+NUL_ELEMENT = b'\0'[0]
+# Contains True for types for which 'nul in thing' incorrectly returns False.
+_nul_test_broken = {}
+
+
+def has_nul(buffer_or_bytes):
+    """Return True if a null byte is present in buffer_or_bytes."""
+    # Simple "if NUL_ELEMENT in utf8_bytes:" fails on Python 3.1 and 3.2 with
+    # memoryviews. See https://bugs.launchpad.net/subunit/+bug/1216246
+    buffer_type = type(buffer_or_bytes)
+    broken = _nul_test_broken.get(buffer_type)
+    if broken is None:
+        reference = buffer_type(b'\0')
+        broken = not NUL_ELEMENT in reference
+        _nul_test_broken[buffer_type] = broken
+    if broken:
+        return b'\0' in buffer_or_bytes
+    else:
+        return NUL_ELEMENT in buffer_or_bytes
+
+
+class ParseError(Exception):
+    """Used to pass error messages within the parser."""
+
+
+class StreamResultToBytes(object):
+    """Convert StreamResult API calls to bytes.
+
+    The StreamResult API is defined by testtools.StreamResult.
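+
+    A minimal usage sketch (illustrative only; it assumes an in-memory
+    BytesIO sink, but any file-like object with write() and flush() works):
+
+       >>> from io import BytesIO
+       >>> output = BytesIO()
+       >>> serialiser = StreamResultToBytes(output)
+       >>> serialiser.startTestRun()
+       >>> serialiser.status(test_id="foo", test_status="success")
+       >>> serialiser.stopTestRun()
+       >>> packet_bytes = output.getvalue()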
+    """
+
+    status_mask = {
+        None: 0,
+        'exists': 0x1,
+        'inprogress': 0x2,
+        'success': 0x3,
+        'uxsuccess': 0x4,
+        'skip': 0x5,
+        'fail': 0x6,
+        'xfail': 0x7,
+        }
+
+    zero_b = b'\0'[0]
+
+    def __init__(self, output_stream):
+        """Create a StreamResultToBytes with output written to output_stream.
+
+        :param output_stream: A file-like object. Must support write(bytes)
+            and flush() methods. Flush will be called after each write.
+            The stream will be passed through subunit.make_stream_binary,
+            to handle regular cases such as stdout.
+        """
+        self.output_stream = subunit.make_stream_binary(output_stream)
+
+    def startTestRun(self):
+        pass
+
+    def stopTestRun(self):
+        pass
+
+    def status(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        self._write_packet(test_id=test_id, test_status=test_status,
+            test_tags=test_tags, runnable=runnable, file_name=file_name,
+            file_bytes=file_bytes, eof=eof, mime_type=mime_type,
+            route_code=route_code, timestamp=timestamp)
+
+    def _write_utf8(self, a_string, packet):
+        utf8 = a_string.encode('utf-8')
+        self._write_number(len(utf8), packet)
+        packet.append(utf8)
+
+    def _write_len16(self, length, packet):
+        assert length < 65536
+        packet.append(struct.pack(FMT_16, length))
+
+    def _write_number(self, value, packet):
+        packet.extend(self._encode_number(value))
+
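+    # Illustrative note (not from upstream): _encode_number writes values
+    # big-endian with the top two bits of the first byte selecting the total
+    # width. For example 1300 (0x514) takes the two-byte form, so it is
+    # emitted as 0x4000 | 0x514 == 0x4514, i.e. the bytes b'\x45\x14', and
+    # struct.unpack('>H', b'\x45\x14')[0] & 0x3fff recovers 1300.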
+    def _encode_number(self, value):
+        assert value >= 0
+        if value < 64:
+            return [struct.pack(FMT_8, value)]
+        elif value < 16384:
+            value = value | 0x4000
+            return [struct.pack(FMT_16, value)]
+        elif value < 4194304:
+            value = value | 0x800000
+            return [struct.pack(FMT_16, value >> 8),
+                    struct.pack(FMT_8, value & 0xff)]
+        elif value < 1073741824:
+            value = value | 0xc0000000
+            return [struct.pack(FMT_32, value)]
+        else:
+            raise ValueError('value too large to encode: %r' % (value,))
+
+    def _write_packet(self, test_id=None, test_status=None, test_tags=None,
+        runnable=True, file_name=None, file_bytes=None, eof=False,
+        mime_type=None, route_code=None, timestamp=None):
+        packet = [SIGNATURE]
+        packet.append(b'FF') # placeholder for flags
+        # placeholder for length, but see below as length is variable.
+        packet.append(b'')
+        flags = 0x2000 # Version 0x2
+        if timestamp is not None:
+            flags = flags | FLAG_TIMESTAMP
+            since_epoch = timestamp - EPOCH
+            nanoseconds = since_epoch.microseconds * 1000
+            seconds = (since_epoch.seconds + since_epoch.days * 24 * 3600)
+            packet.append(struct.pack(FMT_32, seconds))
+            self._write_number(nanoseconds, packet)
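+            # Illustrative example (not from upstream): a timestamp 1.5s
+            # after EPOCH is written as seconds == 1 followed by the
+            # varint-encoded value 500000000 (nanoseconds).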
+        if test_id is not None:
+            flags = flags | FLAG_TEST_ID
+            self._write_utf8(test_id, packet)
+        if test_tags:
+            flags = flags | FLAG_TAGS
+            self._write_number(len(test_tags), packet)
+            for tag in test_tags:
+                self._write_utf8(tag, packet)
+        if runnable:
+            flags = flags | FLAG_RUNNABLE
+        if mime_type:
+            flags = flags | FLAG_MIME_TYPE
+            self._write_utf8(mime_type, packet)
+        if file_name is not None:
+            flags = flags | FLAG_FILE_CONTENT
+            self._write_utf8(file_name, packet)
+            self._write_number(len(file_bytes), packet)
+            packet.append(file_bytes)
+        if eof:
+            flags = flags | FLAG_EOF
+        if route_code is not None:
+            flags = flags | FLAG_ROUTE_CODE
+            self._write_utf8(route_code, packet)
+        # 0x0008 - not used in v2.
+        flags = flags | self.status_mask[test_status]
+        packet[1] = struct.pack(FMT_16, flags)
+        base_length = sum(map(len, packet)) + 4
+        if base_length <= 62:
+            # one byte to encode length, 62+1 = 63
+            length_length = 1
+        elif base_length <= 16381:
+            # two bytes to encode length, 16381+2 = 16383
+            length_length = 2
+        elif base_length <= 4194300:
+            # three bytes to encode length, 4194300+3=4194303
+            length_length = 3
+        else:
+            # Longer than policy:
+            # TODO: chunk the packet automatically?
+            # - strip all but file data
+            # - do 4M chunks of that till done
+            # - include original data in final chunk.
+            raise ValueError("Length too long: %r" % base_length)
+        packet[2:3] = self._encode_number(base_length + length_length)
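+        # Illustrative example (not from upstream): a packet whose signature,
+        # flags, body and CRC sum to base_length == 24 needs one length byte,
+        # so the value 25 is encoded into packet[2].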
+        # We could either do a partial application of crc32 over each chunk
+        # or a single join to a temp variable then a final join
+        # or two writes (that python might then split).
+        # For now, simplest code: join, crc32, join, output
+        content = b''.join(packet)
+        self.output_stream.write(content + struct.pack(
+            FMT_32, zlib.crc32(content) & 0xffffffff))
+        self.output_stream.flush()
+
+
+class ByteStreamToStreamResult(object):
+    """Parse a subunit byte stream.
+
+    Mixed streams that contain non-subunit content are supported when a
+    non_subunit_name is passed to the constructor. The default is to raise an
+    error containing the non-subunit byte after it has been read from the
+    stream.
+
+    Typical use:
+
+       >>> case = ByteStreamToStreamResult(sys.stdin.buffer)
+       >>> result = StreamResult()
+       >>> result.startTestRun()
+       >>> case.run(result)
+       >>> result.stopTestRun()
+    """
+
+    status_lookup = {
+        0x0: None,
+        0x1: 'exists',
+        0x2: 'inprogress',
+        0x3: 'success',
+        0x4: 'uxsuccess',
+        0x5: 'skip',
+        0x6: 'fail',
+        0x7: 'xfail',
+        }
+
+    def __init__(self, source, non_subunit_name=None):
+        """Create a ByteStreamToStreamResult.
+
+        :param source: A file like object to read bytes from. Must support
+            read(<count>) and return bytes. The file is not closed by
+            ByteStreamToStreamResult. subunit.make_stream_binary() is
+            called on the stream to get it into bytes mode.
+        :param non_subunit_name: If set to non-None, non subunit content
+            encountered in the stream will be converted into file packets
+            labelled with this name.
+        """
+        self.non_subunit_name = non_subunit_name
+        self.source = subunit.make_stream_binary(source)
+        self.codec = codecs.lookup('utf8').incrementaldecoder()
+
+    def run(self, result):
+        """Parse source and emit events to result.
+
+        This is a blocking call: it will run until EOF is detected on source.
+        """
+        self.codec.reset()
+        mid_character = False
+        while True:
+            # We're in blocking mode; read one char
+            content = self.source.read(1)
+            if not content:
+                # EOF
+                return
+            if not mid_character and content[0] == SIGNATURE[0]:
+                self._parse_packet(result)
+                continue
+            if self.non_subunit_name is None:
+                raise Exception("Non subunit content", content)
+            try:
+                if self.codec.decode(content):
+                    # End of a character
+                    mid_character = False
+                else:
+                    mid_character = True
+            except UnicodeDecodeError:
+                # Bad unicode, not our concern.
+                mid_character = False
+            # Aggregate all content that is not subunit until either
+            # 1MiB is accumulated or 50ms has passed with no input.
+            # Both are arbitrary amounts intended to give a simple
+            # balance between efficiency (avoiding death by a thousand
+            # one-byte packets), buffering (avoiding overlarge state
+            # being hidden on intermediary nodes) and interactivity
+            # (when driving a debugger, slow response to typing is
+            # annoying).
+            buffered = [content]
+            while len(buffered[-1]):
+                try:
+                    self.source.fileno()
+                except:
+                    # Won't be able to select, fallback to
+                    # one-byte-at-a-time.
+                    break
+                # Note: this has a very low timeout because with stdin, the
+                # BufferedIO layer typically has all the content available
+                # from the stream when e.g. pdb is dropped into, leading to
+                # select always timing out when in fact we could have read
+                # (from the buffer layer) - we typically fail to aggregate
+                # any content on 3.x Pythons.
+                readable = select.select([self.source], [], [], 0.000001)[0]
+                if readable:
+                    content = self.source.read(1)
+                    if not len(content):
+                        # EOF - break and emit buffered.
+                        break
+                    if not mid_character and content[0] == SIGNATURE[0]:
+                        # New packet, break, emit buffered, then parse.
+                        break
+                    buffered.append(content)
+                    # Feed into the codec.
+                    try:
+                        if self.codec.decode(content):
+                            # End of a character
+                            mid_character = False
+                        else:
+                            mid_character = True
+                    except UnicodeDecodeError:
+                        # Bad unicode, not our concern.
+                        mid_character = False
+                if not readable or len(buffered) >= 1048576:
+                    # timeout or too much data, emit what we have.
+                    break
+            result.status(
+                file_name=self.non_subunit_name,
+                file_bytes=b''.join(buffered))
+            if mid_character or not len(content) or content[0] != SIGNATURE[0]:
+                continue
+            # Otherwise, parse a data packet.
+            self._parse_packet(result)
+
+    def _parse_packet(self, result):
+        try:
+            packet = [SIGNATURE]
+            self._parse(packet, result)
+        except ParseError as error:
+            result.status(test_id="subunit.parser", eof=True,
+                file_name="Packet data", file_bytes=b''.join(packet),
+                mime_type="application/octet-stream")
+            result.status(test_id="subunit.parser", test_status='fail',
+                eof=True, file_name="Parser Error",
+                file_bytes=(error.args[0]).encode('utf8'),
+                mime_type="text/plain;charset=utf8")
+
+    def _to_bytes(self, data, pos, length):
+        """Return a slice of data from pos for length as bytes."""
+        # memoryview in 2.7.3 and 3.2 isn't directly usable with struct :(.
+        # see https://bugs.launchpad.net/subunit/+bug/1216163
+        result = data[pos:pos+length]
+        if type(result) is not bytes:
+            return result.tobytes()
+        return result
+
+    def _parse_varint(self, data, pos, max_3_bytes=False):
+        # Because the only incremental IO we do is at the start, and the
+        # 32-bit CRC means we can always safely read enough to cover any
+        # varint, we can be sure that there should be enough data - and if
+        # not, it is an error rather than a normal situation.
+        data_0 = struct.unpack(FMT_8, self._to_bytes(data, pos, 1))[0]
+        typeenum = data_0 & 0xc0
+        value_0 = data_0 & 0x3f
+        if typeenum == 0x00:
+            return value_0, 1
+        elif typeenum == 0x40:
+            data_1 = struct.unpack(FMT_8, self._to_bytes(data, pos+1, 1))[0]
+            return (value_0 << 8) | data_1, 2
+        elif typeenum == 0x80:
+            data_1 = struct.unpack(FMT_16, self._to_bytes(data, pos+1, 2))[0]
+            return (value_0 << 16) | data_1, 3
+        else:
+            if max_3_bytes:
+                raise ParseError('3 byte maximum given but 4 byte value found.')
+            data_1, data_2 = struct.unpack(FMT_24, self._to_bytes(data, pos+1, 3))
+            result = (value_0 << 24) | data_1 << 8 | data_2
+            return result, 4
+
+    def _parse(self, packet, result):
+            # 2 bytes flags, at most 3 bytes length.
+            packet.append(self.source.read(5))
+            flags = struct.unpack(FMT_16, packet[-1][:2])[0]
+            length, consumed = self._parse_varint(
+                packet[-1], 2, max_3_bytes=True)
+            remainder = self.source.read(length - 6)
+            if len(remainder) != length - 6:
+                raise ParseError(
+                    'Short read - got %d bytes, wanted %d bytes' % (
+                    len(remainder), length - 6))
+            if consumed != 3:
+                # Avoid having to parse torn values
+                packet[-1] += remainder
+                pos = 2 + consumed
+            else:
+                # Avoid copying potentially lots of data.
+                packet.append(remainder)
+                pos = 0
+            crc = zlib.crc32(packet[0])
+            for fragment in packet[1:-1]:
+                crc = zlib.crc32(fragment, crc)
+            crc = zlib.crc32(packet[-1][:-4], crc) & 0xffffffff
+            packet_crc = struct.unpack(FMT_32, packet[-1][-4:])[0]
+            if crc != packet_crc:
+                # Bad CRC, report it and stop parsing the packet.
+                raise ParseError(
+                    'Bad checksum - calculated (0x%x), stored (0x%x)'
+                        % (crc, packet_crc))
+            if safe_hasattr(builtins, 'memoryview'):
+                body = memoryview(packet[-1])
+            else:
+                body = packet[-1]
+            # Discard CRC-32
+            body = body[:-4]
+            # One packet could have both file and status data; the Python API
+            # presents these separately (perhaps it shouldn't?)
+            if flags & FLAG_TIMESTAMP:
+                seconds = struct.unpack(FMT_32, self._to_bytes(body, pos, 4))[0]
+                nanoseconds, consumed = self._parse_varint(body, pos+4)
+                pos = pos + 4 + consumed
+                timestamp = EPOCH + datetime.timedelta(
+                    seconds=seconds, microseconds=nanoseconds/1000)
+            else:
+                timestamp = None
+            if flags & FLAG_TEST_ID:
+                test_id, pos = self._read_utf8(body, pos)
+            else:
+                test_id = None
+            if flags & FLAG_TAGS:
+                tag_count, consumed = self._parse_varint(body, pos)
+                pos += consumed
+                test_tags = set()
+                for _ in range(tag_count):
+                    tag, pos = self._read_utf8(body, pos)
+                    test_tags.add(tag)
+            else:
+                test_tags = None
+            if flags & FLAG_MIME_TYPE:
+                mime_type, pos = self._read_utf8(body, pos)
+            else:
+                mime_type = None
+            if flags & FLAG_FILE_CONTENT:
+                file_name, pos = self._read_utf8(body, pos)
+                content_length, consumed = self._parse_varint(body, pos)
+                pos += consumed
+                file_bytes = self._to_bytes(body, pos, content_length)
+                if len(file_bytes) != content_length:
+                    raise ParseError('File content extends past end of packet: '
+                        'claimed %d bytes, %d available' % (
+                        content_length, len(file_bytes)))
+                pos += content_length
+            else:
+                file_name = None
+                file_bytes = None
+            if flags & FLAG_ROUTE_CODE:
+                route_code, pos = self._read_utf8(body, pos)
+            else:
+                route_code = None
+            runnable = bool(flags & FLAG_RUNNABLE)
+            eof = bool(flags & FLAG_EOF)
+            test_status = self.status_lookup[flags & 0x0007]
+            result.status(test_id=test_id, test_status=test_status,
+                test_tags=test_tags, runnable=runnable, mime_type=mime_type,
+                eof=eof, file_name=file_name, file_bytes=file_bytes,
+                route_code=route_code, timestamp=timestamp)
+    __call__ = run
+
+    def _read_utf8(self, buf, pos):
+        length, consumed = self._parse_varint(buf, pos)
+        pos += consumed
+        utf8_bytes = buf[pos:pos+length]
+        if length != len(utf8_bytes):
+            raise ParseError(
+                'UTF8 string at offset %d extends past end of packet: '
+                'claimed %d bytes, %d available' % (pos - 2, length,
+                len(utf8_bytes)))
+        if has_nul(utf8_bytes):
+            raise ParseError('UTF8 string at offset %d contains NUL byte' % (
+                pos-2,))
+        try:
+            utf8, decoded_bytes = utf_8_decode(utf8_bytes)
+            if decoded_bytes != length:
+                raise ParseError("Invalid (partially decodable) string at "
+                    "offset %d, %d undecoded bytes" % (
+                    pos-2, length - decoded_bytes))
+            return utf8, length+pos
+        except UnicodeDecodeError:
+            raise ParseError('UTF8 string at offset %d is not UTF8' % (pos-2,))
diff --git a/third_party/subunit/setup.py b/third_party/subunit/setup.py
new file mode 100755
index 0000000..d42d3d7
--- /dev/null
+++ b/third_party/subunit/setup.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+import os.path
+try:
+    # If the user has setuptools / distribute installed, use it
+    from setuptools import setup
+except ImportError:
+    # Otherwise, fall back to distutils.
+    from distutils.core import setup
+    extra = {}
+else:
+    extra = {
+        'install_requires': [
+            'extras',
+            'testtools>=0.9.34',
+        ],
+        'tests_require': [
+            'testscenarios',
+        ],
+    }
+
+
+def _get_version_from_file(filename, start_of_line, split_marker):
+    """Extract version from file, giving last matching value or None"""
+    try:
+        return [x for x in open(filename)
+            if x.startswith(start_of_line)][-1].split(split_marker)[1].strip()
+    except (IOError, IndexError):
+        return None
+
+
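+# Illustrative examples (not from upstream): a PKG-INFO line "Version: 1.0"
+# yields "1.0" via the PKG-INFO call below, and a Makefile line "VERSION=1.0"
+# yields "1.0" via the Makefile call.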
+VERSION = (
+    # Assume we are in a distribution, which has PKG-INFO
+    _get_version_from_file('PKG-INFO', 'Version:', ':')
+    # Must be a development checkout, so use the Makefile
+    or _get_version_from_file('Makefile', 'VERSION', '=')
+    or "0.0")
+
+
+relpath = os.path.dirname(__file__)
+if relpath:
+    os.chdir(relpath)
+setup(
+    name='python-subunit',
+    version=VERSION,
+    description=('Python implementation of subunit test streaming protocol'),
+    long_description=open('README').read(),
+    classifiers=[
+        'Intended Audience :: Developers',
+        'Programming Language :: Python :: 3',
+        'Programming Language :: Python',
+        'Topic :: Software Development :: Testing',
+    ],
+    keywords='python test streaming',
+    author='Robert Collins',
+    author_email='subunit-dev at lists.launchpad.net',
+    url='http://launchpad.net/subunit',
+    packages=['subunit', 'subunit.tests'],
+    package_dir={'subunit': 'python/subunit'},
+    scripts = [
+        'filters/subunit-1to2',
+        'filters/subunit-2to1',
+        'filters/subunit-filter',
+        'filters/subunit-ls',
+        'filters/subunit-notify',
+        'filters/subunit-output',
+        'filters/subunit-stats',
+        'filters/subunit-tags',
+        'filters/subunit2csv',
+        'filters/subunit2gtk',
+        'filters/subunit2junitxml',
+        'filters/subunit2pyunit',
+        'filters/tap2subunit',
+    ],
+    **extra
+)
diff --git a/third_party/subunit/shell/README b/third_party/subunit/shell/README
new file mode 100644
index 0000000..65fb40d
--- /dev/null
+++ b/third_party/subunit/shell/README
@@ -0,0 +1,62 @@
+#
+#  subunit shell bindings.
+#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+This tree contains shell bindings to the subunit protocol. They are written
+entirely in shell, and unit tested in shell. See the tests/ directory for the
+test scripts. You can use `make check` to run the tests. There is a trivial
+python test_shell.py which uses the pyunit gui to expose the test results in a
+compact form.
+
+The shell bindings consist of five functions which you can use to output test
+metadata trivially. See share/subunit.sh for the functions and comments.
+
+However, this is not a full test environment; it is support code for reporting
+to subunit. You can look at ShUnit (http://shunit.sourceforge.net) for 'proper'
+shell-based xUnit functionality. There is a patch for ShUnit 1.3
+(subunit-ui.patch) in the subunit source tree. I hope to have that integrated
+upstream in the near future. I will delete the copy of the patch in the subunit
+tree a release or two later.
+
+If you are a test environment maintainer - homegrown, ShUnit, or some such -
+you will need to see how the subunit calls should be used. Here is what
+a manually written test using the bindings might look like:
+
+
+subunit_start_test "test name"
+# determine if test passes or fails
+result=$(something)
+if [ $result == 0 ]; then
+  subunit_pass_test "test name"
+else
+  subunit_fail_test "test name" <<END
+Something went wrong running something:
+exited with result: '$func_status'
+END
+fi
+
+Which when run with a subunit test runner will generate something like:
+test name ... ok
+
+on success, and:
+
+test name ... FAIL
+
+======================================================================
+FAIL: test name
+----------------------------------------------------------------------
+RemoteError:
+Something went wrong running something:
+exited with result: '1'
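+
+A skipped test can be reported in the same style (an illustrative sketch using
+the subunit_skip_test helper from share/subunit.sh; PREREQUISITE is just a
+stand-in condition for this example):
+
+subunit_start_test "test name"
+if [ -z "$PREREQUISITE" ]; then
+  subunit_skip_test "test name"
+else
+  # run the test and report pass/fail as shown above
+  subunit_pass_test "test name"
+fi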
diff --git a/third_party/subunit/shell/share/subunit.sh b/third_party/subunit/shell/share/subunit.sh
new file mode 100644
index 0000000..8c28266
--- /dev/null
+++ b/third_party/subunit/shell/share/subunit.sh
@@ -0,0 +1,59 @@
+#
+#  subunit.sh: shell functions to report test status via the subunit protocol.
+#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+subunit_start_test () {
+  # emit the current protocol start-marker for test $1
+  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
+  echo "test: $1"
+}
+
+
+subunit_pass_test () {
+  # emit the current protocol test passed marker for test $1
+  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
+  echo "success: $1"
+}
+
+
+subunit_fail_test () {
+  # emit the current protocol fail-marker for test $1, and emit stdin as
+  # the error text.
+  # we use stdin because the failure message can be arbitrarily long, and this
+  # makes it convenient to write in scripts (using <<END syntax).
+  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
+  echo "failure: $1 ["
+  cat -
+  echo "]"
+}
+
+
+subunit_error_test () {
+  # emit the current protocol error-marker for test $1, and emit stdin as
+  # the error text.
+  # we use stdin because the failure message can be arbitrarily long, and this
+  # makes it convenient to write in scripts (using <<END syntax).
+  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
+  echo "error: $1 ["
+  cat -
+  echo "]"
+}
+
+
+subunit_skip_test () {
+  # emit the current protocol test skipped marker for test $1
+  echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
+  echo "skip: $1"
+}
diff --git a/third_party/subunit/shell/tests/test_function_output.sh b/third_party/subunit/shell/tests/test_function_output.sh
new file mode 100755
index 0000000..850b096
--- /dev/null
+++ b/third_party/subunit/shell/tests/test_function_output.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#  subunit shell bindings.
+#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the users choice. A copy of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+
+# This script tests the output of the methods; as each one is tested, we start
+# using it.
+# So the first test manually implements the entire protocol, the next uses the
+# start method, and so on.
+# It is assumed that we are running from the 'shell' tree root in the subunit
+# source, and that the library sourcing tests have all passed - if they
+# have not, this test script may well fail strangely.
+
+# import the library.
+. ${SHELL_SHARE}subunit.sh
+
+echo 'test: subunit_start_test output'
+func_output=$(subunit_start_test "foo bar"|grep -v 'time:')
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xtest: foo bar" ]; then
+  echo 'success: subunit_start_test output'
+else
+  echo 'failure: subunit_start_test output ['
+  echo 'got an error code or incorrect output:'
+  echo "exit: $func_status"
+  echo "output: '$func_output'"
+  echo ']' ;
+fi
+
+subunit_start_test "subunit_pass_test output"
+func_output=$(subunit_pass_test "foo bar"|grep -v 'time:')
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xsuccess: foo bar" ]; then
+  subunit_pass_test "subunit_pass_test output"
+else
+  echo 'failure: subunit_pass_test output ['
+  echo 'got an error code or incorrect output:'
+  echo "exit: $func_status"
+  echo "output: '$func_output'"
+  echo ']' ;
+fi
+
+subunit_start_test "subunit_fail_test output"
+func_output=$((subunit_fail_test "foo bar" <<END
+something
+  wrong
+here
+END
+)|grep -v 'time:')
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xfailure: foo bar [
+something
+  wrong
+here
+]" ]; then
+  subunit_pass_test "subunit_fail_test output"
+else
+  echo 'failure: subunit_fail_test output ['
+  echo 'got an error code or incorrect output:'
+  echo "exit: $func_status"
+  echo "output: '$func_output'"
+  echo ']' ;
+fi
+
+subunit_start_test "subunit_error_test output"
+func_output=$((subunit_error_test "foo bar" <<END
+something
+  died
+here
+END
+)| grep -v 'time:')
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xerror: foo bar [
+something
+  died
+here
+]" ]; then
+  subunit_pass_test "subunit_error_test output"
+else
+  subunit_fail_test "subunit_error_test output" <<END
+got an error code or incorrect output:
+exit: $func_status
+output: '$func_output'
+END
+fi
diff --git a/third_party/subunit/shell/tests/test_source_library.sh b/third_party/subunit/shell/tests/test_source_library.sh
new file mode 100755
index 0000000..51cb2e0
--- /dev/null
+++ b/third_party/subunit/shell/tests/test_source_library.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+#  subunit shell bindings.
+#  Copyright (C) 2006  Robert Collins <robertc at robertcollins.net>
+#
+#  Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+#  license at the user's choice. Copies of both licenses are available in the
+#  project source as Apache-2.0 and BSD. You may not use this file except in
+#  compliance with one of these two licences.
+#
+#  Unless required by applicable law or agreed to in writing, software
+#  distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+#  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+#  license you chose for the specific language governing permissions and
+#  limitations under that license.
+#
+
+
+# this script tests that we can source the subunit shell bindings successfully.
+# It manually implements the control protocol so that it does not depend on the
+# bindings being complete yet.
+
+# we expect to be run from the tree root.
+
+echo 'test: shell bindings can be sourced'
+# if any output occurs, this has failed to source cleanly
+source_output=$(. ${SHELL_SHARE}subunit.sh 2>&1)
+if [ $? == 0 -a "x$source_output" = "x" ]; then
+  echo 'success: shell bindings can be sourced'
+else
+  echo 'failure: shell bindings can be sourced ['
+  echo 'got an error code or output during sourcing:'
+  echo $source_output
+  echo ']' ;
+fi
+
+# now source it for real
+. ${SHELL_SHARE}subunit.sh
+
+# we should have a start_test function
+echo 'test: subunit_start_test exists'
+found_type=$(type -t subunit_start_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+  echo 'success: subunit_start_test exists'
+else
+  echo 'failure: subunit_start_test exists ['
+  echo 'subunit_start_test is not a function:'
+  echo "type -t status: $status"
+  echo "output: $found_type"
+  echo ']' ;
+fi
+
+# we should have a pass_test function
+echo 'test: subunit_pass_test exists'
+found_type=$(type -t subunit_pass_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+  echo 'success: subunit_pass_test exists'
+else
+  echo 'failure: subunit_pass_test exists ['
+  echo 'subunit_pass_test is not a function:'
+  echo "type -t status: $status"
+  echo "output: $found_type"
+  echo ']' ;
+fi
+
+# we should have a fail_test function
+echo 'test: subunit_fail_test exists'
+found_type=$(type -t subunit_fail_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+  echo 'success: subunit_fail_test exists'
+else
+  echo 'failure: subunit_fail_test exists ['
+  echo 'subunit_fail_test is not a function:'
+  echo "type -t status: $status"
+  echo "output: $found_type"
+  echo ']' ;
+fi
+
+# we should have an error_test function
+echo 'test: subunit_error_test exists'
+found_type=$(type -t subunit_error_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+  echo 'success: subunit_error_test exists'
+else
+  echo 'failure: subunit_error_test exists ['
+  echo 'subunit_error_test is not a function:'
+  echo "type -t status: $status"
+  echo "output: $found_type"
+  echo ']' ;
+fi
+
+# we should have a skip_test function
+echo 'test: subunit_skip_test exists'
+found_type=$(type -t subunit_skip_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+  echo 'success: subunit_skip_test exists'
+else
+  echo 'failure: subunit_skip_test exists ['
+  echo 'subunit_skip_test is not a function:'
+  echo "type -t status: $status"
+  echo "output: $found_type"
+  echo ']' ;
+fi
diff --git a/third_party/wscript_build b/third_party/wscript_build
index 63f616e..dee22ba 100644
--- a/third_party/wscript_build
+++ b/third_party/wscript_build
@@ -7,6 +7,7 @@ external_libs = {
     "dns.resolver": "dnspython/dns",
     "mimeparse": "mimeparse",
     "extras": "python-extras/extras",
+    "subunit": "subunit/python/subunit",
     "testtools": "testtools/testtools",
     }
 
@@ -29,3 +30,4 @@ bld.SAMBA_GENERATOR('third_party_init_py',
 bld.INSTALL_FILES('${PYTHONARCHDIR}/samba/third_party', 'empty_file', destname='__init__.py')
 bld.RECURSE('zlib')
 bld.RECURSE('popt')
+bld.RECURSE('subunit/c')
diff --git a/wscript b/wscript
index 37a1f10..459287f 100644
--- a/wscript
+++ b/wscript
@@ -160,7 +160,6 @@ def configure(conf):
     conf.RECURSE('nsswitch')
     conf.RECURSE('lib/socket_wrapper')
     conf.RECURSE('lib/uid_wrapper')
-    conf.RECURSE('lib/subunit/c')
     conf.RECURSE('libcli/smbreadline')
     conf.RECURSE('lib/crypto')
     conf.RECURSE('pidl')
diff --git a/wscript_build b/wscript_build
index e74841e..403ec9c 100644
--- a/wscript_build
+++ b/wscript_build
@@ -105,7 +105,6 @@ bld.RECURSE('source4/libcli')
 bld.RECURSE('libcli/smb')
 bld.RECURSE('libcli/util')
 bld.RECURSE('libcli/cldap')
-bld.RECURSE('lib/subunit/c')
 bld.RECURSE('lib/smbconf')
 bld.RECURSE('lib/async_req')
 bld.RECURSE('lib/dbwrap')
-- 
2.1.3


