[PATCH 03/17] testtools: Update to new upstream snapshot.
Jelmer Vernooij
jelmer at samba.org
Sat Nov 1 13:21:55 MDT 2014
Change-Id: Ic949ec35b60eef84918327dc8616c785674824ff
Signed-Off-By: Jelmer Vernooij <jelmer at samba.org>
---
lib/testtools/NEWS | 9 +++++
lib/testtools/testtools/content.py | 48 ++++++++++++------------
lib/testtools/testtools/tests/test_content.py | 11 ++++++
lib/testtools/testtools/tests/test_testresult.py | 39 ++++++++++++-------
4 files changed, 70 insertions(+), 37 deletions(-)
diff --git a/lib/testtools/NEWS b/lib/testtools/NEWS
index ea4288f..640a952 100644
--- a/lib/testtools/NEWS
+++ b/lib/testtools/NEWS
@@ -7,6 +7,15 @@ Changes and improvements to testtools_, grouped by release.
NEXT
~~~~
+Changes
+-------
+
+* Fixed unit tests which were failing under pypy due to a change in the way
+ pypy formats tracebacks. (Thomi Richards)
+
+* Make `testtools.content.text_content` error if anything other than text
+ is given as content. (Thomi Richards)
+
1.1.0
~~~~~
diff --git a/lib/testtools/testtools/content.py b/lib/testtools/testtools/content.py
index 401004b..101b631 100644
--- a/lib/testtools/testtools/content.py
+++ b/lib/testtools/testtools/content.py
@@ -25,9 +25,9 @@ from testtools.compat import (
_b,
_format_exception_only,
_format_stack_list,
- _isbytes,
_TB_HEADER,
_u,
+ istext,
str_is_unicode,
)
from testtools.content_type import ContentType, JSON, UTF8_TEXT
@@ -63,11 +63,11 @@ def _iter_chunks(stream, chunk_size, seek_offset=None, seek_whence=0):
class Content(object):
"""A MIME-like Content object.
- Content objects can be serialised to bytes using the iter_bytes method.
- If the Content-Type is recognised by other code, they are welcome to
+ 'Content' objects can be serialised to bytes using the iter_bytes method.
+ If the 'Content-Type' is recognised by other code, they are welcome to
look for richer contents than mere byte serialisation - for example in
memory object graphs etc. However, such code MUST be prepared to receive
- a generic Content object that has been reconstructed from a byte stream.
+ a generic 'Content' object that has been reconstructed from a byte stream.
:ivar content_type: The content type of this Content.
"""
@@ -128,7 +128,7 @@ class Content(object):
class StackLinesContent(Content):
"""Content object for stack lines.
- This adapts a list of "preprocessed" stack lines into a content object.
+ This adapts a list of "preprocessed" stack lines into a 'Content' object.
The stack lines are most likely produced from ``traceback.extract_stack``
or ``traceback.extract_tb``.
@@ -180,8 +180,8 @@ class StackLinesContent(Content):
def TracebackContent(err, test):
"""Content object for tracebacks.
- This adapts an exc_info tuple to the Content interface.
- text/x-traceback;language=python is used for the mime type, in order to
+ This adapts an exc_info tuple to the 'Content' interface.
+ 'text/x-traceback;language=python' is used for the mime type, in order to
provide room for other languages to format their tracebacks differently.
"""
if err is None:
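
An exc_info tuple captured during a test can be attached as a detail using the interface described above; a sketch only, the test class and detail name are made up:

    import sys

    from testtools import TestCase
    from testtools.content import TracebackContent

    class ExampleTest(TestCase):
        def test_attach_traceback(self):
            try:
                1 / 0
            except ZeroDivisionError:
                # TracebackContent adapts the (type, value, traceback) tuple
                # to the Content interface, with the mime type
                # text/x-traceback;language=python.
                self.addDetail('traceback',
                               TracebackContent(sys.exc_info(), self))
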
@@ -223,7 +223,7 @@ def TracebackContent(err, test):
def StacktraceContent(prefix_content="", postfix_content=""):
"""Content object for stack traces.
- This function will create and return a content object that contains a
+ This function will create and return a 'Content' object that contains a
stack trace.
The mime type is set to 'text/x-traceback;language=python', so other
@@ -251,7 +251,7 @@ def StacktraceContent(prefix_content="", postfix_content=""):
def json_content(json_data):
- """Create a JSON `Content` object from JSON-encodeable data."""
+ """Create a JSON Content object from JSON-encodeable data."""
data = json.dumps(json_data)
if str_is_unicode:
# The json module perversely returns native str not bytes
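
A short sketch of the helper documented above (the payload is arbitrary):

    from testtools.content import json_content

    # Any JSON-encodeable data is serialised and wrapped in a Content
    # object carrying the JSON content type.
    detail = json_content({'request_id': 42, 'status': 'ok'})
    print(b''.join(detail.iter_bytes()))
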
@@ -260,12 +260,14 @@ def json_content(json_data):
def text_content(text):
- """Create a `Content` object from some text.
+ """Create a Content object from some text.
This is useful for adding details which are short strings.
"""
- if _isbytes(text):
- raise TypeError('text_content must be given a string, not bytes.')
+ if not istext(text):
+ raise TypeError(
+ "text_content must be given text, not '%s'." % type(text).__name__
+ )
return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
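
The stricter check can be exercised directly; a minimal sketch (the sample values are arbitrary, and the exception text shown is the Python 3 spelling):

    from testtools.content import text_content

    detail = text_content(u'all went well')   # text is accepted
    print(b''.join(detail.iter_bytes()))      # b'all went well'

    try:
        text_content(b'raw bytes')            # bytes and other non-text now fail
    except TypeError as e:
        print(e)  # text_content must be given text, not 'bytes'.
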
@@ -278,9 +280,9 @@ def maybe_wrap(wrapper, func):
def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
buffer_now=False, seek_offset=None, seek_whence=0):
- """Create a `Content` object from a file on disk.
+ """Create a Content object from a file on disk.
- Note that unless 'read_now' is explicitly passed in as True, the file
+ Note that unless ``buffer_now`` is explicitly passed in as True, the file
will only be read from when ``iter_bytes`` is called.
:param path: The path to the file to be used as content.
@@ -291,7 +293,7 @@ def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
:param buffer_now: If True, read the file from disk now and keep it in
memory. Otherwise, only read when the content is serialized.
:param seek_offset: If non-None, seek within the stream before reading it.
- :param seek_whence: If supplied, pass to stream.seek() when seeking.
+ :param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
"""
if content_type is None:
content_type = UTF8_TEXT
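
A sketch of the two reading modes described in the docstring above (the temporary file is only there to make the sketch self-contained):

    import tempfile

    from testtools.content import content_from_file

    # Write a small file so the example has something to read.
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'test output\n')
        path = f.name

    # buffer_now=True: the file is read immediately and held in memory,
    # so it may safely be removed before the content is serialised.
    eager = content_from_file(path, buffer_now=True)

    # buffer_now=False (the default): the file is only opened and read
    # when iter_bytes() is called, so it must still exist at that point.
    lazy = content_from_file(path)
    data = b''.join(lazy.iter_bytes())
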
@@ -308,13 +310,13 @@ def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
def content_from_stream(stream, content_type=None,
chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False,
seek_offset=None, seek_whence=0):
- """Create a `Content` object from a file-like stream.
+ """Create a Content object from a file-like stream.
- Note that the stream will only be read from when ``iter_bytes`` is
- called.
+ Note that unless ``buffer_now`` is explicitly passed in as True, the stream
+ will only be read from when ``iter_bytes`` is called.
:param stream: A file-like object to read the content from. The stream
- is not closed by this function or the content object it returns.
+ is not closed by this function or the 'Content' object it returns.
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
@@ -322,7 +324,7 @@ def content_from_stream(stream, content_type=None,
:param buffer_now: If True, reads from the stream right now. Otherwise,
only reads when the content is serialized. Defaults to False.
:param seek_offset: If non-None, seek within the stream before reading it.
- :param seek_whence: If supplied, pass to stream.seek() when seeking.
+ :param seek_whence: If supplied, pass to ``stream.seek()`` when seeking.
"""
if content_type is None:
content_type = UTF8_TEXT
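
The stream variant passes the seek parameters straight to ``stream.seek()``; a sketch with an in-memory stream (the contents are made up):

    import io
    import os

    from testtools.content import content_from_stream

    stream = io.BytesIO(b'header\nbody\n')

    # Skip the first 7 bytes ("header\n") from the start of the stream
    # before the content is read; the stream itself is not closed.
    detail = content_from_stream(stream, seek_offset=7,
                                 seek_whence=os.SEEK_SET)
    print(b''.join(detail.iter_bytes()))  # b'body\n'
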
@@ -353,9 +355,9 @@ def attach_file(detailed, path, name=None, content_type=None,
This is a convenience method wrapping around ``addDetail``.
- Note that unless 'read_now' is explicitly passed in as True, the file
- *must* exist when the test result is called with the results of this
- test, after the test has been torn down.
+ Note that by default the contents of the file will be read immediately. If
+ ``buffer_now`` is False, then the file *must* exist when the test result is
+ called with the results of this test, after the test has been torn down.
:param detailed: An object with details
:param path: The path to the file to attach.
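
A sketch of how the documented default plays out in a test (the detail name and file contents are hypothetical):

    import os
    import tempfile

    from testtools import TestCase
    from testtools.content import attach_file

    class LogfileTest(TestCase):
        def test_collects_log(self):
            with tempfile.NamedTemporaryFile(delete=False) as f:
                f.write(b'captured output\n')
                path = f.name

            # Default behaviour: the file is read immediately, so it is
            # safe to remove it before the result is reported.
            attach_file(self, path, name='output')
            os.unlink(path)

            # With buffer_now=False the read would be deferred, and the
            # file would have to survive until after tearDown.
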
diff --git a/lib/testtools/testtools/tests/test_content.py b/lib/testtools/testtools/tests/test_content.py
index 342ae23..09feebd 100644
--- a/lib/testtools/testtools/tests/test_content.py
+++ b/lib/testtools/testtools/tests/test_content.py
@@ -196,6 +196,17 @@ class TestContent(TestCase):
data = _b("Some Bytes")
self.assertRaises(TypeError, text_content, data)
+ def test_text_content_raises_TypeError_when_passed_non_text(self):
+ bad_values = (None, list(), dict(), 42, 1.23)
+ for value in bad_values:
+ self.assertThat(
+ lambda: text_content(value),
+ raises(
+ TypeError("text_content must be given text, not '%s'." %
+ type(value).__name__)
+ ),
+ )
+
def test_json_content(self):
data = {'foo': 'bar'}
expected = Content(JSON, lambda: [_b('{"foo": "bar"}')])
diff --git a/lib/testtools/testtools/tests/test_testresult.py b/lib/testtools/testtools/tests/test_testresult.py
index a0a8aa3..a8034b2 100644
--- a/lib/testtools/testtools/tests/test_testresult.py
+++ b/lib/testtools/testtools/tests/test_testresult.py
@@ -9,6 +9,7 @@ import datetime
import doctest
from itertools import chain, combinations
import os
+import re
import shutil
import sys
import tempfile
@@ -69,6 +70,7 @@ from testtools.matchers import (
HasLength,
MatchesAny,
MatchesException,
+ MatchesRegex,
Raises,
)
from testtools.tests.helpers import (
@@ -2547,13 +2549,18 @@ class TestNonAsciiResults(TestCase):
self._write_module("bad", "iso-8859-5",
"# coding: iso-8859-5\n%% = 0 # %s\n" % text)
textoutput = self._run_external_case()
- self.assertIn(self._as_output(_u(
- #'bad.py", line 2\n'
- ' %% = 0 # %s\n'
- + ' ' * self._error_on_character +
- ' ^\n'
- 'SyntaxError: ') %
- (text,)), textoutput)
+ self.assertThat(
+ textoutput,
+ MatchesRegex(
+ self._as_output(_u(
+ #'bad.py", line 2\n'
+ '.*%% = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ '\\s*\\^\n'
+ 'SyntaxError:.*') %
+ (text,)),
+ re.MULTILINE | re.DOTALL)
+ )
def test_syntax_error_line_euc_jp(self):
"""Syntax error on a euc_jp line shows the line decoded"""
@@ -2579,13 +2586,17 @@ class TestNonAsciiResults(TestCase):
textoutput = self._setup_external_case("import bad")
self._write_module("bad", "utf-8", _u("\ufeff^ = 0 # %s\n") % text)
textoutput = self._run_external_case()
- self.assertIn(self._as_output(_u(
- 'bad.py", line 1\n'
- ' ^ = 0 # %s\n'
- + ' ' * self._error_on_character +
- ' ^\n'
- 'SyntaxError: ') %
- text), textoutput)
+ self.assertThat(
+ textoutput,
+ MatchesRegex(
+ self._as_output(_u(
+ '.*bad.py", line 1\n'
+ '\\s*\\^ = 0 # %s\n'
+ + ' ' * self._error_on_character +
+ '\\s*\\^\n'
+ 'SyntaxError:.*') % text),
+ re.M | re.S)
+ )
class TestNonAsciiResultsWithUnittest(TestNonAsciiResults):
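
The switch from assertIn to MatchesRegex above is what makes these assertions tolerant of interpreter-specific traceback formatting (the pypy fix mentioned in NEWS). A standalone sketch of the same pattern, with an invented output string:

    import re

    from testtools import TestCase
    from testtools.matchers import MatchesRegex

    class RegexMatchExample(TestCase):
        def test_tolerant_traceback_match(self):
            # CPython and pypy render the caret line and surrounding
            # context slightly differently, so match only the stable parts.
            fake_output = (
                '  File "bad.py", line 2\n'
                '    %% = 0\n'
                '       ^\n'
                'SyntaxError: invalid syntax\n')
            self.assertThat(
                fake_output,
                MatchesRegex('.*%% = 0\n\\s*\\^\nSyntaxError:.*',
                             re.MULTILINE | re.DOTALL))
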
--
2.1.1