diff --git a/README.rst b/README.rst
index 4ef7ce9..3298b9c 100644
--- a/README.rst
+++ b/README.rst
@@ -13,7 +13,7 @@ flaky
 About
 -----
 
-Flaky is a plugin for nose or pytest that automatically reruns flaky tests.
+Flaky is a plugin for pytest that automatically reruns flaky tests.
 
 Ideally, tests reliably pass or fail, but sometimes test fixtures must rely on components that aren't 100%
 reliable. With flaky, instead of removing those tests or marking them to @skip, they can be automatically
@@ -118,12 +118,6 @@ It can also be used to incur a delay between test retries:
 Activating the plugin
 ~~~~~~~~~~~~~~~~~~~~~
 
-Like any nose plugin, flaky can be activated via the command line:
-
-.. code-block:: console
-
-    nosetests --with-flaky
-
 With pytest, flaky will automatically run. It can, however be disabled via the command line:
 
 .. code-block:: console
@@ -152,7 +146,7 @@ Pass ``--max-runs=MAX_RUNS`` and/or ``--min-passes=MIN_PASSES`` to control the b
 is specified. Flaky decorators on individual tests will override these defaults.
 
 
-*Additional usage examples are in the code - see test/test_nose/test_nose_example.py and test/test_pytest/test_pytest_example.py*
+*Additional usage examples are in the code - see test/test_pytest/test_pytest_example.py*
 
 Installation
 ------------
@@ -169,8 +163,6 @@ Compatibility
 
 Flaky is tested with the following test runners and options:
 
-- Nosetests. Doctests cannot be marked flaky.
-
 - Py.test. Works with ``pytest-xdist`` but not with the ``--boxed`` option. Doctests cannot be marked flaky.
 
 
diff --git a/flaky/_flaky_plugin.py b/flaky/_flaky_plugin.py
index df4ce53..c9341c1 100644
--- a/flaky/_flaky_plugin.py
+++ b/flaky/_flaky_plugin.py
@@ -113,7 +113,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :return:
             True, if the test needs to be rerun; False, otherwise.
         :rtype:
@@ -135,7 +135,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :param name:
             The name of the test that has raised an error
         :type name:
@@ -164,7 +164,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :param err:
             Information about the test failure (from sys.exc_info())
         :type err:
@@ -209,7 +209,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :param name:
             The test name
         :type name:
@@ -233,7 +233,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error or succeeded
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         """
         raise NotImplementedError  # pragma: no cover
 
@@ -258,7 +258,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :return:
             True, if the test will be rerun; False, if the test runner should handle it.
         :rtype:
@@ -409,7 +409,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that is being prepared to run
         :type test:
-            :class:`nose.case.Test`
+            :class:`Function`
         """
         test_callable = cls._get_test_callable(test)
         if test_callable is None:
@@ -432,7 +432,7 @@ class _FlakyPlugin(object):
         :param test_item:
             The test method from which to get the attribute
         :type test_item:
-            `callable` or :class:`nose.case.Test` or :class:`Function`
+            `callable` or :class:`Function`
         :param flaky_attribute:
             The name of the attribute to get
         :type flaky_attribute:
@@ -454,7 +454,7 @@ class _FlakyPlugin(object):
         :param test_item:
             The test callable on which to set the attribute
         :type test_item:
-            `callable` or :class:`nose.case.Test` or :class:`Function`
+            `callable` or :class:`Function`
         :param flaky_attribute:
             The name of the attribute to set
         :type flaky_attribute:
@@ -474,7 +474,7 @@ class _FlakyPlugin(object):
         :param test_item:
             The test callable on which to set the attribute
         :type test_item:
-            `callable` or :class:`nose.case.Test` or :class:`Function`
+            `callable` or :class:`Function`
         :param flaky_attribute:
             The name of the attribute to set
         :type flaky_attribute:
@@ -490,7 +490,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that is being prepared to run
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :return:
         :rtype:
             `bool`
@@ -506,7 +506,7 @@ class _FlakyPlugin(object):
         :param test_item:
             The test callable from which to get the flaky related attributes.
         :type test_item:
-            `callable` or :class:`nose.case.Test` or :class:`Function`
+            `callable` or :class:`Function`
         :return:
         :rtype:
             `dict` of `unicode` to varies
@@ -526,7 +526,7 @@ class _FlakyPlugin(object):
         :param test:
             The flaky test on which to update the flaky attributes.
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :param err:
             Information about the test failure (from sys.exc_info())
         :type err:
@@ -587,7 +587,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error or succeeded
         :type test:
-            :class:`nose.case.Test` or :class:`pytest.Item`
+            :class:`pytest.Item`
         :return:
             The test declaration, callable and name that is being run
         :rtype:
@@ -603,7 +603,7 @@ class _FlakyPlugin(object):
         :param test:
             The test that has raised an error or succeeded
         :type test:
-            :class:`nose.case.Test` or :class:`pytest.Item`
+            :class:`pytest.Item`
         :return:
             The name of the test callable that is being run by the test
         :rtype:
@@ -619,7 +619,7 @@ class _FlakyPlugin(object):
         :param test:
             The test in question.
         :type test:
-            :class:`nose.case.Test` or :class:`Function`
+            :class:`Function`
         :param max_runs:
             The value of the FlakyNames.MAX_RUNS attribute to use.
         :type max_runs:
@@ -636,9 +636,9 @@ class _FlakyPlugin(object):
                 Information about the test failure (from sys.exc_info())
             - name (`unicode`):
                 The test name
-            - test (:class:`nose.case.Test` or :class:`Function`):
+            - test (:class:`Function`):
                 The test that has raised an error
-            - plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
+            - plugin (:class:`FlakyPytestPlugin`):
                 The flaky plugin. Has a :prop:`stream` that can be written to in
                 order to add to the Flaky Report.
         :type rerun_filter:
diff --git a/flaky/flaky_decorator.py b/flaky/flaky_decorator.py
index 36de7b3..7050c70 100644
--- a/flaky/flaky_decorator.py
+++ b/flaky/flaky_decorator.py
@@ -7,9 +7,8 @@ from flaky.defaults import default_flaky_attributes
 
 def flaky(max_runs=None, min_passes=None, rerun_filter=None):
     """
-    Decorator used to mark a test as "flaky". When used in conjuction with
-    the flaky nosetests plugin, will cause the decorated test to be retried
-    until min_passes successes are achieved out of up to max_runs test runs.
+    Decorator used to mark a test as "flaky". The decorated test will be
+    retried until min_passes successes are achieved out of up to max_runs test runs.
 
     :param max_runs:
         The maximum number of times the decorated test will be run.
@@ -27,9 +25,9 @@ def flaky(max_runs=None, min_passes=None, rerun_filter=None):
             Information about the test failure (from sys.exc_info())
         - name (`unicode`):
             The test name
-        - test (:class:`nose.case.Test` or :class:`Function`):
+        - test (:class:`Function`):
             The test that has raised an error
-        - plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
+        - plugin (:class:`FlakyPytestPlugin`):
             The flaky plugin. Has a :prop:`stream` that can be written to in
             order to add to the Flaky Report.
     :type rerun_filter:
diff --git a/flaky/flaky_nose_plugin.py b/flaky/flaky_nose_plugin.py
deleted file mode 100644
index c2d28b9..0000000
--- a/flaky/flaky_nose_plugin.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# coding: utf-8
-
-from __future__ import unicode_literals
-
-import logging
-from optparse import OptionGroup
-import os
-
-from nose.failure import Failure
-from nose.plugins import Plugin
-from nose.result import TextTestResult
-
-from flaky._flaky_plugin import _FlakyPlugin
-
-
-class FlakyPlugin(_FlakyPlugin, Plugin):
-    """
-    Plugin for nosetests that allows retrying flaky tests.
-    """
-    name = 'flaky'
-
-    def __init__(self):
-        super(FlakyPlugin, self).__init__()
-        self._logger = logging.getLogger('nose.plugins.flaky')
-        self._flaky_result = None
-        self._nose_result = None
-        self._flaky_report = True
-        self._force_flaky = False
-        self._max_runs = None
-        self._min_passes = None
-        self._test_status = {}
-        self._tests_that_reran = set()
-        self._tests_that_have_been_reported = set()
-
-    def options(self, parser, env=os.environ):
-        """
-        Base class override.
-        Add options to the nose argument parser.
-        """
-        # pylint:disable=dangerous-default-value
-        super(FlakyPlugin, self).options(parser, env=env)
-        self.add_report_option(parser.add_option)
-        group = OptionGroup(
-            parser, "Force flaky", "Force all tests to be flaky.")
-        self.add_force_flaky_options(group.add_option)
-        parser.add_option_group(group)
-
-    def _get_stream(self, multiprocess=False):
-        """
-        Get the stream used to store the flaky report.
-        If this nose run is going to use the multiprocess plugin, then use
-        a multiprocess-list backed StringIO proxy; otherwise, use the default
-        stream.
-
-        :param multiprocess:
-            Whether or not this test run is configured for multiprocessing.
-        :type multiprocess:
-            `bool`
-        :return:
-            The stream to use for storing the flaky report.
-        :rtype:
-            :class:`StringIO` or :class:`MultiprocessingStringIO`
-        """
-        if multiprocess:
-            from flaky.multiprocess_string_io import MultiprocessingStringIO
-            return MultiprocessingStringIO()
-        return self._stream
-
-    def configure(self, options, conf):
-        """Base class override."""
-        super(FlakyPlugin, self).configure(options, conf)
-        if not self.enabled:
-            return
-        is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
-        self._stream = self._get_stream(is_multiprocess)
-        self._flaky_result = TextTestResult(self._stream, [], 0)
-        self._flaky_report = options.flaky_report
-        self._flaky_success_report = options.flaky_success_report
-        self._force_flaky = options.force_flaky
-        self._max_runs = options.max_runs
-        self._min_passes = options.min_passes
-
-    def startTest(self, test):
-        """
-        Base class override. Called before a test is run.
-
-        Add the test to the test status tracker, so it can potentially
-        be rerun during afterTest.
-
-        :param test:
-            The test that is going to be run.
-        :type test:
-            :class:`nose.case.Test`
-        """
-        # pylint:disable=invalid-name
-        self._test_status[test] = None
-
-    def afterTest(self, test):
-        """
-        Base class override. Called after a test is run.
-
-        If the test was marked for rerun, rerun the test.
-
-        :param test:
-            The test that has been run.
-        :type test:
-            :class:`nose.case.Test`
-        """
-        # pylint:disable=invalid-name
-        if self._test_status[test]:
-            self._tests_that_reran.add(id(test))
-            test.run(self._flaky_result)
-        self._test_status.pop(test, None)
-
-    def _mark_test_for_rerun(self, test):
-        """
-        Base class override. Rerun a flaky test.
-
-        In this case, don't actually rerun the test, but mark it for
-        rerun during afterTest.
-
-        :param test:
-            The test that is going to be rerun.
-        :type test:
-            :class:`nose.case.Test`
-        """
-        self._test_status[test] = True
-
-    def handleError(self, test, err):
-        """
-        Baseclass override. Called when a test raises an exception.
-
-        If the test isn't going to be rerun again, then report the error
-        to the nose test result.
-
-        :param test:
-            The test that has raised an error
-        :type test:
-            :class:`nose.case.Test`
-        :param err:
-            Information about the test failure (from sys.exc_info())
-        :type err:
-            `tuple` of `class`, :class:`Exception`, `traceback`
-        :return:
-            True, if the test will be rerun; False, if nose should handle it.
-        :rtype:
-            `bool`
-        """
-        # pylint:disable=invalid-name
-        want_error = self._handle_test_error_or_failure(test, err)
-        if not want_error and id(test) in self._tests_that_reran:
-            self._nose_result.addError(test, err)
-        return want_error or None
-
-    def handleFailure(self, test, err):
-        """
-        Baseclass override. Called when a test fails.
-
-        If the test isn't going to be rerun again, then report the failure
-        to the nose test result.
-
-        :param test:
-            The test that has raised an error
-        :type test:
-            :class:`nose.case.Test`
-        :param err:
-            Information about the test failure (from sys.exc_info())
-        :type err:
-            `tuple` of `class`, :class:`Exception`, `traceback`
-        :return:
-            True, if the test will be rerun; False, if nose should handle it.
-        :rtype:
-            `bool`
-        """
-        # pylint:disable=invalid-name
-        want_failure = self._handle_test_error_or_failure(test, err)
-        if not want_failure and id(test) in self._tests_that_reran:
-            self._nose_result.addFailure(test, err)
-        return want_failure or None
-
-    def addSuccess(self, test):
-        """
-        Baseclass override. Called when a test succeeds.
-
-        Count remaining retries and compare with number of required successes
-        that have not yet been achieved; retry if necessary.
-
-        Returning True from this method keeps the test runner from reporting
-        the test as a success; this way we can retry and only report as a
-        success if we have achieved the required number of successes.
-
-        :param test:
-            The test that has succeeded
-        :type test:
-            :class:`nose.case.Test`
-        :return:
-            True, if the test will be rerun; False, if nose should handle it.
-        :rtype:
-            `bool`
-        """
-        # pylint:disable=invalid-name
-        will_handle = self._handle_test_success(test)
-        test_id = id(test)
-        # If this isn't a rerun, the builtin reporter is going to report it as a success
-        if will_handle and test_id not in self._tests_that_reran:
-            self._tests_that_have_been_reported.add(test_id)
-        # If this test hasn't already been reported as successful, then do it now
-        if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
-            self._nose_result.addSuccess(test)
-        return will_handle or None
-
-    def report(self, stream):
-        """
-        Baseclass override. Write details about flaky tests to the test report.
-
-        :param stream:
-            The test stream to which the report can be written.
-        :type stream:
-            `file`
-        """
-        if self._flaky_report:
-            self._add_flaky_report(stream)
-
-    def prepareTestResult(self, result):
-        """
-        Baseclass override. Called right before the first test is run.
-
-        Stores the test result so that errors and failures can be reported
-        to the nose test result.
-
-        :param result:
-            The nose test result that needs to be informed of test failures.
-        :type result:
-            :class:`nose.result.TextTestResult`
-        """
-        # pylint:disable=invalid-name
-        self._nose_result = result
-
-    def prepareTestCase(self, test):
-        """
-        Baseclass override. Called right before a test case is run.
-
-        If the test class is marked flaky and the test callable is not, copy
-        the flaky attributes from the test class to the test callable.
-
-        :param test:
-            The test that is being prepared to run
-        :type test:
-            :class:`nose.case.Test`
-        """
-        # pylint:disable=invalid-name
-        if not isinstance(test.test, Failure):
-            test_class = test.test
-            self._copy_flaky_attributes(test, test_class)
-            if self._force_flaky and not self._has_flaky_attributes(test):
-                self._make_test_flaky(
-                    test, self._max_runs, self._min_passes)
-
-    @staticmethod
-    def _get_test_callable_name(test):
-        """
-        Base class override.
-        """
-        _, _, class_and_callable_name = test.address()
-        first_dot_index = class_and_callable_name.find('.')
-        test_callable_name = class_and_callable_name[first_dot_index + 1:]
-        return test_callable_name
-
-    @classmethod
-    def _get_test_callable(cls, test):
-        """
-        Base class override.
-
-        :param test:
-            The test that has raised an error or succeeded
-        :type test:
-            :class:`nose.case.Test`
-        """
-        callable_name = cls._get_test_callable_name(test)
-        test_callable = getattr(
-            test.test,
-            callable_name,
-            getattr(test.test, 'test', test.test),
-        )
-        return test_callable
diff --git a/setup.py b/setup.py
index c794470..45882b0 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,7 @@ def main():
     setup(
         name='flaky',
         version='3.7.0',
-        description='Plugin for nose or pytest that automatically reruns flaky tests.',
+        description='Plugin for pytest that automatically reruns flaky tests.',
         long_description=open(join(base_dir, 'README.rst')).read(),
         author='Box',
         author_email='oss@box.com',
@@ -68,14 +68,11 @@ def main():
         cmdclass={'test': Tox},
         zip_safe=False,
         entry_points={
-            'nose.plugins.0.10': [
-                'flaky = flaky.flaky_nose_plugin:FlakyPlugin'
-            ],
             'pytest11': [
                 'flaky = flaky.flaky_pytest_plugin'
             ]
         },
-        keywords='nose pytest plugin flaky tests rerun retry',
+        keywords='pytest plugin flaky tests rerun retry',
         python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
         classifiers=CLASSIFIERS,
     )
diff --git a/test/test_nose/__init__.py b/test/test_nose/__init__.py
deleted file mode 100644
index 51a5579..0000000
--- a/test/test_nose/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# coding: utf-8
-
-from __future__ import unicode_literals, absolute_import
diff --git a/test/test_nose/test_flaky_nose_plugin.py b/test/test_nose/test_flaky_nose_plugin.py
deleted file mode 100644
index 0554ae1..0000000
--- a/test/test_nose/test_flaky_nose_plugin.py
+++ /dev/null
@@ -1,446 +0,0 @@
-# coding: utf-8
-
-from __future__ import unicode_literals
-
-from unittest import TestCase
-
-from genty import genty, genty_dataset
-import mock
-from mock import MagicMock, Mock, patch
-
-from flaky import defaults, flaky_nose_plugin
-from flaky.flaky_decorator import flaky
-from flaky.names import FlakyNames
-
-
-@genty
-class TestFlakyNosePlugin(TestCase):
-    def setUp(self):
-        super(TestFlakyNosePlugin, self).setUp()
-
-        self._mock_test_result = MagicMock()
-        self._mock_stream = None
-        self._flaky_plugin = flaky_nose_plugin.FlakyPlugin()
-        self._mock_nose_result = Mock(flaky_nose_plugin.TextTestResult)
-        self._flaky_plugin.prepareTestResult(self._mock_nose_result)
-        self._mock_test = MagicMock(name='flaky_plugin_test')
-        self._mock_test_case = MagicMock(
-            name='flaky_plugin_test_case',
-            spec=TestCase
-        )
-        self._mock_test_case.address = MagicMock()
-        self._mock_test_case.test = self._mock_test
-        self._mock_test_module_name = 'test_module'
-        self._mock_test_class_name = 'TestClass'
-        self._mock_test_method_name = 'test_method'
-        self._mock_test_names = '{}:{}.{}'.format(
-            self._mock_test_module_name,
-            self._mock_test_class_name,
-            self._mock_test_method_name
-        )
-        self._mock_exception = Exception('Error in {}'.format(
-            self._mock_test_method_name)
-        )
-        self._mock_stack_trace = ''
-        self._mock_exception_type = Exception
-        self._mock_error = (
-            self._mock_exception_type,
-            self._mock_exception,
-            None,
-        )
-        self._mock_test_method = MagicMock(
-            name=self._mock_test_method_name,
-            spec=['__call__'] + list(FlakyNames().items()),
-        )
-        setattr(
-            self._mock_test,
-            self._mock_test_method_name,
-            self._mock_test_method,
-        )
-
-    def _assert_flaky_plugin_configured(self):
-        options = Mock()
-        options.multiprocess_workers = 0
-        conf = Mock()
-        self._flaky_plugin.enabled = True
-        with patch.object(flaky_nose_plugin, 'TextTestResult') as flaky_result:
-            flaky_result.return_value = self._mock_test_result
-            from io import StringIO
-            self._mock_stream = MagicMock(spec=StringIO)
-            with patch.object(self._flaky_plugin, '_get_stream') as get_stream:
-                get_stream.return_value = self._mock_stream
-                self._flaky_plugin.configure(options, conf)
-
-    def test_flaky_plugin_report(self):
-        flaky_report = 'Flaky tests passed; others failed. ' \
-                       'No more tests; that ship has sailed.'
-        self._test_flaky_plugin_report(flaky_report)
-
-    def test_flaky_plugin_handles_success_for_test_method(self):
-        self._test_flaky_plugin_handles_success()
-
-    def test_flaky_plugin_handles_success_for_test_instance(self):
-        self._test_flaky_plugin_handles_success(is_test_method=False)
-
-    def test_flaky_plugin_handles_success_for_needs_rerun(self):
-        self._test_flaky_plugin_handles_success(min_passes=2)
-
-    def test_flaky_plugin_ignores_success_for_non_flaky_test(self):
-        self._expect_test_not_flaky()
-        self._flaky_plugin.addSuccess(self._mock_test_case)
-        self._assert_test_ignored()
-
-    def test_flaky_plugin_ignores_error_for_non_flaky_test(self):
-        self._expect_test_not_flaky()
-        self._flaky_plugin.handleError(self._mock_test_case, None)
-        self._assert_test_ignored()
-
-    def test_flaky_plugin_ignores_failure_for_non_flaky_test(self):
-        self._expect_test_not_flaky()
-        self._flaky_plugin.handleFailure(self._mock_test_case, None)
-        self._assert_test_ignored()
-
-    def test_flaky_plugin_ignores_error_for_nose_failure(self):
-        self._mock_test_case.address.return_value = (
-            None,
-            self._mock_test_module_name,
-            None,
-        )
-        self._flaky_plugin.handleError(self._mock_test_case, None)
-        self._assert_test_ignored()
-
-    def test_flaky_plugin_handles_error_for_test_method(self):
-        self._test_flaky_plugin_handles_failure_or_error()
-
-    def test_flaky_plugin_handles_error_for_test_instance(self):
-        self._test_flaky_plugin_handles_failure_or_error(is_test_method=False)
-
-    def test_flaky_plugin_handles_failure_for_test_method(self):
-        self._test_flaky_plugin_handles_failure_or_error(is_failure=True)
-
-    def test_flaky_plugin_handles_failure_for_test_instance(self):
-        self._test_flaky_plugin_handles_failure_or_error(
-            is_failure=True,
-            is_test_method=False
-        )
-
-    def test_flaky_plugin_handles_failure_for_no_more_retries(self):
-        self._test_flaky_plugin_handles_failure_or_error(
-            is_failure=True,
-            max_runs=1
-        )
-
-    def test_flaky_plugin_handles_additional_errors(self):
-        self._test_flaky_plugin_handles_failure_or_error(
-            current_errors=[self._mock_error]
-        )
-
-    def test_flaky_plugin_handles_bare_test(self):
-        self._mock_test_names = self._mock_test_method_name
-        self._mock_test.test = Mock()
-        self._expect_call_test_address()
-        attrib = defaults.default_flaky_attributes(2, 1)
-        for name, value in attrib.items():
-            setattr(
-                self._mock_test.test,
-                name,
-                value,
-            )
-        delattr(self._mock_test, self._mock_test_method_name)
-        self._flaky_plugin.prepareTestCase(self._mock_test_case)
-        self.assertTrue(self._flaky_plugin.handleError(
-            self._mock_test_case,
-            self._mock_error,
-        ))
-        self.assertFalse(self._flaky_plugin.handleError(
-            self._mock_test_case,
-            self._mock_error,
-        ))
-
-    def _expect_call_test_address(self):
-        self._mock_test_case.address.return_value = (
-            None,
-            None,
-            self._mock_test_names
-        )
-
-    def _expect_test_flaky(self, is_test_method, max_runs, min_passes):
-        self._expect_call_test_address()
-        if is_test_method:
-            mock_test_method = getattr(
-                self._mock_test,
-                self._mock_test_method_name
-            )
-            for flaky_attr in FlakyNames():
-                setattr(self._mock_test, flaky_attr, None)
-                setattr(mock_test_method, flaky_attr, None)
-            flaky(max_runs, min_passes)(mock_test_method)
-        else:
-            flaky(max_runs, min_passes)(self._mock_test)
-            mock_test_method = getattr(
-                self._mock_test,
-                self._mock_test_method_name
-            )
-            for flaky_attr in FlakyNames():
-                setattr(mock_test_method, flaky_attr, None)
-
-    def _expect_test_not_flaky(self):
-        self._expect_call_test_address()
-        for test_object in (
-            self._mock_test,
-            getattr(self._mock_test, self._mock_test_method_name)
-        ):
-            for flaky_attr in FlakyNames():
-                setattr(test_object, flaky_attr, None)
-
-    def _assert_test_ignored(self):
-        self._mock_test_case.address.assert_called_with()
-        self.assertEqual(
-            self._mock_test_case.mock_calls,
-            [mock.call.address()],
-        )
-        self.assertEqual(self._mock_test.mock_calls, [])
-        self.assertEqual(self._mock_nose_result.mock_calls, [])
-
-    def _get_flaky_attributes(self):
-        actual_flaky_attributes = {
-            attr: getattr(
-                self._mock_test_case,
-                attr,
-                None,
-            ) for attr in FlakyNames()
-        }
-        for key, value in actual_flaky_attributes.items():
-            if isinstance(value, list):
-                actual_flaky_attributes[key] = tuple(value)
-        return actual_flaky_attributes
-
-    def _set_flaky_attribute(self, attr, value):
-        setattr(self._mock_test, attr, value)
-
-    def _assert_flaky_attributes_contains(
-        self,
-        expected_flaky_attributes,
-    ):
-        actual_flaky_attributes = self._get_flaky_attributes()
-        self.assertDictContainsSubset(
-            expected_flaky_attributes,
-            actual_flaky_attributes,
-            'Unexpected flaky attributes. Expected {} got {}'.format(
-                expected_flaky_attributes,
-                actual_flaky_attributes
-            )
-        )
-
-    def _test_flaky_plugin_handles_failure_or_error(
-        self,
-        current_errors=None,
-        current_passes=0,
-        current_runs=0,
-        is_failure=False,
-        is_test_method=True,
-        max_runs=2,
-        min_passes=1,
-    ):
-        self._assert_flaky_plugin_configured()
-        self._expect_test_flaky(is_test_method, max_runs, min_passes)
-        if current_errors is None:
-            current_errors = [self._mock_error]
-        else:
-            current_errors.append(self._mock_error)
-        self._set_flaky_attribute(
-            FlakyNames.CURRENT_ERRORS,
-            current_errors,
-        )
-        self._set_flaky_attribute(
-            FlakyNames.CURRENT_PASSES,
-            current_passes,
-        )
-        self._set_flaky_attribute(
-            FlakyNames.CURRENT_RUNS,
-            current_runs,
-        )
-
-        retries_remaining = current_runs + 1 < max_runs
-        too_few_passes = current_passes < min_passes
-        expected_plugin_handles_failure = too_few_passes and retries_remaining
-        did_plugin_retry_test = max_runs > 1
-
-        self._flaky_plugin.prepareTestCase(self._mock_test_case)
-        if is_failure:
-            actual_plugin_handles_failure = self._flaky_plugin.handleFailure(
-                self._mock_test_case,
-                self._mock_error,
-            )
-        else:
-            actual_plugin_handles_failure = self._flaky_plugin.handleError(
-                self._mock_test_case,
-                self._mock_error,
-            )
-
-        self.assertEqual(
-            expected_plugin_handles_failure or None,
-            actual_plugin_handles_failure,
-            'Expected plugin{} to handle the test run, but it did{}.'.format(
-                ' to' if expected_plugin_handles_failure else '',
-                '' if actual_plugin_handles_failure else ' not'
-            ),
-        )
-        self._assert_flaky_attributes_contains(
-            {
-                FlakyNames.CURRENT_RUNS: current_runs + 1,
-                FlakyNames.CURRENT_ERRORS: tuple(current_errors),
-            },
-        )
-        expected_test_case_calls = [mock.call.address(), mock.call.address()]
-        expected_result_calls = []
-        if expected_plugin_handles_failure:
-            expected_test_case_calls.append(('__hash__',))
-            expected_stream_calls = [mock.call.writelines([
-                self._mock_test_method_name,
-                ' failed ({} runs remaining out of {}).'.format(
-                    max_runs - current_runs - 1, max_runs
-                ),
-                'Exception: Error in test_method',
-                '\n',
-            ])]
-        else:
-            if did_plugin_retry_test:
-                if is_failure:
-                    expected_result_calls.append(
-                        mock.call.addFailure(
-                            self._mock_test_case,
-                            self._mock_error,
-                        ),
-                    )
-                else:
-                    expected_result_calls.append(mock.call.addError(
-                        self._mock_test_case,
-                        self._mock_error,
-                    ))
-            expected_stream_calls = [mock.call.writelines([
-                self._mock_test_method_name,
-                ' failed; it passed {} out of the required {} times.'.format(
-                    current_passes,
-                    min_passes
-                ),
-                'Exception: Error in test_method',
-                '\n'
-            ])]
-        self.assertEqual(
-            self._mock_nose_result.mock_calls,
-            expected_result_calls,
-        )
-        self.assertEqual(
-            self._mock_test_case.mock_calls,
-            expected_test_case_calls,
-            'Unexpected TestCase calls: {} vs {}'.format(
-                self._mock_test_case.mock_calls,
-                expected_test_case_calls
-            )
-        )
-        self.assertEqual(self._mock_stream.mock_calls, expected_stream_calls)
-
-    def _test_flaky_plugin_handles_success(
-        self,
-        current_passes=0,
-        current_runs=0,
-        is_test_method=True,
-        max_runs=2,
-        min_passes=1
-    ):
-        self._assert_flaky_plugin_configured()
-        self._expect_test_flaky(is_test_method, max_runs, min_passes)
-        self._set_flaky_attribute(
-            FlakyNames.CURRENT_PASSES,
-            current_passes,
-        )
-        self._set_flaky_attribute(
-            FlakyNames.CURRENT_RUNS,
-            current_runs,
-        )
-
-        retries_remaining = current_runs + 1 < max_runs
-        too_few_passes = current_passes + 1 < min_passes
-        expected_plugin_handles_success = too_few_passes and retries_remaining
-
-        self._flaky_plugin.prepareTestCase(self._mock_test_case)
-        actual_plugin_handles_success = self._flaky_plugin.addSuccess(
-            self._mock_test_case,
-        )
-
-        self.assertEqual(
-            expected_plugin_handles_success or None,
-            actual_plugin_handles_success,
-            'Expected plugin{} to handle the test run, but it did{}.'.format(
-                ' not' if expected_plugin_handles_success else '',
-                '' if actual_plugin_handles_success else ' not'
-            ),
-        )
-        self._assert_flaky_attributes_contains(
-            {
-                FlakyNames.CURRENT_RUNS: current_runs + 1,
-                FlakyNames.CURRENT_PASSES: current_passes + 1,
-            },
-        )
-        expected_test_case_calls = [mock.call.address(), mock.call.address()]
-        expected_stream_calls = [mock.call.writelines([
-            self._mock_test_method_name,
-            " passed {} out of the required {} times. ".format(
-                current_passes + 1,
-                min_passes,
-            ),
-        ])]
-        if expected_plugin_handles_success:
-            _rerun_text = 'Running test again until it passes {0} times.\n'
-            expected_test_case_calls.append(('__hash__',))
-            expected_stream_calls.append(
-                mock.call.write(_rerun_text.format(min_passes)),
-            )
-        else:
-            expected_stream_calls.append(mock.call.write('Success!\n'))
-        self.assertEqual(
-            self._mock_test_case.mock_calls,
-            expected_test_case_calls,
-            'Unexpected TestCase calls = {} vs {}'.format(
-                self._mock_test_case.mock_calls,
-                expected_test_case_calls,
-            ),
-        )
-        self.assertEqual(self._mock_stream.mock_calls, expected_stream_calls)
-
-    def _test_flaky_plugin_report(self, expected_stream_value):
-        self._assert_flaky_plugin_configured()
-        mock_stream = Mock()
-        self._mock_stream.getvalue.return_value = expected_stream_value
-
-        self._flaky_plugin.report(mock_stream)
-
-        self.assertEqual(
-            mock_stream.mock_calls,
-            [
-                mock.call.write('===Flaky Test Report===\n\n'),
-                mock.call.write(expected_stream_value),
-                mock.call.write('\n===End Flaky Test Report===\n'),
-            ],
-        )
-
-    @genty_dataset(
-        multiprocess_plugin_absent=(None, 'StringIO'),
-        processes_argument_absent=(0, 'StringIO'),
-        processes_equals_one=(1, 'MultiprocessingStringIO'),
-        processes_equals_two=(2, 'MultiprocessingStringIO'),
-    )
-    def test_flaky_plugin_get_stream(self, mp_workers, expected_class_name):
-        options = Mock()
-        conf = Mock()
-        self._flaky_plugin.enabled = True
-        options.multiprocess_workers = mp_workers
-        if mp_workers is None:
-            del options.multiprocess_workers
-        self._flaky_plugin.configure(options, conf)
-        # pylint:disable=protected-access
-        self.assertEqual(
-            self._flaky_plugin._stream.__class__.__name__,
-            expected_class_name,
-        )
diff --git a/test/test_nose/test_nose_example.py b/test/test_nose/test_nose_example.py
deleted file mode 100644
index 5643678..0000000
--- a/test/test_nose/test_nose_example.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# coding: utf-8
-
-from __future__ import unicode_literals
-
-from unittest import TestCase, skip
-
-from genty import genty, genty_dataset
-from nose.tools import raises
-
-from flaky import flaky
-
-
-# This is an end-to-end example of the flaky package in action. Consider it
-# a live tutorial, showing the various features in action.
-
-
-class ExampleTests(TestCase):
-    _threshold = -1
-
-    def test_non_flaky_thing(self):
-        """Flaky will not interact with this test"""
-
-    @raises(AssertionError)
-    def test_non_flaky_failing_thing(self):
-        """Flaky will also not interact with this test"""
-        self.assertEqual(0, 1)
-
-    @flaky(3, 2)
-    def test_flaky_thing_that_fails_then_succeeds(self):
-        """
-        Flaky will run this test 3 times. It will fail once and then succeed twice.
-        """
-        self._threshold += 1
-        if self._threshold < 1:
-            raise Exception("Threshold is not high enough: {} vs {}.".format(
-                self._threshold, 1),
-            )
-
-    @flaky(3, 2)
-    def test_flaky_thing_that_succeeds_then_fails_then_succeeds(self):
-        """
-        Flaky will run this test 3 times. It will succeed once, fail once, and then succeed one more time.
-        """
-        self._threshold += 1
-        if self._threshold == 1:
-            self.assertEqual(0, 1)
-
-    @flaky(2, 2)
-    def test_flaky_thing_that_always_passes(self):
-        """Flaky will run this test twice.  Both will succeed."""
-
-    @skip("This really fails! Remove this decorator to see the test failure.")
-    @flaky()
-    def test_flaky_thing_that_always_fails(self):
-        """Flaky will run this test twice.  Both will fail."""
-        self.assertEqual(0, 1)
-
-
-@flaky
-class ExampleFlakyTests(TestCase):
-    _threshold = -1
-
-    def test_flaky_thing_that_fails_then_succeeds(self):
-        """
-        Flaky will run this test twice. It will fail once and then succeed.
-        """
-        self._threshold += 1
-        if self._threshold < 1:
-            raise Exception("Threshold is not high enough: {} vs {}.".format(
-                self._threshold, 1),
-            )
-
-
-def test_function():
-    """
-    Nose will import this function and wrap it in a :class:`FunctionTestCase`.
-    It's included in the example to make sure flaky handles it correctly.
-    """
-
-
-@flaky
-def test_flaky_function(param=[]):
-    # pylint:disable=dangerous-default-value
-    param_length = len(param)
-    param.append(None)
-    assert param_length == 1
-
-
-@genty
-class ExampleFlakyTestsWithUnicodeTestNames(ExampleFlakyTests):
-    @genty_dataset('ascii name', 'ńőń ȁŝćȉȉ ŝƭȕƒƒ')
-    def test_non_flaky_thing(self, message):
-        self._threshold += 1
-        if self._threshold < 1:
-            raise Exception(
-                "Threshold is not high enough: {} vs {} for '{}'.".format(
-                    self._threshold, 1, message),
-            )
diff --git a/test/test_nose/test_nose_options_example.py b/test/test_nose/test_nose_options_example.py
deleted file mode 100644
index 2928a36..0000000
--- a/test/test_nose/test_nose_options_example.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# coding: utf-8
-
-from __future__ import unicode_literals
-
-from unittest import TestCase
-
-from flaky import flaky
-
-# This is a series of tests that do not use the flaky decorator; the flaky
-# behavior is intended to be enabled with the --force-flaky option on the
-# command line.
-
-
-class ExampleTests(TestCase):
-    _threshold = -2
-
-    def test_something_flaky(self):
-        """
-        Flaky will run this test twice.
-        It will fail once and then succeed once.
-        This ensures that we mark tests as flaky even if they don't have a
-        decorator when we use the command-line options.
-        """
-        self._threshold += 1
-        if self._threshold < 0:
-            raise Exception("Threshold is not high enough.")
-
-    @flaky(3, 1)
-    def test_flaky_thing_that_fails_then_succeeds(self):
-        """
-        Flaky will run this test 3 times.
-        It will fail twice and then succeed once.
-        This ensures that the flaky decorator overrides any command-line
-        options we specify.
-        """
-        self._threshold += 1
-        if self._threshold < 1:
-            raise Exception("Threshold is not high enough.")
-
-
-@flaky(3, 1)
-class ExampleFlakyTests(TestCase):
-    _threshold = -1
-
-    def test_flaky_thing_that_fails_then_succeeds(self):
-        """
-        Flaky will run this test 3 times.
-        It will fail twice and then succeed once.
-        This ensures that the flaky decorator on a test suite overrides any
-        command-line options we specify.
-        """
-        self._threshold += 1
-        if self._threshold < 1:
-            raise Exception("Threshold is not high enough.")