Snapshot idea/138.1696 from git://git.jetbrains.org/idea/community.git
Change-Id: I50c97b83a815ce635e49a38380ba5b8765e4b16a
diff --git a/python/helpers/pycharm/_bdd_utils.py b/python/helpers/pycharm/_bdd_utils.py
index 0c92532..300feb2 100644
--- a/python/helpers/pycharm/_bdd_utils.py
+++ b/python/helpers/pycharm/_bdd_utils.py
@@ -27,7 +27,7 @@
assert os.path.exists(what_to_run), "{} does not exist".format(what_to_run)
if os.path.isfile(what_to_run):
- base_dir = os.path.dirname(what_to_run) # User may point to the file directly
+ base_dir = os.path.dirname(what_to_run) # User may point to the file directly
return base_dir, what_to_run
@@ -62,8 +62,11 @@
""""
Runs runner. To be called right after constructor.
"""
- self.tc_messages.testCount(self._get_number_of_tests())
+ number_of_tests = self._get_number_of_tests()
+ self.tc_messages.testCount(number_of_tests)
self.tc_messages.testMatrixEntered()
+ if number_of_tests == 0: # Nothing to run, so no need to report even feature/scenario start. (See PY-13623)
+ return
self._run_tests()
def __gen_location(self, location):
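Note on the hunk above: with the early return, a session that matches no tests still announces the (zero) test count and the start of the session to the IDE, but never reaches _run_tests(), so no empty feature or scenario nodes are reported (PY-13623). A rough sketch of how run() reads once this hunk is applied, assuming the tc_messages object provided by pycharm/tcmessages.py:

    def run(self):
        """Runs runner. To be called right after constructor."""
        number_of_tests = self._get_number_of_tests()
        self.tc_messages.testCount(number_of_tests)   # tell the IDE how many steps to expect
        self.tc_messages.testMatrixEntered()          # mark the start of the test session
        if number_of_tests == 0:
            return  # nothing to run, so no feature/scenario start is reported (PY-13623)
        self._run_tests()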
@@ -175,9 +178,9 @@
num_of_steps = 0
for feature in self._get_features_to_run():
if feature.background:
- num_of_steps += len(feature.background.steps) * len(feature.scenarios)
+ num_of_steps += len(list(feature.background.steps)) * len(list(feature.scenarios))
for scenario in feature.scenarios:
- num_of_steps += len(scenario.steps)
+ num_of_steps += len(list(scenario.steps))
return num_of_steps
@abc.abstractmethod
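Switching to len(list(...)) is presumably a guard against steps/scenarios being exposed as iterators rather than plain lists in some framework versions: len() on a generator raises TypeError, while materializing it first always works. A small self-contained illustration of the difference (the step texts are made up):

    def steps():
        # stand-in for a lazily produced sequence of steps
        yield "Given a calculator"
        yield "When I add 2 and 2"

    try:
        count = len(steps())        # TypeError: object of type 'generator' has no len()
    except TypeError:
        count = len(list(steps()))  # materialize first, then count
    print(count)                    # 2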
diff --git a/python/helpers/pycharm/behave_runner.py b/python/helpers/pycharm/behave_runner.py
index 4a1b2f6..0ad8313 100644
--- a/python/helpers/pycharm/behave_runner.py
+++ b/python/helpers/pycharm/behave_runner.py
@@ -136,20 +136,23 @@
element.location.file = element.location.filename # To preserve _bdd_utils contract
if isinstance(element, Step):
# Process step
+ step_name = "{} {}".format(element.keyword, element.name)
if is_started:
- self._test_started(element.name, element.location)
+ self._test_started(step_name, element.location)
elif element.status == 'passed':
- self._test_passed(element.name, element.duration)
+ self._test_passed(step_name, element.duration)
elif element.status == 'failed':
try:
trace = traceback.format_exc()
except Exception:
trace = "".join(traceback.format_tb(element.exc_traceback))
- self._test_failed(element.name, element.error_message, trace)
+ if trace in str(element.error_message):
+ trace = None # No reason to duplicate output (see PY-13647)
+ self._test_failed(step_name, element.error_message, trace)
elif element.status == 'undefined':
- self._test_undefined(element.name, element.location)
+ self._test_undefined(step_name, element.location)
else:
- self._test_skipped(element.name, element.status, element.location)
+ self._test_skipped(step_name, element.status, element.location)
elif not is_started and isinstance(element, Scenario) and element.status == 'failed':
# To process scenarios with undefined/skipped tests
for step in element.steps:
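Two behavioural changes in this hunk: step names reported to the IDE now include the Gherkin keyword, and a traceback that is already embedded in the error message is not printed a second time (PY-13647). A small sketch of both, using a made-up stand-in rather than a real behave Step object:

    class FakeStep(object):
        # minimal stand-in for behave's Step: only the attributes used above
        keyword = "Given"
        name = "I have a calculator"
        error_message = "AssertionError: boom\nTraceback (most recent call last): ..."

    step = FakeStep()
    step_name = "{} {}".format(step.keyword, step.name)  # "Given I have a calculator"

    trace = "Traceback (most recent call last): ..."
    if trace in str(step.error_message):
        trace = None  # the message already carries the traceback, do not duplicate it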
diff --git a/python/helpers/pycharm/docrunner.py b/python/helpers/pycharm/docrunner.py
index ad619be..ed9a6f1 100644
--- a/python/helpers/pycharm/docrunner.py
+++ b/python/helpers/pycharm/docrunner.py
@@ -69,6 +69,12 @@
self.messages.testError(self.getTestName(test),
message='Error', details=err)
+ def stopTest(self, test):
+ start = getattr(test, "startTime", datetime.datetime.now())
+ d = datetime.datetime.now() - start
+ duration = d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
+ self.messages.testFinished(self.getTestName(test), duration=int(duration))
+
class DocTestRunner(doctest.DocTestRunner):
"""
Special runner for doctests,
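The new stopTest() derives the duration in milliseconds by summing the days, seconds and microseconds components of a timedelta. Assuming datetime is imported at module level and startTest() stamps test.startTime (both outside this hunk), the computation is equivalent to the following sketch:

    import datetime

    start = datetime.datetime.now()
    # ... the doctest runs here ...
    d = datetime.datetime.now() - start
    duration_ms = d.days * 86400000 + d.seconds * 1000 + d.microseconds // 1000
    # same result as int(d.total_seconds() * 1000) on Python 2.7+, within rounding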